Diffstat (limited to 'arch/mips/kernel'): 68 files changed, 450 insertions, 450 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 007c33d7371..c48ed923fd5 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o -obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o +obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP_UP) += smp-up.o @@ -53,7 +53,7 @@ obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_CPU_MIPSR2) += spram.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o -obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o +obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o obj-$(CONFIG_I8259) += i8259.o obj-$(CONFIG_IRQ_CPU) += irq_cpu.o diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index 9fdd8bcdd21..e06f777e9c4 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -6,7 +6,7 @@ * * Heavily inspired by the 32-bit Sparc compat code which is * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) - * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) + * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) */ #define ELF_ARCH EM_MIPS @@ -48,7 +48,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #define TASK32_SIZE 0x7fff8000UL #undef ELF_ET_DYN_BASE -#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) #include <asm/processor.h> #include <linux/module.h> @@ -67,8 +67,8 @@ struct elf_prstatus32 pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ + struct compat_timeval pr_utime; /* User time */ + struct compat_timeval pr_stime; /* System time */ struct compat_timeval pr_cutime;/* Cumulative user time */ struct compat_timeval pr_cstime;/* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ @@ -88,7 +88,7 @@ struct elf_prpsinfo32 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; /* Lots missing */ char pr_fname[16]; /* filename of executable */ - char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; #define elf_caddr_t u32 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index ff448233dab..556a4357d7f 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -6,7 +6,7 @@ * * Heavily inspired by the 32-bit Sparc compat code which is * Copyright (C) 1995, 1996, 1997, 1998 David S. 
Miller (davem@redhat.com) - * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) + * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) */ #define ELF_ARCH EM_MIPS @@ -50,7 +50,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #define TASK32_SIZE 0x7fff8000UL #undef ELF_ET_DYN_BASE -#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) #include <asm/processor.h> @@ -86,8 +86,8 @@ struct elf_prstatus32 pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ + struct compat_timeval pr_utime; /* User time */ + struct compat_timeval pr_stime; /* System time */ struct compat_timeval pr_cutime;/* Cumulative user time */ struct compat_timeval pr_cstime;/* Cumulative system time */ elf_gregset_t pr_reg; /* GP registers */ @@ -107,7 +107,7 @@ struct elf_prpsinfo32 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; /* Lots missing */ char pr_fname[16]; /* filename of executable */ - char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; #define elf_caddr_t u32 diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S index e908e81330b..64c4fd62cf0 100644 --- a/arch/mips/kernel/bmips_vec.S +++ b/arch/mips/kernel/bmips_vec.S @@ -170,7 +170,7 @@ bmips_smp_entry: /* switch to permanent stack and continue booting */ - .global bmips_secondary_reentry + .global bmips_secondary_reentry bmips_secondary_reentry: la k0, bmips_smp_boot_sp lw sp, 0(k0) @@ -182,7 +182,7 @@ bmips_secondary_reentry: #endif /* CONFIG_SMP */ .align 4 - .global bmips_reset_nmi_vec_end + .global bmips_reset_nmi_vec_end bmips_reset_nmi_vec_end: END(bmips_reset_nmi_vec) @@ -206,7 +206,7 @@ LEAF(bmips_smp_int_vec) eret .align 4 - .global bmips_smp_int_vec_end + .global bmips_smp_int_vec_end bmips_smp_int_vec_end: END(bmips_smp_int_vec) diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 4d735d0e58f..83ffe950f71 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -57,7 +57,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, */ case bcond_op: switch (insn.i_format.rt) { - case bltz_op: + case bltz_op: case bltzl_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -197,8 +197,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, bit += (bit != 0); bit += 23; switch (insn.i_format.rt & 3) { - case 0: /* bc1f */ - case 2: /* bc1fl */ + case 0: /* bc1f */ + case 2: /* bc1fl */ if (~fcr31 & (1 << bit)) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == 2) @@ -208,8 +208,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case 1: /* bc1t */ - case 3: /* bc1tl */ + case 1: /* bc1t */ + case 3: /* bc1tl */ if (fcr31 & (1 << bit)) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == 3) diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index 69bbfae183b..15f618b40cf 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c @@ -41,7 +41,7 @@ * the rest of the system */ static void sibyte_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) + struct clock_event_device *evt) { unsigned int cpu = smp_processor_id(); void __iomem *cfg, *init; @@ -144,7 +144,7 @@ void __cpuinit sb1480_clockevent_init(void) bcm1480_unmask_irq(cpu, irq); - action->handler 
= sibyte_counter_handler; + action->handler = sibyte_counter_handler; action->flags = IRQF_PERCPU | IRQF_TIMER; action->name = name; action->dev_id = cd; diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index ed648cb5a69..ff1f01b7227 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c @@ -1,7 +1,7 @@ /* * DS1287 clockevent driver * - * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> + * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -89,7 +89,7 @@ static void ds1287_event_handler(struct clock_event_device *dev) static struct clock_event_device ds1287_clockevent = { .name = "ds1287", .features = CLOCK_EVT_FEAT_PERIODIC, - .set_next_event = ds1287_set_next_event, + .set_next_event = ds1287_set_next_event, .set_mode = ds1287_set_mode, .event_handler = ds1287_event_handler, }; diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c index 831b47585b7..f069460751a 100644 --- a/arch/mips/kernel/cevt-gt641xx.c +++ b/arch/mips/kernel/cevt-gt641xx.c @@ -1,7 +1,7 @@ /* * GT641xx clockevent routines. * - * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> + * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -98,7 +98,7 @@ static struct clock_event_device gt641xx_timer0_clockevent = { .name = "gt641xx-timer0", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .irq = GT641XX_TIMER0_IRQ, - .set_next_event = gt641xx_timer0_set_next_event, + .set_next_event = gt641xx_timer0_set_next_event, .set_mode = gt641xx_timer0_set_mode, .event_handler = gt641xx_timer0_event_handler, }; diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 75323925e53..07b847d77f5 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -25,7 +25,7 @@ #ifndef CONFIG_MIPS_MT_SMTC static int mips_next_event(unsigned long delta, - struct clock_event_device *evt) + struct clock_event_device *evt) { unsigned int cnt; int res; @@ -66,7 +66,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) goto out; /* - * The same applies to performance counter interrupts. But with the + * The same applies to performance counter interrupts. But with the * above we now know that the reason we got here must be a timer * interrupt. Being the paranoiacs we are we check anyway. */ @@ -119,7 +119,7 @@ int c0_compare_int_usable(void) unsigned int cnt; /* - * IP7 already pending? Try to clear it by acking the timer. + * IP7 already pending? Try to clear it by acking the timer. 
*/ if (c0_compare_int_pending()) { cnt = read_c0_count(); diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index e73439fd685..200f2778bf3 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c @@ -39,7 +39,7 @@ * the rest of the system */ static void sibyte_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) + struct clock_event_device *evt) { unsigned int cpu = smp_processor_id(); void __iomem *cfg, *init; @@ -143,7 +143,7 @@ void __cpuinit sb1250_clockevent_init(void) sb1250_unmask_irq(cpu, irq); - action->handler = sibyte_counter_handler; + action->handler = sibyte_counter_handler; action->flags = IRQF_PERCPU | IRQF_TIMER; action->name = name; action->dev_id = cd; diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c index 2e72d30b2f0..9de5ed7ef1a 100644 --- a/arch/mips/kernel/cevt-smtc.c +++ b/arch/mips/kernel/cevt-smtc.c @@ -49,7 +49,7 @@ static int smtc_nextinvpe[NR_CPUS]; /* * Timestamps stored are absolute values to be programmed - * into Count register. Valid timestamps will never be zero. + * into Count register. Valid timestamps will never be zero. * If a Zero Count value is actually calculated, it is converted * to be a 1, which will introduce 1 or two CPU cycles of error * roughly once every four billion events, which at 1000 HZ means diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index e5c30b1d086..2ae08462e46 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -4,7 +4,7 @@ * for more details. * * Based on linux/arch/mips/kernel/cevt-r4k.c, - * linux/arch/mips/jmr3927/rbhma3100/setup.c + * linux/arch/mips/jmr3927/rbhma3100/setup.c * * Copyright 2001 MontaVista Software Inc. * Copyright (C) 2000-2001 Toshiba Corporation @@ -129,7 +129,7 @@ static struct txx9_clock_event_device txx9_clock_event_device = { CLOCK_EVT_FEAT_ONESHOT, .rating = 200, .set_mode = txx9tmr_set_mode, - .set_next_event = txx9tmr_set_next_event, + .set_next_event = txx9tmr_set_next_event, }, }; @@ -139,7 +139,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id) struct clock_event_device *cd = &txx9_cd->cd; struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; - __raw_writel(0, &tmrptr->tisr); /* ack interrupt */ + __raw_writel(0, &tmrptr->tisr); /* ack interrupt */ cd->event_handler(cd); return IRQ_HANDLED; } diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index d6a18644365..de3c25ffd9f 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -84,9 +84,9 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w, ".set noreorder\n\t" ".set nomacro\n\t" "mult %2, %3\n\t" - "dsll32 %0, %4, %5\n\t" + "dsll32 %0, %4, %5\n\t" "mflo $0\n\t" - "dsll32 %1, %4, %5\n\t" + "dsll32 %1, %4, %5\n\t" "nop\n\t" ".set pop" : "=&r" (lv1), "=r" (lw) @@ -239,7 +239,7 @@ static inline void check_daddi(void) panic(bug64hit, !DADDI_WAR ? 
daddiwar : nowar); } -int daddiu_bug = -1; +int daddiu_bug = -1; static inline void check_daddiu(void) { @@ -273,7 +273,7 @@ static inline void check_daddiu(void) #ifdef HAVE_AS_SET_DADDI ".set daddi\n\t" #endif - "daddiu %0, %2, %4\n\t" + "daddiu %0, %2, %4\n\t" "addiu %1, $0, %4\n\t" "daddu %1, %2\n\t" ".set pop" @@ -292,7 +292,7 @@ static inline void check_daddiu(void) asm volatile( "addiu %2, $0, %3\n\t" "dsrl %2, %2, 1\n\t" - "daddiu %0, %2, %4\n\t" + "daddiu %0, %2, %4\n\t" "addiu %1, $0, %4\n\t" "daddu %1, %2" : "=&r" (v), "=&r" (w), "=&r" (tmp) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index cce3782c96c..760139ee7a9 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -4,7 +4,7 @@ * Copyright (C) xxxx the Anonymous * Copyright (C) 1994 - 2006 Ralf Baechle * Copyright (C) 2003, 2004 Maciej W. Rozycki - * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. + * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -69,12 +69,12 @@ void r4k_wait_irqoff(void) " wait \n" " .set pop \n"); local_irq_enable(); - __asm__(" .globl __pastwait \n" + __asm__(" .globl __pastwait \n" "__pastwait: \n"); } /* - * The RM7000 variant has to handle erratum 38. The workaround is to not + * The RM7000 variant has to handle erratum 38. The workaround is to not * have any pending stores when the WAIT instruction is executed. */ static void rm7k_wait_irqoff(void) @@ -469,7 +469,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c) c->scache.flags = MIPS_CACHE_NOT_PRESENT; ok = decode_config0(c); /* Read Config registers. */ - BUG_ON(!ok); /* Arch spec violation! */ + BUG_ON(!ok); /* Arch spec violation! */ if (ok) ok = decode_config1(c); if (ok) @@ -710,12 +710,12 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; /* - * Undocumented RM7000: Bit 29 in the info register of + * Undocumented RM7000: Bit 29 in the info register of * the RM7000 v2.0 indicates if the TLB has 48 or 64 * entries. * - * 29 1 => 64 entry JTLB - * 0 => 48 entry JTLB + * 29 1 => 64 entry JTLB + * 0 => 48 entry JTLB */ c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; break; @@ -729,8 +729,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) * Bit 29 in the info register of the RM9000 * indicates if the TLB has 48 or 64 entries. * - * 29 1 => 64 entry JTLB - * 0 => 48 entry JTLB + * 29 1 => 64 entry JTLB + * 0 => 48 entry JTLB */ c->tlbsize = (read_c0_info() & (1 << 29)) ? 
64 : 48; break; @@ -1053,12 +1053,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) return; } - c->options = (MIPS_CPU_TLB | - MIPS_CPU_4KEX | + c->options = (MIPS_CPU_TLB | + MIPS_CPU_4KEX | MIPS_CPU_COUNTER | - MIPS_CPU_DIVEC | - MIPS_CPU_WATCH | - MIPS_CPU_EJTAG | + MIPS_CPU_DIVEC | + MIPS_CPU_WATCH | + MIPS_CPU_EJTAG | MIPS_CPU_LLSC); switch (c->processor_id & 0xff00) { @@ -1129,7 +1129,7 @@ __cpuinit void cpu_probe(void) struct cpuinfo_mips *c = ¤t_cpu_data; unsigned int cpu = smp_processor_id(); - c->processor_id = PRID_IMP_UNKNOWN; + c->processor_id = PRID_IMP_UNKNOWN; c->fpu_id = FPIR_IMP_NONE; c->cputype = CPU_UNKNOWN; diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c index e7c98e2b78b..bb51d3193ad 100644 --- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c +++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c @@ -195,8 +195,8 @@ static void loongson2_cpu_wait(void) spin_lock_irqsave(&loongson2_wait_lock, flags); cpu_freq = LOONGSON_CHIPCFG0; - LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ - LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ + LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ + LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ spin_unlock_irqrestore(&loongson2_wait_lock, flags); } diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c index 0f53c39324b..93aa302948d 100644 --- a/arch/mips/kernel/crash.c +++ b/arch/mips/kernel/crash.c @@ -59,7 +59,7 @@ static void crash_kexec_prepare_cpus(void) #else /* !defined(CONFIG_SMP) */ static void crash_kexec_prepare_cpus(void) {} -#endif /* !defined(CONFIG_SMP) */ +#endif /* !defined(CONFIG_SMP) */ void default_machine_crash_shutdown(struct pt_regs *regs) { diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c index f96f99c794a..468f3eba413 100644 --- a/arch/mips/kernel/csrc-bcm1480.c +++ b/arch/mips/kernel/csrc-bcm1480.c @@ -35,7 +35,7 @@ static cycle_t bcm1480_hpt_read(struct clocksource *cs) struct clocksource bcm1480_clocksource = { .name = "zbbus-cycles", - .rating = 200, + .rating = 200, .read = bcm1480_hpt_read, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c index 46bd7fa98d6..0654bff9b69 100644 --- a/arch/mips/kernel/csrc-ioasic.c +++ b/arch/mips/kernel/csrc-ioasic.c @@ -1,7 +1,7 @@ /* * DEC I/O ASIC's counter clocksource * - * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> + * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c index 2e7c5232da8..abd99ea911a 100644 --- a/arch/mips/kernel/csrc-powertv.c +++ b/arch/mips/kernel/csrc-powertv.c @@ -45,7 +45,7 @@ unsigned int __init mips_get_pll_freq(void) m = PLL_GET_M(pll_reg); n = PLL_GET_N(pll_reg); p = PLL_GET_P(pll_reg); - pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p); + pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p); /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */ fout = ((2 * n * fin) / (m * (0x01 << p))); @@ -83,8 +83,8 @@ static void __init powertv_c0_hpt_clocksource_init(void) /** * struct tim_c - free running counter - * @hi: High 16 bits of the counter - * @lo: Low 32 bits of the counter + * @hi: High 16 bits of the counter + * @lo: Low 32 
bits of the counter * * Lays out the structure of the free running counter in memory. This counter * increments at a rate of 27 MHz/8 on all platforms. diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c index e9606d90768..6ecb77d8206 100644 --- a/arch/mips/kernel/csrc-sb1250.c +++ b/arch/mips/kernel/csrc-sb1250.c @@ -44,7 +44,7 @@ static cycle_t sb1250_hpt_read(struct clocksource *cs) struct clocksource bcm1250_clocksource = { .name = "bcm1250-counter-3", - .rating = 200, + .rating = 200, .read = sb1250_hpt_read, .mask = CLOCKSOURCE_MASK(23), .flags = CLOCK_SOURCE_IS_CONTINUOUS, diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 83fa1460e29..cf5509f13dd 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -125,21 +125,21 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, * * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT * - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) * addiu v1, v1, low_16bit_of_mcount * move at, ra * move $12, ra_address * jalr v1 * sub sp, sp, 8 - * 1: offset = 5 instructions + * 1: offset = 5 instructions * 2.2 For the Other situations * - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) * addiu v1, v1, low_16bit_of_mcount * move at, ra * jalr v1 * nop | move $12, ra_address | sub sp, sp, 8 - * 1: offset = 4 instructions + * 1: offset = 4 instructions */ #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) @@ -228,8 +228,8 @@ int ftrace_disable_ftrace_graph_caller(void) #ifndef KBUILD_MCOUNT_RA_ADDRESS -#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */ -#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ +#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */ +#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 8a0096d6281..ecb347ce1b3 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -160,7 +160,7 @@ LEAF(r4k_wait) .set pop .endm - .align 5 + .align 5 BUILD_ROLLBACK_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) #ifdef CONFIG_TRACE_IRQFLAGS @@ -362,7 +362,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set push .set noat SAVE_ALL - move a0, sp + move a0, sp jal nmi_exception_handler RESTORE_ALL .set mips3 @@ -409,7 +409,7 @@ NESTED(nmi_handler, PT_SIZE, sp) string escapes and emits bogus warnings if it believes to recognize an unknown escape code. So make the arguments start with an n and gas will believe \n is ok ... */ - .macro __BUILD_verbose nexception + .macro __BUILD_verbose nexception LONG_L a1, PT_EPC(sp) #ifdef CONFIG_32BIT PRINT("Got \nexception at %08lx\012") @@ -442,7 +442,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .endm .macro BUILD_HANDLER exception handler clear verbose - __BUILD_HANDLER \exception \handler \clear \verbose _int + __BUILD_HANDLER \exception \handler \clear \verbose _int .endm BUILD_HANDLER adel ade ade silent /* #4 */ @@ -456,7 +456,7 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER tr tr sti silent /* #13 */ BUILD_HANDLER fpe fpe fpe silent /* #15 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ -#ifdef CONFIG_HARDWARE_WATCHPOINTS +#ifdef CONFIG_HARDWARE_WATCHPOINTS /* * For watch, interrupts will be enabled after the watch * registers are read. 
@@ -482,8 +482,8 @@ NESTED(nmi_handler, PT_SIZE, sp) MFC0 k1, CP0_ENTRYHI andi k1, 0xff /* ASID_MASK */ MFC0 k0, CP0_EPC - PTR_SRL k0, _PAGE_SHIFT + 1 - PTR_SLL k0, _PAGE_SHIFT + 1 + PTR_SRL k0, _PAGE_SHIFT + 1 + PTR_SLL k0, _PAGE_SHIFT + 1 or k1, k0 MTC0 k1, CP0_ENTRYHI mtc0_tlbw_hazard diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index fcf97312f32..c61cdaed2b1 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -133,7 +133,7 @@ EXPORT(_stext) #ifdef CONFIG_BOOT_RAW /* * Give us a fighting chance of running if execution beings at the - * kernel load address. This is needed because this platform does + * kernel load address. This is needed because this platform does * not have a ELF loader yet. */ FEXPORT(__kernel_entry) @@ -201,7 +201,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point #ifdef CONFIG_SMP /* - * SMP slave cpus entry point. Board specific code for bootstrap calls this + * SMP slave cpus entry point. Board specific code for bootstrap calls this * function after setting up the stack and gp registers. */ NESTED(smp_bootstrap, 16, sp) diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 32b397b646e..2b91fe80c43 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -178,7 +178,7 @@ handle_real_irq: } else { inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */ outb(cached_master_mask, PIC_MASTER_IMR); - outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ + outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ } smtc_im_ack_irq(irq); raw_spin_unlock_irqrestore(&i8259A_lock, flags); diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c index 883fc6cead3..44a1f792e39 100644 --- a/arch/mips/kernel/irq-gt641xx.c +++ b/arch/mips/kernel/irq-gt641xx.c @@ -1,7 +1,7 @@ /* * GT641xx IRQ routines. * - * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> + * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,7 +25,7 @@ #include <asm/gt64120.h> -#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE)) +#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE)) static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock); diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 14ac52c5ae8..fab40f7d2e0 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -1,6 +1,6 @@ /* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. 
* @@ -86,7 +86,7 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d) */ void ll_msc_irq(void) { - unsigned int irq; + unsigned int irq; /* read the interrupt vector register */ MSCIC_READ(MSC01_IC_VEC, irq); diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index b0662cf97ea..26f4e4c9db1 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c @@ -1,8 +1,8 @@ /* * Copyright (C) 2003 Ralf Baechle * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index a5aa43d07c8..d1fea7a054b 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -48,7 +48,7 @@ again: } /* - * Allocate the 16 legacy interrupts for i8259 devices. This happens early + * Allocate the 16 legacy interrupts for i8259 devices. This happens early * in the kernel initialization so treating allocation failure as BUG() is * ok. */ diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 972263bcf40..0207a44917b 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -3,13 +3,13 @@ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * * Copyright (C) 2001 Ralf Baechle - * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. - * Author: Maciej W. Rozycki <macro@mips.com> + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * Author: Maciej W. Rozycki <macro@mips.com> * * This file define the irq handler for MIPS CPU interrupts. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c index b0c55b50218..ab00e490482 100644 --- a/arch/mips/kernel/irq_txx9.c +++ b/arch/mips/kernel/irq_txx9.c @@ -1,12 +1,12 @@ /* * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c, - * linux/arch/mips/tx4927/common/tx4927_irq.c, - * linux/arch/mips/tx4938/common/irq.c + * linux/arch/mips/tx4927/common/tx4927_irq.c, + * linux/arch/mips/tx4938/common/irq.c * * Copyright 2001, 2003-2005 MontaVista Software Inc. * Author: MontaVista Software, Inc. 
- * ahennessy@mvista.com - * source@mvista.com + * ahennessy@mvista.com + * source@mvista.com * Copyright (C) 2000-2001 Toshiba Corporation * * This file is subject to the terms and conditions of the GNU General Public @@ -122,7 +122,7 @@ static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type) switch (flow_type & IRQF_TRIGGER_MASK) { case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break; case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break; - case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break; + case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break; case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break; default: return -EINVAL; diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 23817a6e32b..fcaac2f132f 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -40,7 +40,7 @@ static struct hard_trap_info { { 6, SIGBUS }, /* instruction bus error */ { 7, SIGBUS }, /* data bus error */ { 9, SIGTRAP }, /* break */ -/* { 11, SIGILL }, */ /* CPU unusable */ +/* { 11, SIGILL }, */ /* CPU unusable */ { 12, SIGFPE }, /* overflow */ { 13, SIGTRAP }, /* trap */ { 14, SIGSEGV }, /* virtual instruction cache coherency */ @@ -321,7 +321,7 @@ int kgdb_ll_trap(int cmd, const char *str, .regs = regs, .str = str, .err = err, - .trapnr = trap, + .trapnr = trap, .signr = sig, }; @@ -371,7 +371,7 @@ int kgdb_arch_init(void) union mips_instruction insn = { .r_format = { .opcode = spec_op, - .func = break_op, + .func = break_op, } }; memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE); diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 158467da9bc..a14be5fa5ec 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -307,7 +307,7 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, /* * Called after single-stepping. p->addr is the address of the * instruction whose first byte has been replaced by the "break 0" - * instruction. To avoid the SMP problems that can occur when we + * instruction. To avoid the SMP problems that can occur when we * temporarily put back the original opcode to single-step, we * single-stepped a copy of the instruction. The address of this * copy is p->ainsn.insn. @@ -535,7 +535,7 @@ void jprobe_return_end(void); void __kprobes jprobe_return(void) { - /* Assembler quirk necessitates this '0,code' business. */ + /* Assembler quirk necessitates this '0,code' business. */ asm volatile( "break 0,%0\n\t" ".globl jprobe_return_end\n" @@ -614,9 +614,9 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p, * We can handle this because: * - instances are always inserted at the head of the list * - when multiple return probes are registered for the same - * function, the first instance's ret_addr will point to the - * real return address, and all the rest will point to - * kretprobe_trampoline + * function, the first instance's ret_addr will point to the + * real return address, and all the rest will point to + * kretprobe_trampoline */ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task != current) diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 7adab86c632..16bf4a5d3d1 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -76,7 +76,7 @@ out: return error; } -#define RLIM_INFINITY32 0x7fffffff +#define RLIM_INFINITY32 0x7fffffff #define RESOURCE32(x) ((x > RLIM_INFINITY32) ? 
RLIM_INFINITY32 : x) struct rlimit32 { @@ -105,7 +105,7 @@ SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high, /* From the Single Unix Spec: pread & pwrite act like lseek to pos + op + lseek back to original location. They fail just like lseek does on - non-seekable files. */ + non-seekable files. */ SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count, unsigned long, unused, unsigned long, a4, unsigned long, a5) @@ -263,7 +263,7 @@ SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd, } asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3, - size_t count) + size_t count) { return sys_readahead(fd, merge_64(a2, a3), count); } @@ -292,7 +292,7 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2, unsigned offset_a3, unsigned len_a4, unsigned len_a5) { return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3), - merge_64(len_a4, len_a5)); + merge_64(len_a4, len_a5)); } save_static_function(sys32_clone); @@ -313,7 +313,7 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs) syscall() works. */ child_tidptr = (int __user *) __dummy4; return do_fork(clone_flags, newsp, 0, - parent_tidptr, child_tidptr); + parent_tidptr, child_tidptr); } asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, @@ -323,7 +323,7 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf, } SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, - u64, a3, u64, a4, int, dfd, const char __user *, pathname) + u64, a3, u64, a4, int, dfd, const char __user *, pathname) { return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), dfd, pathname); diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index df1e3e455f9..6e58e97fcd3 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -17,9 +17,9 @@ extern void *__bzero(void *__s, size_t __count); extern long __strncpy_from_user_nocheck_asm(char *__to, - const char *__from, long __len); + const char *__from, long __len); extern long __strncpy_from_user_asm(char *__to, const char *__from, - long __len); + long __len); extern long __strlen_user_nocheck_asm(const char *s); extern long __strlen_user_asm(const char *s); extern long __strnlen_user_nocheck_asm(const char *s); diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c index 61d60028b88..2b70723071c 100644 --- a/arch/mips/kernel/module-rela.c +++ b/arch/mips/kernel/module-rela.c @@ -55,7 +55,7 @@ static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v) static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) { *location = (*location & 0xffff0000) | - ((((long long) v + 0x8000LL) >> 16) & 0xffff); + ((((long long) v + 0x8000LL) >> 16) & 0xffff); return 0; } @@ -78,7 +78,7 @@ static int apply_r_mips_higher_rela(struct module *me, u32 *location, Elf_Addr v) { *location = (*location & 0xffff0000) | - ((((long long) v + 0x80008000LL) >> 32) & 0xffff); + ((((long long) v + 0x80008000LL) >> 32) & 0xffff); return 0; } @@ -87,7 +87,7 @@ static int apply_r_mips_highest_rela(struct module *me, u32 *location, Elf_Addr v) { *location = (*location & 0xffff0000) | - ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); + ((((long long) v + 0x800080008000LL) >> 48) & 0xffff); return 0; } diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 07ff5812ffa..977a623d925 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -79,7 +79,7 @@ static int 
apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) } *location = (*location & ~0x03ffffff) | - ((*location + (v >> 2)) & 0x03ffffff); + ((*location + (v >> 2)) & 0x03ffffff); return 0; } @@ -122,7 +122,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) struct mips_hi16 *l; Elf_Addr val, vallo; - /* Sign extend the addend we extract from the lo insn. */ + /* Sign extend the addend we extract from the lo insn. */ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; if (me->arch.r_mips_hi16_list != NULL) { @@ -165,7 +165,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) } /* - * Ok, we're done with the HI16 relocs. Now deal with the LO16. + * Ok, we're done with the HI16 relocs. Now deal with the LO16. */ val = v + vallo; insnlo = (insnlo & ~0xffff) | (val & 0xffff); @@ -230,7 +230,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, } /* - * Normally the hi16 list should be deallocated at this point. A + * Normally the hi16 list should be deallocated at this point. A * malformed binary however could contain a series of R_MIPS_HI16 * relocations not followed by a R_MIPS_LO16 relocation. In that * case, free up the list and return an error. @@ -261,7 +261,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr) spin_unlock_irqrestore(&dbe_lock, flags); /* Now, if we found one, we are running inside it now, hence - we cannot unload the module, hence no refcnt needed. */ + we cannot unload the module, hence no refcnt needed. */ return e; } diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 207f1341578..0e23343eb0a 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -30,7 +30,7 @@ /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti, int usedfpu) */ .align 7 LEAF(resume) @@ -69,7 +69,7 @@ 1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ - mfc0 t0, $11,7 /* CvmMemCtl */ + mfc0 t0, $11,7 /* CvmMemCtl */ bbit0 t0, 6, 3f /* Is user access enabled? */ /* Store the CVMSEG state */ @@ -77,8 +77,8 @@ andi t0, 0x3f /* Multiply * (cache line size/sizeof(long)/2) */ sll t0, 7-LONGLOG-1 - li t1, -32768 /* Base address of CVMSEG */ - LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */ + li t1, -32768 /* Base address of CVMSEG */ + LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */ synciobdma 2: .set noreorder @@ -89,13 +89,13 @@ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */ bnez t0, 2b /* Loop until we've copied it all */ - LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */ + LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */ .set reorder /* Disable access to CVMSEG */ - mfc0 t0, $11,7 /* CvmMemCtl */ + mfc0 t0, $11,7 /* CvmMemCtl */ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ - mtc0 t0, $11,7 /* CvmMemCtl */ + mtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: /* @@ -133,7 +133,7 @@ dmfc0 t9, $9,7 /* CvmCtl register. 
*/ - /* Save the COP2 CRC state */ + /* Save the COP2 CRC state */ dmfc2 t0, 0x0201 dmfc2 t1, 0x0202 dmfc2 t2, 0x0200 @@ -149,30 +149,30 @@ sd t0, OCTEON_CP2_LLM_DAT(a0) sd t1, OCTEON_CP2_LLM_DAT+8(a0) -1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ +1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ /* Save the COP2 crypto state */ - /* this part is mostly common to both pass 1 and later revisions */ - dmfc2 t0, 0x0084 - dmfc2 t1, 0x0080 - dmfc2 t2, 0x0081 - dmfc2 t3, 0x0082 + /* this part is mostly common to both pass 1 and later revisions */ + dmfc2 t0, 0x0084 + dmfc2 t1, 0x0080 + dmfc2 t2, 0x0081 + dmfc2 t3, 0x0082 sd t0, OCTEON_CP2_3DES_IV(a0) - dmfc2 t0, 0x0088 + dmfc2 t0, 0x0088 sd t1, OCTEON_CP2_3DES_KEY(a0) - dmfc2 t1, 0x0111 /* only necessary for pass 1 */ + dmfc2 t1, 0x0111 /* only necessary for pass 1 */ sd t2, OCTEON_CP2_3DES_KEY+8(a0) - dmfc2 t2, 0x0102 + dmfc2 t2, 0x0102 sd t3, OCTEON_CP2_3DES_KEY+16(a0) - dmfc2 t3, 0x0103 + dmfc2 t3, 0x0103 sd t0, OCTEON_CP2_3DES_RESULT(a0) - dmfc2 t0, 0x0104 - sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */ - dmfc2 t1, 0x0105 + dmfc2 t0, 0x0104 + sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */ + dmfc2 t1, 0x0105 sd t2, OCTEON_CP2_AES_IV(a0) dmfc2 t2, 0x0106 sd t3, OCTEON_CP2_AES_IV+8(a0) - dmfc2 t3, 0x0107 + dmfc2 t3, 0x0107 sd t0, OCTEON_CP2_AES_KEY(a0) dmfc2 t0, 0x0110 sd t1, OCTEON_CP2_AES_KEY+8(a0) @@ -180,7 +180,7 @@ sd t2, OCTEON_CP2_AES_KEY+16(a0) dmfc2 t2, 0x0101 sd t3, OCTEON_CP2_AES_KEY+24(a0) - mfc0 t3, $15,0 /* Get the processor ID register */ + mfc0 t3, $15,0 /* Get the processor ID register */ sd t0, OCTEON_CP2_AES_KEYLEN(a0) li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ sd t1, OCTEON_CP2_AES_RESULT(a0) @@ -188,7 +188,7 @@ /* Skip to the Pass1 version of the remainder of the COP2 state */ beq t3, t0, 2f - /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ + /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ dmfc2 t1, 0x0240 dmfc2 t2, 0x0241 dmfc2 t3, 0x0242 @@ -214,7 +214,7 @@ sd t2, OCTEON_CP2_HSH_DATW+72(a0) dmfc2 t2, 0x024D sd t3, OCTEON_CP2_HSH_DATW+80(a0) - dmfc2 t3, 0x024E + dmfc2 t3, 0x024E sd t0, OCTEON_CP2_HSH_DATW+88(a0) dmfc2 t0, 0x0250 sd t1, OCTEON_CP2_HSH_DATW+96(a0) @@ -232,9 +232,9 @@ sd t3, OCTEON_CP2_HSH_IVW+24(a0) dmfc2 t3, 0x0257 sd t0, OCTEON_CP2_HSH_IVW+32(a0) - dmfc2 t0, 0x0258 + dmfc2 t0, 0x0258 sd t1, OCTEON_CP2_HSH_IVW+40(a0) - dmfc2 t1, 0x0259 + dmfc2 t1, 0x0259 sd t2, OCTEON_CP2_HSH_IVW+48(a0) dmfc2 t2, 0x025E sd t3, OCTEON_CP2_HSH_IVW+56(a0) @@ -247,7 +247,7 @@ sd t0, OCTEON_CP2_GFM_RESULT+8(a0) jr ra -2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ +2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ dmfc2 t3, 0x0040 dmfc2 t0, 0x0041 dmfc2 t1, 0x0042 @@ -269,7 +269,7 @@ sd t3, OCTEON_CP2_HSH_IVW+8(a0) sd t0, OCTEON_CP2_HSH_IVW+16(a0) -3: /* pass 1 or CvmCtl[NOCRYPTO] set */ +3: /* pass 1 or CvmCtl[NOCRYPTO] set */ jr ra END(octeon_cop2_save) @@ -280,19 +280,19 @@ .set push .set noreorder LEAF(octeon_cop2_restore) - /* First cache line was prefetched before the call */ - pref 4, 128(a0) + /* First cache line was prefetched before the call */ + pref 4, 128(a0) dmfc0 t9, $9,7 /* CvmCtl register. 
*/ - pref 4, 256(a0) + pref 4, 256(a0) ld t0, OCTEON_CP2_CRC_IV(a0) - pref 4, 384(a0) + pref 4, 384(a0) ld t1, OCTEON_CP2_CRC_LENGTH(a0) ld t2, OCTEON_CP2_CRC_POLY(a0) /* Restore the COP2 CRC state */ dmtc2 t0, 0x0201 - dmtc2 t1, 0x1202 + dmtc2 t1, 0x1202 bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */ dmtc2 t2, 0x4200 @@ -310,19 +310,19 @@ ld t0, OCTEON_CP2_3DES_IV(a0) ld t1, OCTEON_CP2_3DES_KEY(a0) ld t2, OCTEON_CP2_3DES_KEY+8(a0) - dmtc2 t0, 0x0084 + dmtc2 t0, 0x0084 ld t0, OCTEON_CP2_3DES_KEY+16(a0) - dmtc2 t1, 0x0080 + dmtc2 t1, 0x0080 ld t1, OCTEON_CP2_3DES_RESULT(a0) - dmtc2 t2, 0x0081 + dmtc2 t2, 0x0081 ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */ dmtc2 t0, 0x0082 ld t0, OCTEON_CP2_AES_IV(a0) - dmtc2 t1, 0x0098 + dmtc2 t1, 0x0098 ld t1, OCTEON_CP2_AES_IV+8(a0) - dmtc2 t2, 0x010A /* only really needed for pass 1 */ + dmtc2 t2, 0x010A /* only really needed for pass 1 */ ld t2, OCTEON_CP2_AES_KEY(a0) - dmtc2 t0, 0x0102 + dmtc2 t0, 0x0102 ld t0, OCTEON_CP2_AES_KEY+8(a0) dmtc2 t1, 0x0103 ld t1, OCTEON_CP2_AES_KEY+16(a0) @@ -334,14 +334,14 @@ ld t1, OCTEON_CP2_AES_RESULT(a0) dmtc2 t2, 0x0107 ld t2, OCTEON_CP2_AES_RESULT+8(a0) - mfc0 t3, $15,0 /* Get the processor ID register */ + mfc0 t3, $15,0 /* Get the processor ID register */ dmtc2 t0, 0x0110 li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ dmtc2 t1, 0x0100 bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ dmtc2 t2, 0x0101 - /* this code is specific for pass 1 */ + /* this code is specific for pass 1 */ ld t0, OCTEON_CP2_HSH_DATW(a0) ld t1, OCTEON_CP2_HSH_DATW+8(a0) ld t2, OCTEON_CP2_HSH_DATW+16(a0) @@ -361,10 +361,10 @@ ld t0, OCTEON_CP2_HSH_IVW+16(a0) dmtc2 t1, 0x0048 dmtc2 t2, 0x0049 - b done_restore /* unconditional branch */ + b done_restore /* unconditional branch */ dmtc2 t0, 0x004A -3: /* this is post-pass1 code */ +3: /* this is post-pass1 code */ ld t2, OCTEON_CP2_HSH_DATW(a0) ld t0, OCTEON_CP2_HSH_DATW+8(a0) ld t1, OCTEON_CP2_HSH_DATW+16(a0) @@ -433,7 +433,7 @@ done_restore: * sp is assumed to point to a struct pt_regs * * NOTE: This is called in SAVE_SOME in stackframe.h. It can only - * safely modify k0 and k1. + * safely modify k0 and k1. */ .align 7 .set push @@ -446,14 +446,14 @@ done_restore: /* Save the multiplier state */ v3mulu k0, $0, $0 v3mulu k1, $0, $0 - sd k0, PT_MTP(sp) /* PT_MTP has P0 */ + sd k0, PT_MTP(sp) /* PT_MTP has P0 */ v3mulu k0, $0, $0 sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */ ori k1, $0, 1 v3mulu k1, k1, $0 sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */ v3mulu k0, $0, $0 - sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */ + sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */ v3mulu k1, $0, $0 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ jr ra @@ -475,19 +475,19 @@ done_restore: .set noreorder LEAF(octeon_mult_restore) dmfc0 k1, $9,7 /* CvmCtl register. 
*/ - ld v0, PT_MPL(sp) /* MPL0 */ - ld v1, PT_MPL+8(sp) /* MPL1 */ - ld k0, PT_MPL+16(sp) /* MPL2 */ + ld v0, PT_MPL(sp) /* MPL0 */ + ld v1, PT_MPL+8(sp) /* MPL1 */ + ld k0, PT_MPL+16(sp) /* MPL2 */ bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ /* Normally falls through, so no time wasted here */ nop /* Restore the multiplier state */ - ld k1, PT_MTP+16(sp) /* P2 */ + ld k1, PT_MTP+16(sp) /* P2 */ MTM0 v0 /* MPL0 */ ld v0, PT_MTP+8(sp) /* P1 */ MTM1 v1 /* MPL1 */ - ld v1, PT_MTP(sp) /* P0 */ + ld v1, PT_MTP(sp) /* P0 */ MTM2 k0 /* MPL2 */ MTP2 k1 /* P2 */ MTP1 v0 /* P1 */ diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index d9c81c5a6c9..45f1ffcf1a4 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -103,13 +103,13 @@ static struct mips_pmu mipspmu; #define M_CONFIG1_PC (1 << 4) -#define M_PERFCTL_EXL (1 << 0) -#define M_PERFCTL_KERNEL (1 << 1) -#define M_PERFCTL_SUPERVISOR (1 << 2) -#define M_PERFCTL_USER (1 << 3) -#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4) +#define M_PERFCTL_EXL (1 << 0) +#define M_PERFCTL_KERNEL (1 << 1) +#define M_PERFCTL_SUPERVISOR (1 << 2) +#define M_PERFCTL_USER (1 << 3) +#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4) #define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5) -#define M_PERFCTL_VPEID(vpe) ((vpe) << 16) +#define M_PERFCTL_VPEID(vpe) ((vpe) << 16) #ifdef CONFIG_CPU_BMIPS5000 #define M_PERFCTL_MT_EN(filter) 0 @@ -117,13 +117,13 @@ static struct mips_pmu mipspmu; #define M_PERFCTL_MT_EN(filter) ((filter) << 20) #endif /* CONFIG_CPU_BMIPS5000 */ -#define M_TC_EN_ALL M_PERFCTL_MT_EN(0) -#define M_TC_EN_VPE M_PERFCTL_MT_EN(1) -#define M_TC_EN_TC M_PERFCTL_MT_EN(2) -#define M_PERFCTL_TCID(tcid) ((tcid) << 22) -#define M_PERFCTL_WIDE (1 << 30) -#define M_PERFCTL_MORE (1 << 31) -#define M_PERFCTL_TC (1 << 30) +#define M_TC_EN_ALL M_PERFCTL_MT_EN(0) +#define M_TC_EN_VPE M_PERFCTL_MT_EN(1) +#define M_TC_EN_TC M_PERFCTL_MT_EN(2) +#define M_PERFCTL_TCID(tcid) ((tcid) << 22) +#define M_PERFCTL_WIDE (1 << 30) +#define M_PERFCTL_MORE (1 << 31) +#define M_PERFCTL_TC (1 << 30) #define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \ M_PERFCTL_KERNEL | \ @@ -827,7 +827,7 @@ static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL }, - [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL }, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL }, [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL }, [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL }, @@ -1371,7 +1371,7 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \ (r) == 176 || ((b) >= 50 && (b) <= 55) || \ ((b) >= 64 && (b) <= 67)) -#define IS_RANGE_V_34K_EVENT(r) ((r) == 47) +#define IS_RANGE_V_34K_EVENT(r) ((r) == 47) #endif /* 74K */ diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 07dff54f2ce..9dafed05813 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -1,7 +1,7 @@ /* * Copyright (C) 1995, 1996, 2001 Ralf Baechle * Copyright (C) 2001, 2004 MIPS Technologies, Inc. - * Copyright (C) 2004 Maciej W. Rozycki + * Copyright (C) 2004 Maciej W. 
Rozycki */ #include <linux/delay.h> #include <linux/kernel.h> diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index a11c6f9fdd5..902e7803fcf 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -154,8 +154,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, return 0; } *childregs = *regs; - childregs->regs[7] = 0; /* Clear error flag */ - childregs->regs[2] = 0; /* Child gets zero as return value */ + childregs->regs[7] = 0; /* Clear error flag */ + childregs->regs[2] = 0; /* Child gets zero as return value */ childregs->regs[29] = usp; ti->addr_limit = USER_DS; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 4812c6d916e..9c6299c733a 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -50,7 +50,7 @@ void ptrace_disable(struct task_struct *child) } /* - * Read a general register set. We always use the 64-bit format, even + * Read a general register set. We always use the 64-bit format, even * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. * Registers are sign extended to fill the available space. */ @@ -326,7 +326,7 @@ long arch_ptrace(struct task_struct *child, long request, case FPC_CSR: tmp = child->thread.fpu.fcr31; break; - case FPC_EIR: { /* implementation / version register */ + case FPC_EIR: { /* implementation / version register */ unsigned int flags; #ifdef CONFIG_MIPS_MT_SMTC unsigned long irqflags; @@ -520,10 +520,10 @@ static inline int audit_arch(void) { int arch = EM_MIPS; #ifdef CONFIG_64BIT - arch |= __AUDIT_ARCH_64BIT; + arch |= __AUDIT_ARCH_64BIT; #endif #if defined(__LITTLE_ENDIAN) - arch |= __AUDIT_ARCH_LE; + arch |= __AUDIT_ARCH_LE; #endif return arch; } @@ -546,7 +546,7 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) /* The 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? - 0x80 : 0)); + 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do @@ -581,7 +581,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) /* The 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 
- 0x80 : 0)); + 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index a3b017815ef..9486055ba66 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c @@ -124,7 +124,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, case FPC_CSR: tmp = child->thread.fpu.fcr31; break; - case FPC_EIR: { /* implementation / version register */ + case FPC_EIR: { /* implementation / version register */ unsigned int flags; #ifdef CONFIG_MIPS_MT_SMTC unsigned int irqflags; diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S index 61c8a0f2a60..f31063dbdae 100644 --- a/arch/mips/kernel/r2300_fpu.S +++ b/arch/mips/kernel/r2300_fpu.S @@ -30,38 +30,38 @@ LEAF(_save_fp_context) li v0, 0 # assume success cfc1 t1,fcr31 - EX(swc1 $f0,(SC_FPREGS+0)(a0)) - EX(swc1 $f1,(SC_FPREGS+8)(a0)) - EX(swc1 $f2,(SC_FPREGS+16)(a0)) - EX(swc1 $f3,(SC_FPREGS+24)(a0)) - EX(swc1 $f4,(SC_FPREGS+32)(a0)) - EX(swc1 $f5,(SC_FPREGS+40)(a0)) - EX(swc1 $f6,(SC_FPREGS+48)(a0)) - EX(swc1 $f7,(SC_FPREGS+56)(a0)) - EX(swc1 $f8,(SC_FPREGS+64)(a0)) - EX(swc1 $f9,(SC_FPREGS+72)(a0)) - EX(swc1 $f10,(SC_FPREGS+80)(a0)) - EX(swc1 $f11,(SC_FPREGS+88)(a0)) - EX(swc1 $f12,(SC_FPREGS+96)(a0)) - EX(swc1 $f13,(SC_FPREGS+104)(a0)) - EX(swc1 $f14,(SC_FPREGS+112)(a0)) - EX(swc1 $f15,(SC_FPREGS+120)(a0)) - EX(swc1 $f16,(SC_FPREGS+128)(a0)) - EX(swc1 $f17,(SC_FPREGS+136)(a0)) - EX(swc1 $f18,(SC_FPREGS+144)(a0)) - EX(swc1 $f19,(SC_FPREGS+152)(a0)) - EX(swc1 $f20,(SC_FPREGS+160)(a0)) - EX(swc1 $f21,(SC_FPREGS+168)(a0)) - EX(swc1 $f22,(SC_FPREGS+176)(a0)) - EX(swc1 $f23,(SC_FPREGS+184)(a0)) - EX(swc1 $f24,(SC_FPREGS+192)(a0)) - EX(swc1 $f25,(SC_FPREGS+200)(a0)) - EX(swc1 $f26,(SC_FPREGS+208)(a0)) - EX(swc1 $f27,(SC_FPREGS+216)(a0)) - EX(swc1 $f28,(SC_FPREGS+224)(a0)) - EX(swc1 $f29,(SC_FPREGS+232)(a0)) - EX(swc1 $f30,(SC_FPREGS+240)(a0)) - EX(swc1 $f31,(SC_FPREGS+248)(a0)) + EX(swc1 $f0,(SC_FPREGS+0)(a0)) + EX(swc1 $f1,(SC_FPREGS+8)(a0)) + EX(swc1 $f2,(SC_FPREGS+16)(a0)) + EX(swc1 $f3,(SC_FPREGS+24)(a0)) + EX(swc1 $f4,(SC_FPREGS+32)(a0)) + EX(swc1 $f5,(SC_FPREGS+40)(a0)) + EX(swc1 $f6,(SC_FPREGS+48)(a0)) + EX(swc1 $f7,(SC_FPREGS+56)(a0)) + EX(swc1 $f8,(SC_FPREGS+64)(a0)) + EX(swc1 $f9,(SC_FPREGS+72)(a0)) + EX(swc1 $f10,(SC_FPREGS+80)(a0)) + EX(swc1 $f11,(SC_FPREGS+88)(a0)) + EX(swc1 $f12,(SC_FPREGS+96)(a0)) + EX(swc1 $f13,(SC_FPREGS+104)(a0)) + EX(swc1 $f14,(SC_FPREGS+112)(a0)) + EX(swc1 $f15,(SC_FPREGS+120)(a0)) + EX(swc1 $f16,(SC_FPREGS+128)(a0)) + EX(swc1 $f17,(SC_FPREGS+136)(a0)) + EX(swc1 $f18,(SC_FPREGS+144)(a0)) + EX(swc1 $f19,(SC_FPREGS+152)(a0)) + EX(swc1 $f20,(SC_FPREGS+160)(a0)) + EX(swc1 $f21,(SC_FPREGS+168)(a0)) + EX(swc1 $f22,(SC_FPREGS+176)(a0)) + EX(swc1 $f23,(SC_FPREGS+184)(a0)) + EX(swc1 $f24,(SC_FPREGS+192)(a0)) + EX(swc1 $f25,(SC_FPREGS+200)(a0)) + EX(swc1 $f26,(SC_FPREGS+208)(a0)) + EX(swc1 $f27,(SC_FPREGS+216)(a0)) + EX(swc1 $f28,(SC_FPREGS+224)(a0)) + EX(swc1 $f29,(SC_FPREGS+232)(a0)) + EX(swc1 $f30,(SC_FPREGS+240)(a0)) + EX(swc1 $f31,(SC_FPREGS+248)(a0)) EX(sw t1,(SC_FPC_CSR)(a0)) cfc1 t0,$0 # implementation/version jr ra @@ -82,38 +82,38 @@ LEAF(_save_fp_context) LEAF(_restore_fp_context) li v0, 0 # assume success EX(lw t0,(SC_FPC_CSR)(a0)) - EX(lwc1 $f0,(SC_FPREGS+0)(a0)) - EX(lwc1 $f1,(SC_FPREGS+8)(a0)) - EX(lwc1 $f2,(SC_FPREGS+16)(a0)) - EX(lwc1 $f3,(SC_FPREGS+24)(a0)) - EX(lwc1 $f4,(SC_FPREGS+32)(a0)) - EX(lwc1 $f5,(SC_FPREGS+40)(a0)) - EX(lwc1 $f6,(SC_FPREGS+48)(a0)) - 
EX(lwc1 $f7,(SC_FPREGS+56)(a0)) - EX(lwc1 $f8,(SC_FPREGS+64)(a0)) - EX(lwc1 $f9,(SC_FPREGS+72)(a0)) - EX(lwc1 $f10,(SC_FPREGS+80)(a0)) - EX(lwc1 $f11,(SC_FPREGS+88)(a0)) - EX(lwc1 $f12,(SC_FPREGS+96)(a0)) - EX(lwc1 $f13,(SC_FPREGS+104)(a0)) - EX(lwc1 $f14,(SC_FPREGS+112)(a0)) - EX(lwc1 $f15,(SC_FPREGS+120)(a0)) - EX(lwc1 $f16,(SC_FPREGS+128)(a0)) - EX(lwc1 $f17,(SC_FPREGS+136)(a0)) - EX(lwc1 $f18,(SC_FPREGS+144)(a0)) - EX(lwc1 $f19,(SC_FPREGS+152)(a0)) - EX(lwc1 $f20,(SC_FPREGS+160)(a0)) - EX(lwc1 $f21,(SC_FPREGS+168)(a0)) - EX(lwc1 $f22,(SC_FPREGS+176)(a0)) - EX(lwc1 $f23,(SC_FPREGS+184)(a0)) - EX(lwc1 $f24,(SC_FPREGS+192)(a0)) - EX(lwc1 $f25,(SC_FPREGS+200)(a0)) - EX(lwc1 $f26,(SC_FPREGS+208)(a0)) - EX(lwc1 $f27,(SC_FPREGS+216)(a0)) - EX(lwc1 $f28,(SC_FPREGS+224)(a0)) - EX(lwc1 $f29,(SC_FPREGS+232)(a0)) - EX(lwc1 $f30,(SC_FPREGS+240)(a0)) - EX(lwc1 $f31,(SC_FPREGS+248)(a0)) + EX(lwc1 $f0,(SC_FPREGS+0)(a0)) + EX(lwc1 $f1,(SC_FPREGS+8)(a0)) + EX(lwc1 $f2,(SC_FPREGS+16)(a0)) + EX(lwc1 $f3,(SC_FPREGS+24)(a0)) + EX(lwc1 $f4,(SC_FPREGS+32)(a0)) + EX(lwc1 $f5,(SC_FPREGS+40)(a0)) + EX(lwc1 $f6,(SC_FPREGS+48)(a0)) + EX(lwc1 $f7,(SC_FPREGS+56)(a0)) + EX(lwc1 $f8,(SC_FPREGS+64)(a0)) + EX(lwc1 $f9,(SC_FPREGS+72)(a0)) + EX(lwc1 $f10,(SC_FPREGS+80)(a0)) + EX(lwc1 $f11,(SC_FPREGS+88)(a0)) + EX(lwc1 $f12,(SC_FPREGS+96)(a0)) + EX(lwc1 $f13,(SC_FPREGS+104)(a0)) + EX(lwc1 $f14,(SC_FPREGS+112)(a0)) + EX(lwc1 $f15,(SC_FPREGS+120)(a0)) + EX(lwc1 $f16,(SC_FPREGS+128)(a0)) + EX(lwc1 $f17,(SC_FPREGS+136)(a0)) + EX(lwc1 $f18,(SC_FPREGS+144)(a0)) + EX(lwc1 $f19,(SC_FPREGS+152)(a0)) + EX(lwc1 $f20,(SC_FPREGS+160)(a0)) + EX(lwc1 $f21,(SC_FPREGS+168)(a0)) + EX(lwc1 $f22,(SC_FPREGS+176)(a0)) + EX(lwc1 $f23,(SC_FPREGS+184)(a0)) + EX(lwc1 $f24,(SC_FPREGS+192)(a0)) + EX(lwc1 $f25,(SC_FPREGS+200)(a0)) + EX(lwc1 $f26,(SC_FPREGS+208)(a0)) + EX(lwc1 $f27,(SC_FPREGS+216)(a0)) + EX(lwc1 $f28,(SC_FPREGS+224)(a0)) + EX(lwc1 $f29,(SC_FPREGS+232)(a0)) + EX(lwc1 $f30,(SC_FPREGS+240)(a0)) + EX(lwc1 $f31,(SC_FPREGS+248)(a0)) jr ra ctc1 t0,fcr31 END(_restore_fp_context) diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 8d32d5a6b46..5266c6ee2b3 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -42,7 +42,7 @@ /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti, int usedfpu) */ LEAF(resume) mfc0 t1, CP0_STATUS diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 8decdfacb44..5e51219990a 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -40,7 +40,7 @@ /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti, int usedfpu) */ .align 5 LEAF(resume) @@ -53,7 +53,7 @@ * check if we need to save FPU registers */ - beqz a3, 1f + beqz a3, 1f PTR_L t3, TASK_THREAD_INFO(a0) /* diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 804ebb2c34a..43d2d78d328 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -33,7 +33,7 @@ process_entry: b process_entry 1: - /* indirection page, update s0 */ + /* indirection page, update s0 */ and s3, s2, 0x2 beq s3, zero, 1f and s0, s2, ~0x2 @@ -69,7 +69,7 @@ done: of kexec_flag. 
*/ bal 1f - 1: move t1,ra; + 1: move t1,ra; PTR_LA t2,1b PTR_LA t0,kexec_flag PTR_SUB t0,t0,t2; @@ -158,10 +158,10 @@ arg3: PTR 0x0 */ secondary_kexec_args: EXPORT(secondary_kexec_args) -s_arg0: PTR 0x0 -s_arg1: PTR 0x0 -s_arg2: PTR 0x0 -s_arg3: PTR 0x0 +s_arg0: PTR 0x0 +s_arg1: PTR 0x0 +s_arg2: PTR 0x0 +s_arg3: PTR 0x0 .size secondary_kexec_args,PTRSIZE*4 kexec_flag: LONG 0x1 diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index b8c18dcdd2c..ce72bfff5e2 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -252,12 +252,12 @@ int rtlx_release(int index) unsigned int rtlx_read_poll(int index, int can_sleep) { - struct rtlx_channel *chan; + struct rtlx_channel *chan; - if (rtlx == NULL) - return 0; + if (rtlx == NULL) + return 0; - chan = &rtlx->channel[index]; + chan = &rtlx->channel[index]; /* data available to read? */ if (chan->lx_read == chan->lx_write) { @@ -451,8 +451,8 @@ static ssize_t file_write(struct file *file, const char __user * buffer, return -EAGAIN; __wait_event_interruptible(channel_wqs[minor].rt_queue, - rtlx_write_poll(minor), - ret); + rtlx_write_poll(minor), + ret); if (ret) return ret; } @@ -462,11 +462,11 @@ static ssize_t file_write(struct file *file, const char __user * buffer, static const struct file_operations rtlx_fops = { .owner = THIS_MODULE, - .open = file_open, + .open = file_open, .release = file_release, .write = file_write, - .read = file_read, - .poll = file_poll, + .read = file_read, + .poll = file_poll, .llseek = noop_llseek, }; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index d20a4bc9ed0..988bc06ff96 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -24,7 +24,7 @@ /* Highest syscall used of any syscall flavour */ #define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls - .align 5 + .align 5 NESTED(handle_sys, PT_SIZE, sp) .set noat SAVE_SOME @@ -54,7 +54,7 @@ stack_done: lw t0, TI_FLAGS($28) # syscall tracing enabled? li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT and t0, t1 - bnez t0, syscall_trace_entry # -> yes + bnez t0, syscall_trace_entry # -> yes jalr t2 # Do The Real Thing (TM) @@ -126,8 +126,8 @@ stackargs: la t1, 5f # load up to 3 arguments subu t1, t3 1: lw t5, 16(t0) # argument #5 from usp - .set push - .set noreorder + .set push + .set noreorder .set nomacro jr t1 addiu t1, 6f - 5f @@ -205,7 +205,7 @@ illegal_syscall: jr t2 /* Unreached */ -einval: li v0, -ENOSYS +einval: li v0, -ENOSYS jr ra END(sys_syscall) @@ -354,7 +354,7 @@ einval: li v0, -ENOSYS sys sys_ni_syscall 0 /* was create_module */ sys sys_init_module 5 sys sys_delete_module 1 - sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */ + sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */ sys sys_quotactl 4 sys sys_getpgid 1 sys sys_fchdir 1 @@ -589,7 +589,7 @@ einval: li v0, -ENOSYS /* We pre-compute the number of _instruction_ bytes needed to load or store the arguments 6-8. Negative values are ignored. 
*/ - .macro sys function, nargs + .macro sys function, nargs PTR \function LONG (\nargs << 2) - (5 << 2) .endm diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index b64f642da07..4c356b0093c 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -25,7 +25,7 @@ #define handle_sys64 handle_sys #endif - .align 5 + .align 5 NESTED(handle_sys64, PT_SIZE, sp) #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) /* @@ -40,7 +40,7 @@ NESTED(handle_sys64, PT_SIZE, sp) #endif dsubu t0, v0, __NR_64_Linux # check syscall number - sltiu t0, t0, __NR_64_Linux_syscalls + 1 + sltiu t0, t0, __NR_64_Linux_syscalls + 1 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction @@ -290,7 +290,7 @@ sys_call_table: PTR sys_quotactl PTR sys_ni_syscall /* was nfsservctl */ PTR sys_ni_syscall /* res. for getpmsg */ - PTR sys_ni_syscall /* 5175 for putpmsg */ + PTR sys_ni_syscall /* 5175 for putpmsg */ PTR sys_ni_syscall /* res. for afs_syscall */ PTR sys_ni_syscall /* res. for security */ PTR sys_gettid diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index c29ac197f44..30ef88b989a 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -22,7 +22,7 @@ #define handle_sysn32 handle_sys #endif - .align 5 + .align 5 NESTED(handle_sysn32, PT_SIZE, sp) #ifndef CONFIG_MIPS32_O32 .set noat @@ -33,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) #endif dsubu t0, v0, __NR_N32_Linux # check syscall number - sltiu t0, t0, __NR_N32_Linux_syscalls + 1 + sltiu t0, t0, __NR_N32_Linux_syscalls + 1 #ifndef CONFIG_MIPS32_O32 ld t1, PT_EPC(sp) # skip syscall on return @@ -279,7 +279,7 @@ EXPORT(sysn32_call_table) PTR sys_quotactl PTR sys_ni_syscall /* was nfsservctl */ PTR sys_ni_syscall /* res. for getpmsg */ - PTR sys_ni_syscall /* 6175 for putpmsg */ + PTR sys_ni_syscall /* 6175 for putpmsg */ PTR sys_ni_syscall /* res. for afs_syscall */ PTR sys_ni_syscall /* res. for security */ PTR sys_gettid @@ -402,8 +402,8 @@ EXPORT(sysn32_call_table) PTR compat_sys_rt_tgsigqueueinfo /* 6295 */ PTR sys_perf_event_open PTR sys_accept4 - PTR compat_sys_recvmmsg - PTR sys_getdents64 + PTR compat_sys_recvmmsg + PTR sys_getdents64 PTR sys_fanotify_init /* 6300 */ PTR sys_fanotify_mark PTR sys_prlimit64 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index cf3e75e4665..42e789562db 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -10,7 +10,7 @@ * * Hairy, the userspace application uses a different argument passing * convention than the kernel, so we have to translate things from o32 - * to ABI64 calling convention. 64-bit syscalls are also processed + * to ABI64 calling convention. 64-bit syscalls are also processed * here for now. 
*/ #include <linux/errno.h> @@ -24,7 +24,7 @@ #include <asm/unistd.h> #include <asm/sysmips.h> - .align 5 + .align 5 NESTED(handle_sys, PT_SIZE, sp) .set noat SAVE_SOME @@ -185,7 +185,7 @@ LEAF(sys32_syscall) jr t2 /* Unreached */ -einval: li v0, -ENOSYS +einval: li v0, -ENOSYS jr ra END(sys32_syscall) @@ -329,7 +329,7 @@ sys_call_table: PTR sys_bdflush PTR sys_sysfs /* 4135 */ PTR sys_32_personality - PTR sys_ni_syscall /* for afs_syscall */ + PTR sys_ni_syscall /* for afs_syscall */ PTR sys_setfsuid PTR sys_setfsgid PTR sys_32_llseek /* 4140 */ @@ -352,12 +352,12 @@ sys_call_table: PTR sys_munlockall PTR sys_sched_setparam PTR sys_sched_getparam - PTR sys_sched_setscheduler /* 4160 */ + PTR sys_sched_setscheduler /* 4160 */ PTR sys_sched_getscheduler PTR sys_sched_yield PTR sys_sched_get_priority_max PTR sys_sched_get_priority_min - PTR sys_32_sched_rr_get_interval /* 4165 */ + PTR sys_32_sched_rr_get_interval /* 4165 */ PTR compat_sys_nanosleep PTR sys_mremap PTR sys_accept @@ -387,7 +387,7 @@ sys_call_table: PTR sys_prctl PTR sys32_rt_sigreturn PTR sys_32_rt_sigaction - PTR sys_32_rt_sigprocmask /* 4195 */ + PTR sys_32_rt_sigprocmask /* 4195 */ PTR sys_32_rt_sigpending PTR compat_sys_rt_sigtimedwait PTR sys_32_rt_sigqueueinfo diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8c41187801c..653197e151d 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -8,7 +8,7 @@ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle * Copyright (C) 1996 Stoned Elipot * Copyright (C) 1999 Silicon Graphics, Inc. - * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki + * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki */ #include <linux/init.h> #include <linux/ioport.h> @@ -449,7 +449,7 @@ static void __init bootmem_init(void) * At this stage the bootmem allocator is ready to use. * * NOTE: historically plat_mem_setup did the entire platform initialization. - * This was rather impractical because it meant plat_mem_setup had to + * This was rather impractical because it meant plat_mem_setup had to * get away without any kind of memory allocator. To keep old code from * breaking plat_setup was just renamed to plat_setup and a second platform * initialization hook for anything else was introduced. @@ -469,7 +469,7 @@ static int __init early_parse_mem(char *p) if (usermem == 0) { boot_mem_map.nr_map = 0; usermem = 1; - } + } start = 0; size = memparse(p, &p); if (*p == '@') @@ -571,7 +571,7 @@ static void __init mips_parse_crashkernel(void) return; crashk_res.start = crash_base; - crashk_res.end = crash_base + crash_size - 1; + crashk_res.end = crash_base + crash_size - 1; } static void __init request_crashkernel(struct resource *res) @@ -585,7 +585,7 @@ static void __init request_crashkernel(struct resource *res) crashk_res.start + 1) >> 20), (unsigned long)(crashk_res.start >> 20)); } -#else /* !defined(CONFIG_KEXEC) */ +#else /* !defined(CONFIG_KEXEC) */ static void __init mips_parse_crashkernel(void) { } diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index b6aa7703501..0f57e06b7fd 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -445,7 +445,7 @@ give_sigsegv: #endif static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, - struct pt_regs *regs, int signr, sigset_t *set, + struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info) { struct rt_sigframe __user *frame; @@ -458,15 +458,15 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka, /* Create siginfo. 
*/ err |= copy_siginfo_to_user(&frame->rs_info, info); - /* Create the ucontext. */ + /* Create the ucontext. */ err |= __put_user(0, &frame->rs_uc.uc_flags); err |= __put_user(NULL, &frame->rs_uc.uc_link); err |= __put_user((void __user *)current->sas_ss_sp, - &frame->rs_uc.uc_stack.ss_sp); + &frame->rs_uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[29]), - &frame->rs_uc.uc_stack.ss_flags); + &frame->rs_uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, - &frame->rs_uc.uc_stack.ss_size); + &frame->rs_uc.uc_stack.ss_size); err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); @@ -506,7 +506,7 @@ struct mips_abi mips_abi = { .setup_frame = setup_frame, .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline), #endif - .setup_rt_frame = setup_rt_frame, + .setup_rt_frame = setup_rt_frame, .rt_signal_return_offset = offsetof(struct mips_vdso, rt_signal_trampoline), .restart = __NR_restart_syscall @@ -538,7 +538,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info, regs->cp0_epc -= 4; } - regs->regs[0] = 0; /* Don't deal with this again. */ + regs->regs[0] = 0; /* Don't deal with this again. */ } if (sig_uses_siginfo(ka)) @@ -562,7 +562,7 @@ static void do_signal(struct pt_regs *regs) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - /* Whee! Actually deliver the signal. */ + /* Whee! Actually deliver the signal. */ handle_signal(signr, &info, &ka, regs); return; } @@ -583,7 +583,7 @@ static void do_signal(struct pt_regs *regs) regs->cp0_epc -= 4; break; } - regs->regs[0] = 0; /* Don't deal with this again. */ + regs->regs[0] = 0; /* Don't deal with this again. */ } /* diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index da1b56a39ac..cae0b0e42a5 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -48,7 +48,7 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user /* * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... */ -#define __NR_O32_restart_syscall 4253 +#define __NR_O32_restart_syscall 4253 /* 32-bit compatibility types */ @@ -69,11 +69,11 @@ typedef struct sigaltstack32 { } stack32_t; struct ucontext32 { - u32 uc_flags; - s32 uc_link; - stack32_t uc_stack; + u32 uc_flags; + s32 uc_link; + stack32_t uc_stack; struct sigcontext32 uc_mcontext; - compat_sigset_t uc_sigmask; /* mask last for extensibility */ + compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; struct sigframe32 { @@ -338,7 +338,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act, return -EFAULT; err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); err |= __put_user((u32)(u64)old_ka.sa.sa_handler, - &oact->sa_handler); + &oact->sa_handler); err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); err |= __put_user(0, &oact->sa_mask.sig[1]); err |= __put_user(0, &oact->sa_mask.sig[2]); @@ -599,16 +599,16 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka, /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ err |= copy_siginfo_to_user32(&frame->rs_info, info); - /* Create the ucontext. */ + /* Create the ucontext. 
*/ err |= __put_user(0, &frame->rs_uc.uc_flags); err |= __put_user(0, &frame->rs_uc.uc_link); sp = (int) (long) current->sas_ss_sp; err |= __put_user(sp, - &frame->rs_uc.uc_stack.ss_sp); + &frame->rs_uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[29]), - &frame->rs_uc.uc_stack.ss_flags); + &frame->rs_uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, - &frame->rs_uc.uc_stack.ss_size); + &frame->rs_uc.uc_stack.ss_size); err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext); err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); @@ -650,7 +650,7 @@ struct mips_abi mips_abi_32 = { .setup_frame = setup_frame_32, .signal_return_offset = offsetof(struct mips_vdso, o32_signal_trampoline), - .setup_rt_frame = setup_rt_frame_32, + .setup_rt_frame = setup_rt_frame_32, .rt_signal_return_offset = offsetof(struct mips_vdso, o32_rt_signal_trampoline), .restart = __NR_O32_restart_syscall @@ -690,7 +690,7 @@ SYSCALL_DEFINE4(32_rt_sigaction, int, sig, return -EFAULT; err |= __put_user((u32)(u64)old_sa.sa.sa_handler, - &oact->sa_handler); + &oact->sa_handler); err |= __put_user(old_sa.sa.sa_flags, &oact->sa_flags); err |= put_sigset(&old_sa.sa.sa_mask, &oact->sa_mask); if (err) diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 3574c145511..7246e33721a 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -59,11 +59,11 @@ typedef struct sigaltstack32 { } stack32_t; struct ucontextn32 { - u32 uc_flags; - s32 uc_link; - stack32_t uc_stack; + u32 uc_flags; + s32 uc_link; + stack32_t uc_stack; struct sigcontext uc_mcontext; - compat_sigset_t uc_sigmask; /* mask last for extensibility */ + compat_sigset_t uc_sigmask; /* mask last for extensibility */ }; struct rt_sigframe_n32 { @@ -162,16 +162,16 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka, /* Create siginfo. */ err |= copy_siginfo_to_user32(&frame->rs_info, info); - /* Create the ucontext. */ + /* Create the ucontext. 
*/ err |= __put_user(0, &frame->rs_uc.uc_flags); err |= __put_user(0, &frame->rs_uc.uc_link); sp = (int) (long) current->sas_ss_sp; err |= __put_user(sp, - &frame->rs_uc.uc_stack.ss_sp); + &frame->rs_uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->regs[29]), - &frame->rs_uc.uc_stack.ss_flags); + &frame->rs_uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, - &frame->rs_uc.uc_stack.ss_size); + &frame->rs_uc.uc_stack.ss_size); err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); @@ -207,7 +207,7 @@ give_sigsegv: } struct mips_abi mips_abi_n32 = { - .setup_rt_frame = setup_rt_frame_n32, + .setup_rt_frame = setup_rt_frame_n32, .rt_signal_return_offset = offsetof(struct mips_vdso, n32_rt_signal_trampoline), .restart = __NR_N32_restart_syscall diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index 06cd0c610f4..c2e5d74739b 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c @@ -172,7 +172,7 @@ void __init cmp_smp_setup(void) if (amon_cpu_avail(i)) { set_cpu_possible(i, true); __cpu_number_map[i] = ++ncpu; - __cpu_logical_map[ncpu] = i; + __cpu_logical_map[ncpu] = i; } } diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 2defa2bbdaa..bfede063d96 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -71,7 +71,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, /* Record this as available CPU */ set_cpu_possible(tc, true); __cpu_number_map[tc] = ++ncpu; - __cpu_logical_map[ncpu] = tc; + __cpu_logical_map[ncpu] = tc; } /* Disable multi-threading with TC's */ @@ -215,7 +215,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) write_tc_gpr_gp((unsigned long)gp); flush_icache_range((unsigned long)gp, - (unsigned long)(gp + sizeof(struct thread_info))); + (unsigned long)(gp + sizeof(struct thread_info))); /* finally out of configuration and into chaos */ clear_c0_mvpcontrol(MVPCONTROL_VPC); diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S index 20938a4cb52..76016ac0a9c 100644 --- a/arch/mips/kernel/smtc-asm.S +++ b/arch/mips/kernel/smtc-asm.S @@ -65,7 +65,7 @@ FEXPORT(__smtc_ipi_vector) 1: /* * The IPI sender has put some information on the anticipated - * kernel stack frame. If we were in user mode, this will be + * kernel stack frame. If we were in user mode, this will be * built above the saved kernel SP. If we were already in the * kernel, it will be built above the current CPU SP. 
* diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c index 145771c0ed7..aee7c8177b5 100644 --- a/arch/mips/kernel/smtc-proc.c +++ b/arch/mips/kernel/smtc-proc.c @@ -35,7 +35,7 @@ static struct proc_dir_entry *smtc_stats; atomic_t smtc_fpu_recoveries; static int proc_read_smtc(char *page, char **start, off_t off, - int count, int *eof, void *data) + int count, int *eof, void *data) { int totalen = 0; int len; @@ -68,7 +68,7 @@ static int proc_read_smtc(char *page, char **start, off_t off, page += len; } len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n", - atomic_read(&smtc_fpu_recoveries)); + atomic_read(&smtc_fpu_recoveries)); totalen += len; page += len; @@ -87,5 +87,5 @@ void init_smtc_stats(void) atomic_set(&smtc_fpu_recoveries, 0); smtc_stats = create_proc_read_entry("smtc", 0444, NULL, - proc_read_smtc, NULL); + proc_read_smtc, NULL); } diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 1d47843d3cc..1c152a93dc7 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -235,7 +235,7 @@ static void smtc_configure_tlb(void) mips_ihb(); /* No need to un-Halt - that happens later anyway */ for (i=0; i < vpes; i++) { - write_tc_c0_tcbind(i); + write_tc_c0_tcbind(i); /* * To be 100% sure we're really getting the right * information, we exit the configuration state @@ -286,7 +286,7 @@ static void smtc_configure_tlb(void) /* * Incrementally build the CPU map out of constituent MIPS MT cores, - * using the specified available VPEs and TCs. Plaform code needs + * using the specified available VPEs and TCs. Plaform code needs * to ensure that each MIPS MT core invokes this routine on reset, * one at a time(!). * @@ -348,7 +348,7 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) { /* * FIXME: Multi-core SMTC hasn't been tested and the - * maximum number of VPEs may change. + * maximum number of VPEs may change. */ cp1contexts[0] = smtc_nconf1[0] - 1; cp1contexts[1] = smtc_nconf1[1]; @@ -761,9 +761,9 @@ void smtc_forward_irq(struct irq_data *d) * mask has been purged of bits corresponding to nonexistent and * offline "CPUs", and to TCs bound to VPEs other than the VPE * connected to the physical interrupt input for the interrupt - * in question. Otherwise we have a nasty problem with interrupt + * in question. Otherwise we have a nasty problem with interrupt * mask management. This is best handled in non-performance-critical - * platform IRQ affinity setting code, to minimize interrupt-time + * platform IRQ affinity setting code, to minimize interrupt-time * checks. */ @@ -899,10 +899,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) mips_ihb(); /* - * Inspect TCStatus - if IXMT is set, we have to queue + * Inspect TCStatus - if IXMT is set, we have to queue * a message. Otherwise, we set up the "interrupt" * of the other TC - */ + */ tcstatus = read_tc_c0_tcstatus(); if ((tcstatus & TCSTATUS_IXMT) != 0) { @@ -964,7 +964,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi) * CU bit of Status is indicator that TC was * already running on a kernel stack... 
*/ - if (tcstatus & ST0_CU0) { + if (tcstatus & ST0_CU0) { /* Note that this "- 1" is pointer arithmetic */ kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1; } else { @@ -1288,7 +1288,7 @@ void smtc_idle_loop_hook(void) for (tc = 0; tc < hook_ntcs; tc++) { tcnoprog[tc] = 0; clock_hang_reported[tc] = 0; - } + } for (vpe = 0; vpe < 2; vpe++) for (im = 0; im < 8; im++) imstuckcount[vpe][im] = 0; @@ -1485,7 +1485,7 @@ static int halt_state_save[NR_CPUS]; /* * To really, really be sure that nothing is being done - * by other TCs, halt them all. This code assumes that + * by other TCs, halt them all. This code assumes that * a DVPE has already been done, so while their Halted * state is theoretically architecturally unstable, in * practice, it's not going to change while we're looking diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c index 7f1eca3858d..1ff43d5ac2c 100644 --- a/arch/mips/kernel/sync-r4k.c +++ b/arch/mips/kernel/sync-r4k.c @@ -25,7 +25,7 @@ static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0); static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0); static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); -#define COUNTON 100 +#define COUNTON 100 #define NR_LOOPS 5 void __cpuinit synchronise_count_master(int cpu) diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 107307d583e..d7feee0c273 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -41,9 +41,9 @@ /* * For historic reasons the pipe(2) syscall on MIPS has an unusual calling - * convention. It returns results in registers $v0 / $v1 which means there + * convention. It returns results in registers $v0 / $v1 which means there * is no need for it to do verify the validity of a userspace pointer - * argument. Historically that used to be expensive in Linux. These days + * argument. Historically that used to be expensive in Linux. These days * the performance advantage is negligible. */ asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs) @@ -124,7 +124,7 @@ _sys_clone(nabi_no_regargs struct pt_regs regs) child_tidptr = (int __user *) regs.regs[8]; #endif return do_fork(clone_flags, newsp, 0, - parent_tidptr, child_tidptr); + parent_tidptr, child_tidptr); } SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 99d73b72b00..9d686bf97b0 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -5,8 +5,8 @@ * * Common time service routines for MIPS machines. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ @@ -62,8 +62,8 @@ EXPORT_SYMBOL(perf_irq); * time_init() - it does the following things. 
* * 1) plat_time_init() - - * a) (optional) set up RTC routines, - * b) (optional) calibrate and set the mips_hpt_frequency + * a) (optional) set up RTC routines, + * b) (optional) calibrate and set the mips_hpt_frequency * (only needed if you intended to use cpu counter as timer interrupt * source) * 2) calculate a couple of cached variables for later usage @@ -75,7 +75,7 @@ unsigned int mips_hpt_frequency; * This function exists in order to cause an error due to a duplicate * definition if platform code should have its own implementation. The hook * to use instead is plat_time_init. plat_time_init does not receive the - * irqaction pointer argument anymore. This is because any function which + * irqaction pointer argument anymore. This is because any function which * initializes an interrupt timer now takes care of its own request_irq rsp. * setup_irq calls and each clock_event_device should use its own * struct irqrequest. @@ -93,7 +93,7 @@ static __init int cpu_has_mfc0_count_bug(void) case CPU_R4000MC: /* * V3.0 is documented as suffering from the mfc0 from count bug. - * Afaik this is the last version of the R4000. Later versions + * Afaik this is the last version of the R4000. Later versions * were marketed as R4400. */ return 1; diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e3a5f3ddab1..08c07bdb7e6 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -164,7 +164,7 @@ static void show_stacktrace(struct task_struct *task, i = 0; while ((unsigned long) sp & (PAGE_SIZE - 1)) { if (i && ((i % (64 / field)) == 0)) - printk("\n "); + printk("\n "); if (i > 39) { printk(" ..."); break; @@ -279,7 +279,7 @@ static void __show_regs(const struct pt_regs *regs) printk("ra : %0*lx %pS\n", field, regs->regs[31], (void *) regs->regs[31]); - printk("Status: %08x ", (uint32_t) regs->cp0_status); + printk("Status: %08x ", (uint32_t) regs->cp0_status); if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { if (regs->cp0_status & ST0_KUO) @@ -441,7 +441,7 @@ asmlinkage void do_be(struct pt_regs *regs) int data = regs->cp0_cause & 4; int action = MIPS_BE_FATAL; - /* XXX For now. Fixme, this searches the wrong table ... */ + /* XXX For now. Fixme, this searches the wrong table ... */ if (data && !user_mode(regs)) fixup = search_dbe_tables(exception_epc(regs)); @@ -739,7 +739,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* Restore the hardware register state */ - own_fpu(1); /* Using the FPU again. */ + own_fpu(1); /* Using the FPU again. */ /* If something went wrong, signal */ process_fpemu_return(sig, fault_addr); @@ -966,7 +966,7 @@ int cu2_notifier_call_chain(unsigned long val, void *v) } static int default_cu2_call(struct notifier_block *nfb, unsigned long action, - void *data) + void *data) { struct pt_regs *regs = data; @@ -974,7 +974,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, default: die_if_kernel("Unhandled kernel unaligned access or invalid " "instruction", regs); - /* Fall through */ + /* Fall through */ case CU2_EXCEPTION: force_sig(SIGILL, current); @@ -1029,10 +1029,10 @@ asmlinkage void do_cpu(struct pt_regs *regs) /* * Old (MIPS I and MIPS II) processors will set this code * for COP1X opcode instructions that replaced the original - * COP3 space. We don't limit COP1 space instructions in + * COP3 space. 
We don't limit COP1 space instructions in * the emulator according to the CPU ISA, so we want to * treat COP1X instructions consistently regardless of which - * code the CPU chose. Therefore we redirect this trap to + * code the CPU chose. Therefore we redirect this trap to * the FP emulator too. * * Then some newer FPU-less processors use this code @@ -1044,9 +1044,9 @@ asmlinkage void do_cpu(struct pt_regs *regs) /* Fall through. */ case 1: - if (used_math()) /* Using the FPU again. */ + if (used_math()) /* Using the FPU again. */ own_fpu(1); - else { /* First time FPU user. */ + else { /* First time FPU user. */ init_fpu(); set_used_math(); } @@ -1114,7 +1114,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) show_regs(regs); if (multi_match) { - printk("Index : %0x\n", read_c0_index()); + printk("Index : %0x\n", read_c0_index()); printk("Pagemask: %0x\n", read_c0_pagemask()); printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); @@ -1181,7 +1181,7 @@ asmlinkage void do_dsp(struct pt_regs *regs) asmlinkage void do_reserved(struct pt_regs *regs) { /* - * Game over - no way to handle this if it ever occurs. Most probably + * Game over - no way to handle this if it ever occurs. Most probably * caused by a new unknown cpu type or after another deadly * hard/software error. */ @@ -1705,7 +1705,7 @@ void __init trap_init(void) #if defined(CONFIG_KGDB) if (kgdb_early_setup) - return; /* Already done */ + return; /* Already done */ #endif if (cpu_has_veic || cpu_has_vint) { @@ -1799,7 +1799,7 @@ void __init trap_init(void) * The R6000 is the only R-series CPU that features a machine * check exception (similar to the R4000 cache error) and * unaligned ldc1/sdc1 exception. The handlers have not been - * written yet. Well, anyway there is no R6000 machine on the + * written yet. Well, anyway there is no R6000 machine on the * current list of targets for Linux/MIPS. * (Duh, crap, there is someone with a triple R6k machine) */ diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 9c58bdf58f2..6087a54c86a 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -21,11 +21,11 @@ * * For now I enable fixing of address errors by default to make life easier. * I however intend to disable this somewhen in the future when the alignment - * problems with user programs have been fixed. For programmers this is the + * problems with user programs have been fixed. For programmers this is the * right way to go. * * Fixing address errors is a per process option. The option is inherited - * across fork(2) and execve(2) calls. If you really want to use the + * across fork(2) and execve(2) calls. 
If you really want to use the * option in your user programs - I discourage the use of the software * emulation strongly - use the following code in your userland stuff: * @@ -43,34 +43,34 @@ * #include <sys/sysmips.h> * * struct foo { - * unsigned char bar[8]; + * unsigned char bar[8]; * }; * * main(int argc, char *argv[]) * { - * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; - * unsigned int *p = (unsigned int *) (x.bar + 3); - * int i; + * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; + * unsigned int *p = (unsigned int *) (x.bar + 3); + * int i; * - * if (argc > 1) - * sysmips(MIPS_FIXADE, atoi(argv[1])); + * if (argc > 1) + * sysmips(MIPS_FIXADE, atoi(argv[1])); * - * printf("*p = %08lx\n", *p); + * printf("*p = %08lx\n", *p); * - * *p = 0xdeadface; + * *p = 0xdeadface; * - * for(i = 0; i <= 7; i++) - * printf("%02x ", x.bar[i]); - * printf("\n"); + * for(i = 0; i <= 7; i++) + * printf("%02x ", x.bar[i]); + * printf("\n"); * } * * Coprocessor loads are not supported; I think this case is unimportant * in the practice. * * TODO: Handle ndc (attempted store to doubleword in uncached memory) - * exception for the R6000. - * A store crossing a page boundary might be executed only partially. - * Undo the partial store in this case. + * exception for the R6000. + * A store crossing a page boundary might be executed only partially. + * Undo the partial store in this case. */ #include <linux/mm.h> #include <linux/signal.h> @@ -86,7 +86,7 @@ #include <asm/inst.h> #include <asm/uaccess.h> -#define STR(x) __STR(x) +#define STR(x) __STR(x) #define __STR(x) #x enum { diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 0a4336b803e..05826d20a79 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -22,12 +22,12 @@ PHDRS { #ifdef CONFIG_32BIT #ifdef CONFIG_CPU_LITTLE_ENDIAN - jiffies = jiffies_64; + jiffies = jiffies_64; #else - jiffies = jiffies_64 + 4; + jiffies = jiffies_64 + 4; #endif #else - jiffies = jiffies_64; + jiffies = jiffies_64; #endif SECTIONS @@ -139,7 +139,7 @@ SECTIONS /* * Force .bss to 64K alignment so that .bss..swapper_pg_dir - * gets that alignment. .sbss should be empty, so there will be + * gets that alignment. .sbss should be empty, so there will be * no holes after __init_end. */ BSS_SECTION(0, 0x10000, 0) diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 147cec19621..32fc5d4a22e 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -254,7 +254,7 @@ static void __maybe_unused dump_mtregs(void) val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT); } -/* Find some VPE program space */ +/* Find some VPE program space */ static void *alloc_progmem(unsigned long len) { void *addr; @@ -292,7 +292,7 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr) } /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld - might -- code, read-only data, read-write data, small data. Tally + might -- code, read-only data, read-write data, small data. Tally sizes, and place the offsets into sh_entsize fields: high bit means it belongs in init. 
*/ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, @@ -386,7 +386,7 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location, if( (rel > 32768) || (rel < -32768) ) { printk(KERN_DEBUG "VPE loader: " - "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); + "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); return -ENOEXEC; } @@ -458,7 +458,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, Elf32_Addr val, vallo; struct mips_hi16 *l, *next; - /* Sign extend the addend we extract from the lo insn. */ + /* Sign extend the addend we extract from the lo insn. */ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; if (mips_hi16_list != NULL) { @@ -470,7 +470,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, /* * The value for the HI16 had best be the same. */ - if (v != l->value) { + if (v != l->value) { printk(KERN_DEBUG "VPE loader: " "apply_r_mips_lo16/hi16: \t" "inconsistent value information\n"); @@ -505,7 +505,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, } /* - * Ok, we're done with the HI16 relocs. Now deal with the LO16. + * Ok, we're done with the HI16 relocs. Now deal with the LO16. */ val = v + vallo; insnlo = (insnlo & ~0xffff) | (val & 0xffff); @@ -579,7 +579,7 @@ static int apply_relocations(Elf32_Shdr *sechdrs, res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); if( res ) { char *r = rstrs[ELF32_R_TYPE(r_info)]; - printk(KERN_WARNING "VPE loader: .text+0x%x " + printk(KERN_WARNING "VPE loader: .text+0x%x " "relocation type %s for symbol \"%s\" failed\n", rel[i].r_offset, r ? r : "UNKNOWN", strtab + sym->st_name); @@ -772,7 +772,7 @@ static int vpe_run(struct vpe * v) /* Set up the XTC bit in vpeconf0 to point at our tc */ write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) - | (t->index << VPECONF0_XTC_SHIFT)); + | (t->index << VPECONF0_XTC_SHIFT)); back_to_back_c0_hazard(); @@ -926,34 +926,34 @@ static int vpe_elfload(struct vpe * v) secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr); } - /* Fix up syms, so that st_value is a pointer to location. */ - simplify_symbols(sechdrs, symindex, strtab, secstrings, - hdr->e_shnum, &mod); - - /* Now do relocations. */ - for (i = 1; i < hdr->e_shnum; i++) { - const char *strtab = (char *)sechdrs[strindex].sh_addr; - unsigned int info = sechdrs[i].sh_info; - - /* Not a valid relocation section? */ - if (info >= hdr->e_shnum) - continue; - - /* Don't bother with non-allocated sections */ - if (!(sechdrs[info].sh_flags & SHF_ALLOC)) - continue; - - if (sechdrs[i].sh_type == SHT_REL) - err = apply_relocations(sechdrs, strtab, symindex, i, - &mod); - else if (sechdrs[i].sh_type == SHT_RELA) - err = apply_relocate_add(sechdrs, strtab, symindex, i, - &mod); - if (err < 0) - return err; - - } - } else { + /* Fix up syms, so that st_value is a pointer to location. */ + simplify_symbols(sechdrs, symindex, strtab, secstrings, + hdr->e_shnum, &mod); + + /* Now do relocations. */ + for (i = 1; i < hdr->e_shnum; i++) { + const char *strtab = (char *)sechdrs[strindex].sh_addr; + unsigned int info = sechdrs[i].sh_info; + + /* Not a valid relocation section? 
*/ + if (info >= hdr->e_shnum) + continue; + + /* Don't bother with non-allocated sections */ + if (!(sechdrs[info].sh_flags & SHF_ALLOC)) + continue; + + if (sechdrs[i].sh_type == SHT_REL) + err = apply_relocations(sechdrs, strtab, symindex, i, + &mod); + else if (sechdrs[i].sh_type == SHT_RELA) + err = apply_relocate_add(sechdrs, strtab, symindex, i, + &mod); + if (err < 0) + return err; + + } + } else { struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff); for (i = 0; i < hdr->e_phnum; i++) { @@ -968,16 +968,16 @@ static int vpe_elfload(struct vpe * v) } for (i = 0; i < hdr->e_shnum; i++) { - /* Internal symbols and strings. */ - if (sechdrs[i].sh_type == SHT_SYMTAB) { - symindex = i; - strindex = sechdrs[i].sh_link; - strtab = (char *)hdr + sechdrs[strindex].sh_offset; - - /* mark the symtab's address for when we try to find the - magic symbols */ - sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; - } + /* Internal symbols and strings. */ + if (sechdrs[i].sh_type == SHT_SYMTAB) { + symindex = i; + strindex = sechdrs[i].sh_link; + strtab = (char *)hdr + sechdrs[strindex].sh_offset; + + /* mark the symtab's address for when we try to find the + magic symbols */ + sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; + } } } @@ -1049,7 +1049,7 @@ static int getcwd(char *buff, int size) return ret; } -/* checks VPE is unused and gets ready to load program */ +/* checks VPE is unused and gets ready to load program */ static int vpe_open(struct inode *inode, struct file *filp) { enum vpe_state state; @@ -1121,11 +1121,11 @@ static int vpe_release(struct inode *inode, struct file *filp) if (vpe_elfload(v) >= 0) { vpe_run(v); } else { - printk(KERN_WARNING "VPE loader: ELF load failed.\n"); + printk(KERN_WARNING "VPE loader: ELF load failed.\n"); ret = -ENOEXEC; } } else { - printk(KERN_WARNING "VPE loader: only elf files are supported\n"); + printk(KERN_WARNING "VPE loader: only elf files are supported\n"); ret = -ENOEXEC; } diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c index c1540696803..7726f6157d9 100644 --- a/arch/mips/kernel/watch.c +++ b/arch/mips/kernel/watch.c @@ -12,7 +12,7 @@ #include <asm/watch.h> /* - * Install the watch registers for the current thread. A maximum of + * Install the watch registers for the current thread. A maximum of * four registers are installed although the machine may have more. */ void mips_install_watch_registers(void) @@ -72,7 +72,7 @@ void mips_read_watch_registers(void) } /* - * Disable all watch registers. Although only four registers are + * Disable all watch registers. Although only four registers are * installed, all are cleared to eliminate the possibility of endless * looping in the watch handler. */ |
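
The R_MIPS_HI16/R_MIPS_LO16 handling in the vpe.c hunks above (apply_r_mips_lo16 and the mips_hi16_list pairing) rests on one piece of arithmetic: the LO16 half is consumed by a sign-extending addiu, so the HI16 half must carry a 0x8000 bias for a lui/addiu pair to rebuild the original 32-bit value — the same "((x & 0xffff) ^ 0x8000) - 0x8000" sign extension the loader applies to the lo instruction. The short C sketch below only illustrates that split under that assumption; it is not kernel code, and the hi16()/lo16() helpers and the sample address are invented for the example.

/*
 * Stand-alone illustration (hypothetical, not kernel code) of the
 * %hi/%lo split used by the R_MIPS_HI16/R_MIPS_LO16 relocations.
 * Because the low half is added back by a sign-extending addiu, the
 * high half is biased by 0x8000 so that lui+addiu reconstructs the
 * original 32-bit value for any low half, positive or negative.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint16_t hi16(uint32_t val)
{
	/* bias by 0x8000 to compensate for the sign-extended low half */
	return (uint16_t)((val + 0x8000) >> 16);
}

static int16_t lo16(uint32_t val)
{
	/* same "((x & 0xffff) ^ 0x8000) - 0x8000" trick as apply_r_mips_lo16() */
	int32_t x = (int32_t)(val & 0xffff);
	return (int16_t)((x ^ 0x8000) - 0x8000);
}

int main(void)
{
	uint32_t addr = 0x80408ffc;	/* arbitrary sample address */

	/* what lui %hi(addr); addiu %lo(addr) would compute at run time */
	uint32_t rebuilt = ((uint32_t)hi16(addr) << 16) + (int32_t)lo16(addr);

	assert(rebuilt == addr);
	printf("hi=0x%04x lo=%d rebuilt=0x%08x\n",
	       (unsigned)hi16(addr), lo16(addr), (unsigned)rebuilt);
	return 0;
}

Compiled with any hosted C compiler, the assert confirms the round trip for the sample address; the need for this bias is also why the loader keeps each HI16 relocation queued on mips_hi16_list until the matching LO16 relocation arrives and supplies the low half.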