Diffstat (limited to 'arch/blackfin/kernel'): 28 files changed, 2338 insertions(+), 1369 deletions(-)
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index a8ddbc8ed5a..30d0d1f01dc 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile @@ -7,7 +7,8 @@ extra-y := init_task.o vmlinux.lds obj-y := \ entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \ - fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o + fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o \ + exception.o dumpstack.o ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y) obj-y += time-ts.o @@ -25,9 +26,12 @@ obj-$(CONFIG_CPLB_INFO) += cplbinfo.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o +obj-$(CONFIG_NMI_WATCHDOG) += nmi.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_DEBUG_VERBOSE) += trace.o +obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o # the kgdb test puts code into L2 and without linker # relaxation, we need to force long calls to/from it diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c index 924c00286ba..26403d1c9e6 100644 --- a/arch/blackfin/kernel/bfin_dma_5xx.c +++ b/arch/blackfin/kernel/bfin_dma_5xx.c @@ -91,7 +91,7 @@ late_initcall(proc_dma_init); */ int request_dma(unsigned int channel, const char *device_id) { - pr_debug("request_dma() : BEGIN \n"); + pr_debug("request_dma() : BEGIN\n"); if (device_id == NULL) printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); @@ -107,7 +107,7 @@ int request_dma(unsigned int channel, const char *device_id) #endif if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) { - pr_debug("DMA CHANNEL IN USE \n"); + pr_debug("DMA CHANNEL IN USE\n"); return -EBUSY; } @@ -131,7 +131,7 @@ int request_dma(unsigned int channel, const char *device_id) * you have to request DMA, before doing any operations on * descriptor/channel */ - pr_debug("request_dma() : END \n"); + pr_debug("request_dma() : END\n"); return 0; } EXPORT_SYMBOL(request_dma); @@ -171,7 +171,7 @@ static void clear_dma_buffer(unsigned int channel) void free_dma(unsigned int channel) { - pr_debug("freedma() : BEGIN \n"); + pr_debug("freedma() : BEGIN\n"); BUG_ON(channel >= MAX_DMA_CHANNELS || !atomic_read(&dma_ch[channel].chan_status)); @@ -185,7 +185,7 @@ void free_dma(unsigned int channel) /* Clear the DMA Variable in the Channel */ atomic_set(&dma_ch[channel].chan_status, 0); - pr_debug("freedma() : END \n"); + pr_debug("freedma() : END\n"); } EXPORT_SYMBOL(free_dma); diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c index a174596cc00..42833ee2b30 100644 --- a/arch/blackfin/kernel/bfin_gpio.c +++ b/arch/blackfin/kernel/bfin_gpio.c @@ -475,9 +475,7 @@ GET_GPIO_P(maskb) #ifdef CONFIG_PM - static unsigned short wakeup_map[GPIO_BANK_NUM]; -static unsigned char wakeup_flags_map[MAX_BLACKFIN_GPIOS]; static const unsigned int sic_iwr_irqs[] = { #if defined(BF533_FAMILY) @@ -514,112 +512,26 @@ static const unsigned int sic_iwr_irqs[] = { ************************************************************* * MODIFICATION HISTORY : **************************************************************/ -int gpio_pm_wakeup_request(unsigned gpio, unsigned char type) -{ - unsigned long flags; - - if ((check_gpio(gpio) < 0) || !type) - return -EINVAL; - - local_irq_save_hw(flags); - wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio); - wakeup_flags_map[gpio] = type; - local_irq_restore_hw(flags); - - return 0; -} 
-EXPORT_SYMBOL(gpio_pm_wakeup_request); - -void gpio_pm_wakeup_free(unsigned gpio) +int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl) { unsigned long flags; if (check_gpio(gpio) < 0) - return; + return -EINVAL; local_irq_save_hw(flags); - - wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); - - local_irq_restore_hw(flags); -} -EXPORT_SYMBOL(gpio_pm_wakeup_free); - -static int bfin_gpio_wakeup_type(unsigned gpio, unsigned char type) -{ - port_setup(gpio, GPIO_USAGE); - set_gpio_dir(gpio, 0); - set_gpio_inen(gpio, 1); - - if (type & (PM_WAKE_RISING | PM_WAKE_FALLING)) - set_gpio_edge(gpio, 1); - else - set_gpio_edge(gpio, 0); - - if ((type & (PM_WAKE_BOTH_EDGES)) == (PM_WAKE_BOTH_EDGES)) - set_gpio_both(gpio, 1); + if (ctrl) + wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio); else - set_gpio_both(gpio, 0); - - if ((type & (PM_WAKE_FALLING | PM_WAKE_LOW))) - set_gpio_polar(gpio, 1); - else - set_gpio_polar(gpio, 0); - - SSYNC(); - - return 0; -} + wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); -u32 bfin_pm_standby_setup(void) -{ - u16 bank, mask, i, gpio; - - for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { - mask = wakeup_map[gpio_bank(i)]; - bank = gpio_bank(i); - - gpio_bank_saved[bank].maskb = gpio_array[bank]->maskb; - gpio_array[bank]->maskb = 0; - - if (mask) { -#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x) - gpio_bank_saved[bank].fer = *port_fer[bank]; -#endif - gpio_bank_saved[bank].inen = gpio_array[bank]->inen; - gpio_bank_saved[bank].polar = gpio_array[bank]->polar; - gpio_bank_saved[bank].dir = gpio_array[bank]->dir; - gpio_bank_saved[bank].edge = gpio_array[bank]->edge; - gpio_bank_saved[bank].both = gpio_array[bank]->both; - gpio_bank_saved[bank].reserved = - reserved_gpio_map[bank]; - - gpio = i; - - while (mask) { - if ((mask & 1) && (wakeup_flags_map[gpio] != - PM_WAKE_IGNORE)) { - reserved_gpio_map[gpio_bank(gpio)] |= - gpio_bit(gpio); - bfin_gpio_wakeup_type(gpio, - wakeup_flags_map[gpio]); - set_gpio_data(gpio, 0); /*Clear*/ - } - gpio++; - mask >>= 1; - } - - bfin_internal_set_wake(sic_iwr_irqs[bank], 1); - gpio_array[bank]->maskb_set = wakeup_map[gpio_bank(i)]; - } - } - - AWA_DUMMY_READ(maskb_set); + set_gpio_maskb(gpio, ctrl); + local_irq_restore_hw(flags); return 0; } -void bfin_pm_standby_restore(void) +int bfin_pm_standby_ctrl(unsigned ctrl) { u16 bank, mask, i; @@ -627,24 +539,10 @@ void bfin_pm_standby_restore(void) mask = wakeup_map[gpio_bank(i)]; bank = gpio_bank(i); - if (mask) { -#if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x) - *port_fer[bank] = gpio_bank_saved[bank].fer; -#endif - gpio_array[bank]->inen = gpio_bank_saved[bank].inen; - gpio_array[bank]->dir = gpio_bank_saved[bank].dir; - gpio_array[bank]->polar = gpio_bank_saved[bank].polar; - gpio_array[bank]->edge = gpio_bank_saved[bank].edge; - gpio_array[bank]->both = gpio_bank_saved[bank].both; - - reserved_gpio_map[bank] = - gpio_bank_saved[bank].reserved; - bfin_internal_set_wake(sic_iwr_irqs[bank], 0); - } - - gpio_array[bank]->maskb = gpio_bank_saved[bank].maskb; + if (mask) + bfin_internal_set_wake(sic_iwr_irqs[bank], ctrl); } - AWA_DUMMY_READ(maskb); + return 0; } void bfin_gpio_pm_hibernate_suspend(void) @@ -708,16 +606,11 @@ void bfin_gpio_pm_hibernate_restore(void) #else /* CONFIG_BF54x */ #ifdef CONFIG_PM -u32 bfin_pm_standby_setup(void) +int bfin_pm_standby_ctrl(unsigned ctrl) { return 0; } -void bfin_pm_standby_restore(void) -{ - -} - void bfin_gpio_pm_hibernate_suspend(void) { int i, bank; @@ -1289,44 +1182,50 @@ 
__initcall(gpio_register_proc); #endif #ifdef CONFIG_GPIOLIB -int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio) +static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_direction_input(gpio); } -int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level) +static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level) { return bfin_gpio_direction_output(gpio, level); } -int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio) +static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_get_value(gpio); } -void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value) +static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value) { return bfin_gpio_set_value(gpio, value); } -int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio) +static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_request(gpio, chip->label); } -void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) +static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_free(gpio); } +static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) +{ + return gpio + GPIO_IRQ_BASE; +} + static struct gpio_chip bfin_chip = { - .label = "Blackfin-GPIOlib", + .label = "BFIN-GPIO", .direction_input = bfin_gpiolib_direction_input, .get = bfin_gpiolib_get_value, .direction_output = bfin_gpiolib_direction_output, .set = bfin_gpiolib_set_value, .request = bfin_gpiolib_gpio_request, .free = bfin_gpiolib_gpio_free, + .to_irq = bfin_gpiolib_gpio_to_irq, .base = 0, .ngpio = MAX_BLACKFIN_GPIOS, }; diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c index ed8392c117e..2c264b51566 100644 --- a/arch/blackfin/kernel/bfin_ksyms.c +++ b/arch/blackfin/kernel/bfin_ksyms.c @@ -33,6 +33,18 @@ EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memchr); /* + * Because string functions are both inline and exported functions and + * folder arch/blackfin/lib is configured as a library path in Makefile, + * symbols exported in folder lib is not linked into built-in.o but + * inlined only. In order to export string symbols to kernel module + * properly, they should be exported here. + */ +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strncpy); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strncmp); + +/* * libgcc functions - functions that are used internally by the * compiler... (prototypes are not correct though, but that * doesn't really matter since they're not versioned). diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c index 8d42b9e50df..30fd6417f06 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c @@ -64,6 +64,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); } +#ifdef CONFIG_ROMKERNEL + /* Cover kernel XIP flash area */ + addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); + dcplb_tbl[cpu][i_d].addr = addr; + dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD; + icplb_tbl[cpu][i_i].addr = addr; + icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD; +#endif + /* Cover L1 memory. One 4M area for code and data each is enough. 
*/ #if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0 dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu); diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index 930c01c0681..87b25b1b30e 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c @@ -31,6 +31,12 @@ int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS]; int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS]; int nr_cplb_flush[NR_CPUS]; +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +#define MGR_ATTR __attribute__((l1_text)) +#else +#define MGR_ATTR +#endif + /* * Given the contents of the status register, return the index of the * CPLB that caused the fault. @@ -59,7 +65,7 @@ static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS]; /* * Find an ICPLB entry to be evicted and return its index. */ -static int evict_one_icplb(unsigned int cpu) +MGR_ATTR static int evict_one_icplb(unsigned int cpu) { int i; for (i = first_switched_icplb; i < MAX_CPLBS; i++) @@ -74,7 +80,7 @@ static int evict_one_icplb(unsigned int cpu) return i; } -static int evict_one_dcplb(unsigned int cpu) +MGR_ATTR static int evict_one_dcplb(unsigned int cpu) { int i; for (i = first_switched_dcplb; i < MAX_CPLBS; i++) @@ -89,7 +95,7 @@ static int evict_one_dcplb(unsigned int cpu) return i; } -static noinline int dcplb_miss(unsigned int cpu) +MGR_ATTR static noinline int dcplb_miss(unsigned int cpu) { unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); int status = bfin_read_DCPLB_STATUS(); @@ -114,10 +120,15 @@ static noinline int dcplb_miss(unsigned int cpu) d_data = L2_DMEMORY; } else if (addr >= physical_mem_end) { if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { - addr &= ~(4 * 1024 * 1024 - 1); - d_data &= ~PAGE_SIZE_4KB; - d_data |= PAGE_SIZE_4MB; - d_data |= CPLB_USER_RD | CPLB_USER_WR; + mask = current_rwx_mask[cpu]; + if (mask) { + int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT; + int idx = page >> 5; + int bit = 1 << (page & 31); + + if (mask[idx] & bit) + d_data |= CPLB_USER_RD; + } } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) { addr &= ~(1 * 1024 * 1024 - 1); @@ -126,7 +137,9 @@ static noinline int dcplb_miss(unsigned int cpu) } else return CPLB_PROT_VIOL; } else if (addr >= _ramend) { - d_data |= CPLB_USER_RD | CPLB_USER_WR; + d_data |= CPLB_USER_RD | CPLB_USER_WR; + if (reserved_mem_dcache_on) + d_data |= CPLB_L1_CHBL; } else { mask = current_rwx_mask[cpu]; if (mask) { @@ -156,7 +169,7 @@ static noinline int dcplb_miss(unsigned int cpu) return 0; } -static noinline int icplb_miss(unsigned int cpu) +MGR_ATTR static noinline int icplb_miss(unsigned int cpu) { unsigned long addr = bfin_read_ICPLB_FAULT_ADDR(); int status = bfin_read_ICPLB_STATUS(); @@ -204,10 +217,19 @@ static noinline int icplb_miss(unsigned int cpu) i_data = L2_IMEMORY; } else if (addr >= physical_mem_end) { if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { - addr &= ~(4 * 1024 * 1024 - 1); - i_data &= ~PAGE_SIZE_4KB; - i_data |= PAGE_SIZE_4MB; - i_data |= CPLB_USER_RD; + if (!(status & FAULT_USERSUPV)) { + unsigned long *mask = current_rwx_mask[cpu]; + + if (mask) { + int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT; + int idx = page >> 5; + int bit = 1 << (page & 31); + + mask += 2 * page_mask_nelts; + if (mask[idx] & bit) + i_data |= CPLB_USER_RD; + } + } } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + 
BOOT_ROM_LENGTH && (status & FAULT_USERSUPV)) { addr &= ~(1 * 1024 * 1024 - 1); @@ -217,6 +239,8 @@ static noinline int icplb_miss(unsigned int cpu) return CPLB_PROT_VIOL; } else if (addr >= _ramend) { i_data |= CPLB_USER_RD; + if (reserved_mem_icache_on) + i_data |= CPLB_L1_CHBL; } else { /* * Two cases to distinguish - a supervisor access must @@ -251,7 +275,7 @@ static noinline int icplb_miss(unsigned int cpu) return 0; } -static noinline int dcplb_protection_fault(unsigned int cpu) +MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu) { int status = bfin_read_DCPLB_STATUS(); @@ -271,7 +295,7 @@ static noinline int dcplb_protection_fault(unsigned int cpu) return CPLB_PROT_VIOL; } -int cplb_hdr(int seqstat, struct pt_regs *regs) +MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) { int cause = seqstat & 0x3f; unsigned int cpu = raw_smp_processor_id(); diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c index 282a7919821..bfe75af4e8b 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c @@ -56,6 +56,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; } +#ifdef CONFIG_ROMKERNEL + /* Cover kernel XIP flash area */ + addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); + d_tbl[i_d].addr = addr; + d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; + i_tbl[i_i].addr = addr; + i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; +#endif + /* Cover L1 memory. One 4M area for code and data each is enough. */ if (cpu == 0) { if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index e937f323d82..04ddcfeb798 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c @@ -116,7 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent); void __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) { - _dma_sync(addr, size, dir); + __dma_sync_inline(addr, size, dir); } EXPORT_SYMBOL(__dma_sync); diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c new file mode 100644 index 00000000000..5cfbaa29821 --- /dev/null +++ b/arch/blackfin/kernel/dumpstack.c @@ -0,0 +1,174 @@ +/* Provide basic stack dumping functions + * + * Copyright 2004-2009 Analog Devices Inc. 
+ * + * Licensed under the GPL-2 or later + */ + +#include <linux/kernel.h> +#include <linux/thread_info.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <asm/trace.h> + +/* + * Checks to see if the address pointed to is either a + * 16-bit CALL instruction, or a 32-bit CALL instruction + */ +static bool is_bfin_call(unsigned short *addr) +{ + unsigned int opcode; + + if (!get_instruction(&opcode, addr)) + return false; + + if ((opcode >= 0x0060 && opcode <= 0x0067) || + (opcode >= 0x0070 && opcode <= 0x0077) || + (opcode >= 0xE3000000 && opcode <= 0xE3FFFFFF)) + return true; + + return false; + +} + +void show_stack(struct task_struct *task, unsigned long *stack) +{ +#ifdef CONFIG_PRINTK + unsigned int *addr, *endstack, *fp = 0, *frame; + unsigned short *ins_addr; + char buf[150]; + unsigned int i, j, ret_addr, frame_no = 0; + + /* + * If we have been passed a specific stack, use that one otherwise + * if we have been passed a task structure, use that, otherwise + * use the stack of where the variable "stack" exists + */ + + if (stack == NULL) { + if (task) { + /* We know this is a kernel stack, so this is the start/end */ + stack = (unsigned long *)task->thread.ksp; + endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE); + } else { + /* print out the existing stack info */ + stack = (unsigned long *)&stack; + endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); + } + } else + endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); + + printk(KERN_NOTICE "Stack info:\n"); + decode_address(buf, (unsigned int)stack); + printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); + + if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) { + printk(KERN_NOTICE "Invalid stack pointer\n"); + return; + } + + /* First thing is to look for a frame pointer */ + for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { + if (*addr & 0x1) + continue; + ins_addr = (unsigned short *)*addr; + ins_addr--; + if (is_bfin_call(ins_addr)) + fp = addr - 1; + + if (fp) { + /* Let's check to see if it is a frame pointer */ + while (fp >= (addr - 1) && fp < endstack + && fp && ((unsigned int) fp & 0x3) == 0) + fp = (unsigned int *)*fp; + if (fp == 0 || fp == endstack) { + fp = addr - 1; + break; + } + fp = 0; + } + } + if (fp) { + frame = fp; + printk(KERN_NOTICE " FP: (0x%p)\n", fp); + } else + frame = 0; + + /* + * Now that we think we know where things are, we + * walk the stack again, this time printing things out + * incase there is no frame pointer, we still look for + * valid return addresses + */ + + /* First time print out data, next time, print out symbols */ + for (j = 0; j <= 1; j++) { + if (j) + printk(KERN_NOTICE "Return addresses in stack:\n"); + else + printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack); + + fp = frame; + frame_no = 0; + + for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0; + addr < endstack; addr++, i++) { + + ret_addr = 0; + if (!j && i % 8 == 0) + printk(KERN_NOTICE "%p:", addr); + + /* if it is an odd address, or zero, just skip it */ + if (*addr & 0x1 || !*addr) + goto print; + + ins_addr = (unsigned short *)*addr; + + /* Go back one instruction, and see if it is a CALL */ + ins_addr--; + ret_addr = is_bfin_call(ins_addr); + print: + if (!j && stack == (unsigned long *)addr) + printk("[%08x]", *addr); + else if (ret_addr) + if (j) { + decode_address(buf, (unsigned int)*addr); + if (frame 
== addr) { + printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf); + continue; + } + printk(KERN_NOTICE " address : %s\n", buf); + } else + printk("<%08x>", *addr); + else if (fp == addr) { + if (j) + frame = addr+1; + else + printk("(%08x)", *addr); + + fp = (unsigned int *)*addr; + frame_no++; + + } else if (!j) + printk(" %08x ", *addr); + } + if (!j) + printk("\n"); + } +#endif +} +EXPORT_SYMBOL(show_stack); + +void dump_stack(void) +{ + unsigned long stack; +#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON + int tflags; +#endif + trace_buffer_save(tflags); + dump_bfin_trace_buffer(); + show_stack(current, &stack); + trace_buffer_restore(tflags); +} +EXPORT_SYMBOL(dump_stack); diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S index f27dc2292e1..686478f5f66 100644 --- a/arch/blackfin/kernel/entry.S +++ b/arch/blackfin/kernel/entry.S @@ -44,7 +44,7 @@ ENTRY(_ret_from_fork) sti r4; #endif /* CONFIG_IPIPE */ SP += -12; - call _schedule_tail; + pseudo_long_call _schedule_tail, p5; SP += 12; r0 = [sp + PT_IPEND]; cc = bittst(r0,1); @@ -79,7 +79,7 @@ ENTRY(_sys_vfork) r0 += 24; [--sp] = rets; SP += -12; - call _bfin_vfork; + pseudo_long_call _bfin_vfork, p2; SP += 12; rets = [sp++]; rts; @@ -90,7 +90,7 @@ ENTRY(_sys_clone) r0 += 24; [--sp] = rets; SP += -12; - call _bfin_clone; + pseudo_long_call _bfin_clone, p2; SP += 12; rets = [sp++]; rts; @@ -101,7 +101,7 @@ ENTRY(_sys_rt_sigreturn) r0 += 24; [--sp] = rets; SP += -12; - call _do_rt_sigreturn; + pseudo_long_call _do_rt_sigreturn, p2; SP += 12; rets = [sp++]; rts; diff --git a/arch/blackfin/kernel/exception.c b/arch/blackfin/kernel/exception.c new file mode 100644 index 00000000000..9208b5fd518 --- /dev/null +++ b/arch/blackfin/kernel/exception.c @@ -0,0 +1,45 @@ +/* Basic functions for adding/removing custom exception handlers + * + * Copyright 2004-2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later + */ + +#include <linux/module.h> +#include <asm/irq_handler.h> + +int bfin_request_exception(unsigned int exception, void (*handler)(void)) +{ + void (*curr_handler)(void); + + if (exception > 0x3F) + return -EINVAL; + + curr_handler = ex_table[exception]; + + if (curr_handler != ex_replaceable) + return -EBUSY; + + ex_table[exception] = handler; + + return 0; +} +EXPORT_SYMBOL(bfin_request_exception); + +int bfin_free_exception(unsigned int exception, void (*handler)(void)) +{ + void (*curr_handler)(void); + + if (exception > 0x3F) + return -EINVAL; + + curr_handler = ex_table[exception]; + + if (curr_handler != handler) + return -EBUSY; + + ex_table[exception] = ex_replaceable; + + return 0; +} +EXPORT_SYMBOL(bfin_free_exception); diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S index 76dd4fbcd17..d66446b572c 100644 --- a/arch/blackfin/kernel/ftrace-entry.S +++ b/arch/blackfin/kernel/ftrace-entry.S @@ -1,7 +1,7 @@ /* * mcount and friends -- ftrace stuff * - * Copyright (C) 2009 Analog Devices Inc. + * Copyright (C) 2009-2010 Analog Devices Inc. * Licensed under the GPL-2 or later. */ @@ -21,6 +21,15 @@ * function will be waiting there. mmmm pie. */ ENTRY(__mcount) +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + /* optional micro optimization: return if stopped */ + p1.l = _function_trace_stop; + p1.h = _function_trace_stop; + r3 = [p1]; + cc = r3 == 0; + if ! 
cc jump _ftrace_stub (bp); +#endif + /* save third function arg early so we can do testing below */ [--sp] = r2; @@ -106,9 +115,12 @@ ENTRY(_ftrace_graph_caller) [--sp] = r1; [--sp] = rets; - /* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */ - r0 = sp; - r1 = rets; + /* prepare_ftrace_return(parent, self_addr, frame_pointer) */ + r0 = sp; /* unsigned long *parent */ + r1 = rets; /* unsigned long self_addr */ +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST + r2 = fp; /* unsigned long frame_pointer */ +#endif r0 += 16; /* skip the 4 local regs on stack */ r1 += -MCOUNT_INSN_SIZE; call _prepare_ftrace_return; @@ -127,6 +139,9 @@ ENTRY(_return_to_handler) [--sp] = r1; /* get original return address */ +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST + r0 = fp; /* Blackfin is sane, so omit this */ +#endif call _ftrace_return_to_handler; rets = r0; diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c index f2c85ac6f2d..a61d948ea92 100644 --- a/arch/blackfin/kernel/ftrace.c +++ b/arch/blackfin/kernel/ftrace.c @@ -16,7 +16,8 @@ * Hook the return address and push it in the stack of return addrs * in current thread info. */ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) { struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long)&return_to_handler; @@ -24,7 +25,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; - if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, 0) == -EBUSY) + if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, + frame_pointer) == -EBUSY) return; trace.func = self_addr; diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c index 118c5b9deda..d3970e8acd1 100644 --- a/arch/blackfin/kernel/init_task.c +++ b/arch/blackfin/kernel/init_task.c @@ -28,5 +28,5 @@ EXPORT_SYMBOL(init_task); * "init_task" linker map entry. 
*/ union thread_union init_thread_union - __attribute__ ((__section__(".init_task.data"))) = { + __init_task_data = { INIT_THREAD_INFO(init_task)}; diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c index a77307a4473..1a496cd71ba 100644 --- a/arch/blackfin/kernel/ipipe.c +++ b/arch/blackfin/kernel/ipipe.c @@ -27,7 +27,6 @@ #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/bitops.h> -#include <linux/slab.h> #include <linux/errno.h> #include <linux/kthread.h> #include <linux/unistd.h> diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c index 34c7c3ed2c9..08bc44ea688 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c @@ -66,7 +66,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) gdb_regs[BFIN_RETN] = regs->retn; gdb_regs[BFIN_RETE] = regs->rete; gdb_regs[BFIN_PC] = regs->pc; - gdb_regs[BFIN_CC] = 0; + gdb_regs[BFIN_CC] = (regs->astat >> 5) & 1; gdb_regs[BFIN_EXTRA1] = 0; gdb_regs[BFIN_EXTRA2] = 0; gdb_regs[BFIN_EXTRA3] = 0; @@ -145,7 +145,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) #endif } -struct hw_breakpoint { +static struct hw_breakpoint { unsigned int occupied:1; unsigned int skip:1; unsigned int enabled:1; @@ -155,7 +155,7 @@ struct hw_breakpoint { unsigned int addr; } breakinfo[HW_WATCHPOINT_NUM]; -int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) +static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) { int breakno; int bfin_type; @@ -202,7 +202,7 @@ int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) return -ENOSPC; } -int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) +static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) { int breakno; int bfin_type; @@ -230,7 +230,7 @@ int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) return 0; } -void bfin_remove_all_hw_break(void) +static void bfin_remove_all_hw_break(void) { int breakno; @@ -242,7 +242,7 @@ void bfin_remove_all_hw_break(void) breakinfo[breakno].type = TYPE_DATA_WATCHPOINT; } -void bfin_correct_hw_break(void) +static void bfin_correct_hw_break(void) { int breakno; unsigned int wpiactl = 0; @@ -439,6 +439,11 @@ int kgdb_validate_break_address(unsigned long addr) return -EFAULT; } +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->retx = ip; +} + int kgdb_arch_init(void) { kgdb_single_step = 0; diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c new file mode 100644 index 00000000000..0b5f72f17fd --- /dev/null +++ b/arch/blackfin/kernel/nmi.c @@ -0,0 +1,299 @@ +/* + * Blackfin nmi_watchdog Driver + * + * Originally based on bfin_wdt.c + * Copyright 2010-2010 Analog Devices Inc. + * Graff Yang <graf.yang@analog.com> + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. 
+ */ + +#include <linux/bitops.h> +#include <linux/hardirq.h> +#include <linux/sysdev.h> +#include <linux/pm.h> +#include <linux/nmi.h> +#include <linux/smp.h> +#include <linux/timer.h> +#include <asm/blackfin.h> +#include <asm/atomic.h> +#include <asm/cacheflush.h> +#include <asm/bfin_watchdog.h> + +#define DRV_NAME "nmi-wdt" + +#define NMI_WDT_TIMEOUT 5 /* 5 seconds */ +#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */ +static int nmi_wdt_cpu = 1; + +static unsigned int timeout = NMI_WDT_TIMEOUT; +static int nmi_active; + +static unsigned short wdoga_ctl; +static unsigned int wdoga_cnt; +static struct corelock_slot saved_corelock; +static atomic_t nmi_touched[NR_CPUS]; +static struct timer_list ntimer; + +enum { + COREA_ENTER_NMI = 0, + COREA_EXIT_NMI, + COREB_EXIT_NMI, + + NMI_EVENT_NR, +}; +static unsigned long nmi_event __attribute__ ((__section__(".l2.bss"))); + +/* we are in nmi, non-atomic bit ops is safe */ +static inline void set_nmi_event(int event) +{ + __set_bit(event, &nmi_event); +} + +static inline void wait_nmi_event(int event) +{ + while (!test_bit(event, &nmi_event)) + barrier(); + __clear_bit(event, &nmi_event); +} + +static inline void send_corea_nmi(void) +{ + wdoga_ctl = bfin_read_WDOGA_CTL(); + wdoga_cnt = bfin_read_WDOGA_CNT(); + + bfin_write_WDOGA_CTL(WDEN_DISABLE); + bfin_write_WDOGA_CNT(0); + bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI); +} + +static inline void restore_corea_nmi(void) +{ + bfin_write_WDOGA_CTL(WDEN_DISABLE); + bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE); + + bfin_write_WDOGA_CNT(wdoga_cnt); + bfin_write_WDOGA_CTL(wdoga_ctl); +} + +static inline void save_corelock(void) +{ + saved_corelock = corelock; + corelock.lock = 0; +} + +static inline void restore_corelock(void) +{ + corelock = saved_corelock; +} + + +static inline void nmi_wdt_keepalive(void) +{ + bfin_write_WDOGB_STAT(0); +} + +static inline void nmi_wdt_stop(void) +{ + bfin_write_WDOGB_CTL(WDEN_DISABLE); +} + +/* before calling this function, you must stop the WDT */ +static inline void nmi_wdt_clear(void) +{ + /* clear TRO bit, disable event generation */ + bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE); +} + +static inline void nmi_wdt_start(void) +{ + bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI); +} + +static inline int nmi_wdt_running(void) +{ + return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE); +} + +static inline int nmi_wdt_set_timeout(unsigned long t) +{ + u32 cnt, max_t, sclk; + int run; + + sclk = get_sclk(); + max_t = -1 / sclk; + cnt = t * sclk; + if (t > max_t) { + pr_warning("NMI: timeout value is too large\n"); + return -EINVAL; + } + + run = nmi_wdt_running(); + nmi_wdt_stop(); + bfin_write_WDOGB_CNT(cnt); + if (run) + nmi_wdt_start(); + + timeout = t; + + return 0; +} + +int check_nmi_wdt_touched(void) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; + + cpumask_t mask = cpu_online_map; + + if (!atomic_read(&nmi_touched[this_cpu])) + return 0; + + atomic_set(&nmi_touched[this_cpu], 0); + + cpu_clear(this_cpu, mask); + for_each_cpu_mask(cpu, mask) { + invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]), + (unsigned long)(&nmi_touched[cpu])); + if (!atomic_read(&nmi_touched[cpu])) + return 0; + atomic_set(&nmi_touched[cpu], 0); + } + + return 1; +} + +static void nmi_wdt_timer(unsigned long data) +{ + if (check_nmi_wdt_touched()) + nmi_wdt_keepalive(); + + mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT); +} + +static int __init init_nmi_wdt(void) +{ + nmi_wdt_set_timeout(timeout); + 
nmi_wdt_start(); + nmi_active = true; + + init_timer(&ntimer); + ntimer.function = nmi_wdt_timer; + ntimer.expires = jiffies + NMI_CHECK_TIMEOUT; + add_timer(&ntimer); + + pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout); + return 0; +} +device_initcall(init_nmi_wdt); + +void touch_nmi_watchdog(void) +{ + atomic_set(&nmi_touched[smp_processor_id()], 1); +} + +/* Suspend/resume support */ +#ifdef CONFIG_PM +static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) +{ + nmi_wdt_stop(); + return 0; +} + +static int nmi_wdt_resume(struct sys_device *dev) +{ + if (nmi_active) + nmi_wdt_start(); + return 0; +} + +static struct sysdev_class nmi_sysclass = { + .name = DRV_NAME, + .resume = nmi_wdt_resume, + .suspend = nmi_wdt_suspend, +}; + +static struct sys_device device_nmi_wdt = { + .id = 0, + .cls = &nmi_sysclass, +}; + +static int __init init_nmi_wdt_sysfs(void) +{ + int error; + + if (!nmi_active) + return 0; + + error = sysdev_class_register(&nmi_sysclass); + if (!error) + error = sysdev_register(&device_nmi_wdt); + return error; +} +late_initcall(init_nmi_wdt_sysfs); + +#endif /* CONFIG_PM */ + + +asmlinkage notrace void do_nmi(struct pt_regs *fp) +{ + unsigned int cpu = smp_processor_id(); + nmi_enter(); + + cpu_pda[cpu].__nmi_count += 1; + + if (cpu == nmi_wdt_cpu) { + /* CoreB goes here first */ + + /* reload the WDOG_STAT */ + nmi_wdt_keepalive(); + + /* clear nmi interrupt for CoreB */ + nmi_wdt_stop(); + nmi_wdt_clear(); + + /* trigger NMI interrupt of CoreA */ + send_corea_nmi(); + + /* waiting CoreB to enter NMI */ + wait_nmi_event(COREA_ENTER_NMI); + + /* recover WDOGA's settings */ + restore_corea_nmi(); + + save_corelock(); + + /* corelock is save/cleared, CoreA is dummping messages */ + + wait_nmi_event(COREA_EXIT_NMI); + } else { + /* OK, CoreA entered NMI */ + set_nmi_event(COREA_ENTER_NMI); + } + + pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu); + dump_bfin_process(fp); + dump_bfin_mem(fp); + show_regs(fp); + dump_bfin_trace_buffer(); + show_stack(current, (unsigned long *)fp); + + if (cpu == nmi_wdt_cpu) { + pr_emerg("This fault is not recoverable, sorry!\n"); + + /* CoreA dump finished, restore the corelock */ + restore_corelock(); + + set_nmi_event(COREB_EXIT_NMI); + } else { + /* CoreB dump finished, notice the CoreA we are done */ + set_nmi_event(COREA_EXIT_NMI); + + /* synchronize with CoreA */ + wait_nmi_event(COREB_EXIT_NMI); + } + + nmi_exit(); +} diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index b56b0e485e0..93ec07da2e5 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -11,6 +11,7 @@ #include <linux/unistd.h> #include <linux/user.h> #include <linux/uaccess.h> +#include <linux/slab.h> #include <linux/sched.h> #include <linux/tick.h> #include <linux/fs.h> @@ -98,13 +99,6 @@ void cpu_idle(void) } } -/* Fill in the fpu structure for a core dump. */ - -int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs) -{ - return 1; -} - /* * This gets run with P1 containing the * function to call, and R1 containing diff --git a/arch/blackfin/kernel/pseudodbg.c b/arch/blackfin/kernel/pseudodbg.c new file mode 100644 index 00000000000..db85bc94334 --- /dev/null +++ b/arch/blackfin/kernel/pseudodbg.c @@ -0,0 +1,191 @@ +/* The fake debug assert instructions + * + * Copyright 2010 Analog Devices Inc. 
+ * + * Licensed under the GPL-2 or later + */ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/ptrace.h> + +const char * const greg_names[] = { + "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", + "P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP", + "I0", "I1", "I2", "I3", "M0", "M1", "M2", "M3", + "B0", "B1", "B2", "B3", "L0", "L1", "L2", "L3", + "A0.X", "A0.W", "A1.X", "A1.W", "<res>", "<res>", "ASTAT", "RETS", + "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", + "LC0", "LT0", "LB0", "LC1", "LT1", "LB1", "CYCLES", "CYCLES2", + "USP", "SEQSTAT", "SYSCFG", "RETI", "RETX", "RETN", "RETE", "EMUDAT", +}; + +static const char *get_allreg_name(int grp, int reg) +{ + return greg_names[(grp << 3) | reg]; +} + +/* + * Unfortunately, the pt_regs structure is not laid out the same way as the + * hardware register file, so we need to do some fix ups. + * + * CYCLES is not stored in the pt_regs structure - so, we just read it from + * the hardware. + * + * Don't support: + * - All reserved registers + * - All in group 7 are (supervisors only) + */ + +static bool fix_up_reg(struct pt_regs *fp, long *value, int grp, int reg) +{ + long *val = &fp->r0; + unsigned long tmp; + + /* Only do Dregs and Pregs for now */ + if (grp == 5 || + (grp == 4 && (reg == 4 || reg == 5)) || + (grp == 7)) + return false; + + if (grp == 0 || (grp == 1 && reg < 6)) + val -= (reg + 8 * grp); + else if (grp == 1 && reg == 6) + val = &fp->usp; + else if (grp == 1 && reg == 7) + val = &fp->fp; + else if (grp == 2) { + val = &fp->i0; + val -= reg; + } else if (grp == 3 && reg >= 4) { + val = &fp->l0; + val -= (reg - 4); + } else if (grp == 3 && reg < 4) { + val = &fp->b0; + val -= reg; + } else if (grp == 4 && reg < 4) { + val = &fp->a0x; + val -= reg; + } else if (grp == 4 && reg == 6) + val = &fp->astat; + else if (grp == 4 && reg == 7) + val = &fp->rets; + else if (grp == 6 && reg < 6) { + val = &fp->lc0; + val -= reg; + } else if (grp == 6 && reg == 6) { + __asm__ __volatile__("%0 = cycles;\n" : "=d"(tmp)); + val = &tmp; + } else if (grp == 6 && reg == 7) { + __asm__ __volatile__("%0 = cycles2;\n" : "=d"(tmp)); + val = &tmp; + } + + *value = *val; + return true; + +} + +#define PseudoDbg_Assert_opcode 0xf0000000 +#define PseudoDbg_Assert_expected_bits 0 +#define PseudoDbg_Assert_expected_mask 0xffff +#define PseudoDbg_Assert_regtest_bits 16 +#define PseudoDbg_Assert_regtest_mask 0x7 +#define PseudoDbg_Assert_grp_bits 19 +#define PseudoDbg_Assert_grp_mask 0x7 +#define PseudoDbg_Assert_dbgop_bits 22 +#define PseudoDbg_Assert_dbgop_mask 0x3 +#define PseudoDbg_Assert_dontcare_bits 24 +#define PseudoDbg_Assert_dontcare_mask 0x7 +#define PseudoDbg_Assert_code_bits 27 +#define PseudoDbg_Assert_code_mask 0x1f + +/* + * DBGA - debug assert + */ +bool execute_pseudodbg_assert(struct pt_regs *fp, unsigned int opcode) +{ + int expected = ((opcode >> PseudoDbg_Assert_expected_bits) & PseudoDbg_Assert_expected_mask); + int dbgop = ((opcode >> (PseudoDbg_Assert_dbgop_bits)) & PseudoDbg_Assert_dbgop_mask); + int grp = ((opcode >> (PseudoDbg_Assert_grp_bits)) & PseudoDbg_Assert_grp_mask); + int regtest = ((opcode >> (PseudoDbg_Assert_regtest_bits)) & PseudoDbg_Assert_regtest_mask); + long value; + + if ((opcode & 0xFF000000) != PseudoDbg_Assert_opcode) + return false; + + if (!fix_up_reg(fp, &value, grp, regtest)) + return false; + + if (dbgop == 0 || dbgop == 2) { + /* DBGA ( regs_lo , uimm16 ) */ + /* DBGAL ( regs , uimm16 ) */ + if (expected != (value & 0xFFFF)) { + pr_notice("DBGA (%s.L,0x%x) 
failure, got 0x%x\n", + get_allreg_name(grp, regtest), + expected, (unsigned int)(value & 0xFFFF)); + return false; + } + + } else if (dbgop == 1 || dbgop == 3) { + /* DBGA ( regs_hi , uimm16 ) */ + /* DBGAH ( regs , uimm16 ) */ + if (expected != ((value >> 16) & 0xFFFF)) { + pr_notice("DBGA (%s.H,0x%x) failure, got 0x%x\n", + get_allreg_name(grp, regtest), + expected, (unsigned int)((value >> 16) & 0xFFFF)); + return false; + } + } + + fp->pc += 4; + return true; +} + +#define PseudoDbg_opcode 0xf8000000 +#define PseudoDbg_reg_bits 0 +#define PseudoDbg_reg_mask 0x7 +#define PseudoDbg_grp_bits 3 +#define PseudoDbg_grp_mask 0x7 +#define PseudoDbg_fn_bits 6 +#define PseudoDbg_fn_mask 0x3 +#define PseudoDbg_code_bits 8 +#define PseudoDbg_code_mask 0xff + +/* + * DBG - debug (dump a register value out) + */ +bool execute_pseudodbg(struct pt_regs *fp, unsigned int opcode) +{ + int grp, fn, reg; + long value, value1; + + if ((opcode & 0xFF000000) != PseudoDbg_opcode) + return false; + + opcode >>= 16; + grp = ((opcode >> PseudoDbg_grp_bits) & PseudoDbg_reg_mask); + fn = ((opcode >> PseudoDbg_fn_bits) & PseudoDbg_fn_mask); + reg = ((opcode >> PseudoDbg_reg_bits) & PseudoDbg_reg_mask); + + if (fn == 3 && (reg == 0 || reg == 1)) { + if (!fix_up_reg(fp, &value, 4, 2 * reg)) + return false; + if (!fix_up_reg(fp, &value1, 4, 2 * reg + 1)) + return false; + + pr_notice("DBG A%i = %02lx%08lx\n", reg, value & 0xFF, value1); + fp->pc += 2; + return true; + + } else if (fn == 0) { + if (!fix_up_reg(fp, &value, grp, reg)) + return false; + + pr_notice("DBG %s = %08lx\n", get_allreg_name(grp, reg), value); + fp->pc += 2; + return true; + } + + return false; +} diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 65567dc4b9f..6ec77685df5 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -1,6 +1,6 @@ /* * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds - * these modifications are Copyright 2004-2009 Analog Devices Inc. + * these modifications are Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 */ @@ -9,10 +9,13 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> +#include <linux/elf.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> +#include <linux/regset.h> #include <linux/signal.h> +#include <linux/tracehook.h> #include <linux/uaccess.h> #include <asm/page.h> @@ -25,90 +28,57 @@ #include <asm/cacheflush.h> #include <asm/mem_map.h> -#define TEXT_OFFSET 0 /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ -/* determines which bits in the SYSCFG reg the user has access to. */ -/* 1 = access 0 = no access */ -#define SYSCFG_MASK 0x0007 /* SYSCFG reg */ -/* sets the trace bits. */ -#define TRACE_BITS 0x0001 - -/* Find the stack offset for a register, relative to thread.esp0. */ -#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) - -/* - * Get the address of the live pt_regs for the specified task. - * These are saved onto the top kernel stack when the process - * is not running. - * - * Note: if a user thread is execve'd from kernel space, the - * kernel stack will not be empty on entry to the kernel, so - * ptracing these tasks will fail. - */ -static inline struct pt_regs *get_user_regs(struct task_struct *task) -{ - return (struct pt_regs *) - ((unsigned long)task_stack_page(task) + - (THREAD_SIZE - sizeof(struct pt_regs))); -} - -/* - * Get all user integer registers. 
- */ -static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs) -{ - struct pt_regs regs; - memcpy(®s, get_user_regs(tsk), sizeof(regs)); - regs.usp = tsk->thread.usp; - return copy_to_user(uregs, ®s, sizeof(struct pt_regs)) ? -EFAULT : 0; -} - -/* Mapping from PT_xxx to the stack offset at which the register is - * saved. Notice that usp has no stack-slot and needs to be treated - * specially (see get_reg/put_reg below). - */ - /* * Get contents of register REGNO in task TASK. */ -static inline long get_reg(struct task_struct *task, int regno) +static inline long +get_reg(struct task_struct *task, long regno, unsigned long __user *datap) { - unsigned char *reg_ptr; + long tmp; + struct pt_regs *regs = task_pt_regs(task); - struct pt_regs *regs = - (struct pt_regs *)((unsigned long)task_stack_page(task) + - (THREAD_SIZE - sizeof(struct pt_regs))); - reg_ptr = (char *)regs; + if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) + return -EIO; switch (regno) { + case PT_TEXT_ADDR: + tmp = task->mm->start_code; + break; + case PT_TEXT_END_ADDR: + tmp = task->mm->end_code; + break; + case PT_DATA_ADDR: + tmp = task->mm->start_data; + break; case PT_USP: - return task->thread.usp; + tmp = task->thread.usp; + break; default: - if (regno <= 216) - return *(long *)(reg_ptr + regno); + if (regno < sizeof(*regs)) { + void *reg_ptr = regs; + tmp = *(long *)(reg_ptr + regno); + } else + return -EIO; } - /* slight mystery ... never seems to come here but kernel misbehaves without this code! */ - printk(KERN_WARNING "Request to get for unknown register %d\n", regno); - return 0; + return put_user(tmp, datap); } /* * Write contents of register REGNO in task TASK. */ static inline int -put_reg(struct task_struct *task, int regno, unsigned long data) +put_reg(struct task_struct *task, long regno, unsigned long data) { - char *reg_ptr; + struct pt_regs *regs = task_pt_regs(task); - struct pt_regs *regs = - (struct pt_regs *)((unsigned long)task_stack_page(task) + - (THREAD_SIZE - sizeof(struct pt_regs))); - reg_ptr = (char *)regs; + if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) + return -EIO; switch (regno) { case PT_PC: @@ -125,10 +95,18 @@ put_reg(struct task_struct *task, int regno, unsigned long data) regs->usp = data; task->thread.usp = data; break; + case PT_SYSCFG: /* don't let userspace screw with this */ + if ((data & ~1) != 0x6) + pr_warning("ptrace: ignore syscfg write of %#lx\n", data); + break; /* regs->syscfg = data; break; */ default: - if (regno <= 216) - *(long *)(reg_ptr + regno) = data; + if (regno < sizeof(*regs)) { + void *reg_offset = regs; + *(long *)(reg_offset + regno) = data; + } + /* Ignore writes to pseudo registers */ } + return 0; } @@ -160,24 +138,98 @@ static inline int is_user_addr_valid(struct task_struct *child, return -EIO; } -void ptrace_enable(struct task_struct *child) +/* + * retrieve the contents of Blackfin userspace general registers + */ +static int genregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* This sucks ... 
*/ + regs->usp = target->thread.usp; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs, 0, sizeof(*regs)); + if (ret < 0) + return ret; + + return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + sizeof(*regs), -1); +} + +/* + * update the contents of the Blackfin userspace general registers + */ +static int genregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { - unsigned long tmp; - tmp = get_reg(child, PT_SYSCFG) | (TRACE_BITS); - put_reg(child, PT_SYSCFG, tmp); + struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* Don't let people set SYSCFG (it's at the end of pt_regs) */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs, 0, PT_SYSCFG); + if (ret < 0) + return ret; + + /* This sucks ... */ + target->thread.usp = regs->usp; + /* regs->retx = regs->pc; */ + + return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + PT_SYSCFG, -1); } /* - * Called by kernel/ptrace.c when detaching.. - * - * Make sure the single step bit is not set. + * Define the register sets available on the Blackfin under Linux */ -void ptrace_disable(struct task_struct *child) +enum bfin_regset { + REGSET_GENERAL, +}; + +static const struct user_regset bfin_regsets[] = { + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = sizeof(struct pt_regs) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .get = genregs_get, + .set = genregs_set, + }, +}; + +static const struct user_regset_view user_bfin_native_view = { + .name = "Blackfin", + .e_machine = EM_BLACKFIN, + .regsets = bfin_regsets, + .n = ARRAY_SIZE(bfin_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_bfin_native_view; +} + +void user_enable_single_step(struct task_struct *child) +{ + struct pt_regs *regs = task_pt_regs(child); + regs->syscfg |= SYSCFG_SSSTEP; + + set_tsk_thread_flag(child, TIF_SINGLESTEP); +} + +void user_disable_single_step(struct task_struct *child) { - unsigned long tmp; - /* make sure the single step bit is not set. */ - tmp = get_reg(child, PT_SYSCFG) & ~TRACE_BITS; - put_reg(child, PT_SYSCFG, tmp); + struct pt_regs *regs = task_pt_regs(child); + regs->syscfg &= ~SYSCFG_SSSTEP; + + clear_tsk_thread_flag(child, TIF_SINGLESTEP); } long arch_ptrace(struct task_struct *child, long request, long addr, long data) @@ -240,62 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } - /* read the word at location addr in the USER area. 
*/ - case PTRACE_PEEKUSR: - { - unsigned long tmp; - ret = -EIO; - tmp = 0; - if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { - printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning " - "0 - %x sizeof(pt_regs) is %lx\n", - (int)addr, sizeof(struct pt_regs)); - break; - } - if (addr == sizeof(struct pt_regs)) { - /* PT_TEXT_ADDR */ - tmp = child->mm->start_code + TEXT_OFFSET; - } else if (addr == (sizeof(struct pt_regs) + 4)) { - /* PT_TEXT_END_ADDR */ - tmp = child->mm->end_code; - } else if (addr == (sizeof(struct pt_regs) + 8)) { - /* PT_DATA_ADDR */ - tmp = child->mm->start_data; -#ifdef CONFIG_BINFMT_ELF_FDPIC - } else if (addr == (sizeof(struct pt_regs) + 12)) { - goto case_PTRACE_GETFDPIC_EXEC; - } else if (addr == (sizeof(struct pt_regs) + 16)) { - goto case_PTRACE_GETFDPIC_INTERP; -#endif - } else { - tmp = get_reg(child, addr); - } - ret = put_user(tmp, datap); - break; - } - -#ifdef CONFIG_BINFMT_ELF_FDPIC - case PTRACE_GETFDPIC: { - unsigned long tmp = 0; - - switch (addr) { - case_PTRACE_GETFDPIC_EXEC: - case PTRACE_GETFDPIC_EXEC: - tmp = child->mm->context.exec_fdpic_loadmap; - break; - case_PTRACE_GETFDPIC_INTERP: - case PTRACE_GETFDPIC_INTERP: - tmp = child->mm->context.interp_fdpic_loadmap; - break; - default: - break; - } - - ret = put_user(tmp, datap); - break; - } -#endif - /* when I and D space are separate, this will have to be fixed. */ case PTRACE_POKEDATA: pr_debug("ptrace: PTRACE_PEEKDATA\n"); @@ -336,79 +332,44 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } - case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ - ret = -EIO; - if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { - printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n"); - break; - } - - if (addr >= (sizeof(struct pt_regs))) { - ret = 0; - break; - } - if (addr == PT_SYSCFG) { - data &= SYSCFG_MASK; - data |= get_reg(child, PT_SYSCFG); + case PTRACE_PEEKUSR: + switch (addr) { +#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */ + case PT_FDPIC_EXEC: + request = PTRACE_GETFDPIC; + addr = PTRACE_GETFDPIC_EXEC; + goto case_default; + case PT_FDPIC_INTERP: + request = PTRACE_GETFDPIC; + addr = PTRACE_GETFDPIC_INTERP; + goto case_default; +#endif + default: + ret = get_reg(child, addr, datap); } - ret = put_reg(child, addr, data); - break; - - case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ - case PTRACE_CONT: /* restart after signal. */ - pr_debug("ptrace: syscall/cont\n"); - - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - ptrace_disable(child); - pr_debug("ptrace: before wake_up_process\n"); - wake_up_process(child); - ret = 0; - break; - - /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: - ret = 0; - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - break; - child->exit_code = SIGKILL; - ptrace_disable(child); - wake_up_process(child); + pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret); break; - case PTRACE_SINGLESTEP: /* set the trap flag. 
*/ - pr_debug("ptrace: single step\n"); - ret = -EIO; - if (!valid_signal(data)) - break; - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - ptrace_enable(child); - child->exit_code = data; - wake_up_process(child); - ret = 0; + case PTRACE_POKEUSR: + ret = put_reg(child, addr, data); + pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret); break; case PTRACE_GETREGS: - /* Get all gp regs from the child. */ - ret = ptrace_getregs(child, datap); - break; + pr_debug("ptrace: PTRACE_GETREGS\n"); + return copy_regset_to_user(child, &user_bfin_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (void __user *)data); case PTRACE_SETREGS: - printk(KERN_WARNING "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n"); - /* Set all gp regs in the child. */ - ret = 0; - break; + pr_debug("ptrace: PTRACE_SETREGS\n"); + return copy_regset_from_user(child, &user_bfin_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (const void __user *)data); + case_default: default: ret = ptrace_request(child, request, addr, data); break; @@ -417,27 +378,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) return ret; } -asmlinkage void syscall_trace(void) +asmlinkage int syscall_trace_enter(struct pt_regs *regs) { - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; - - if (!(current->ptrace & PT_PTRACED)) - return; - - /* the 0x80 provides a way for the tracing parent to distinguish - * between a syscall stop and SIGTRAP delivery - */ - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) - ? 0x80 : 0)); - - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + int ret = 0; + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ret = tracehook_report_syscall_entry(regs); + + return ret; +} + +asmlinkage void syscall_trace_leave(struct pt_regs *regs) +{ + int step; + + step = test_thread_flag(TIF_SINGLESTEP); + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, step); } diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 95448ae9c43..d37a397f43f 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2009 Analog Devices Inc. + * Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. 
*/ @@ -220,6 +220,16 @@ void __init bfin_relocate_l1_mem(void) memcpy(_stext_l2, _l2_lma, l2_len); } +#ifdef CONFIG_ROMKERNEL +void __init bfin_relocate_xip_data(void) +{ + early_shadow_stamp(); + + memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info)); + memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len); +} +#endif + /* add_memory_region to memmap */ static void __init add_memory_region(unsigned long long start, unsigned long long size, int type) @@ -504,7 +514,7 @@ static __init void memory_setup(void) #endif unsigned long max_mem; - _rambase = (unsigned long)_stext; + _rambase = CONFIG_BOOT_LOAD; _ramstart = (unsigned long)_end; if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { @@ -597,7 +607,12 @@ static __init void memory_setup(void) } #ifdef CONFIG_MPU +#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM) + page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE - + ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32; +#else page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32; +#endif page_mask_order = get_order(3 * page_mask_nelts * sizeof(long)); #endif @@ -630,7 +645,7 @@ static __init void memory_setup(void) __bss_start, __bss_stop, _sdata, _edata, (void *)&init_thread_union, - (void *)((int)(&init_thread_union) + 0x2000), + (void *)((int)(&init_thread_union) + THREAD_SIZE), __init_begin, __init_end, (void *)_ramstart, (void *)memory_end #ifdef CONFIG_MTD_UCLINUX @@ -792,10 +807,17 @@ static inline int __init get_mem_size(void) BUG(); } +__attribute__((weak)) +void __init native_machine_early_platform_add_devices(void) +{ +} + void __init setup_arch(char **cmdline_p) { unsigned long sclk, cclk; + native_machine_early_platform_add_devices(); + enable_shadow_console(); /* Check to make sure we are running on the right processor */ @@ -903,7 +925,7 @@ void __init setup_arch(char **cmdline_p) else if (_bfin_swrst & RESET_SOFTWARE) printk(KERN_NOTICE "Reset caused by Software reset\n"); - printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n"); + printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n"); if (bfin_compiled_revid() == 0xffff) printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid()); else if (bfin_compiled_revid() == -1) @@ -1217,10 +1239,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, BFIN_DLINES); #ifdef __ARCH_SYNC_CORE_DCACHE - seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); + seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]); #endif #ifdef __ARCH_SYNC_CORE_ICACHE - seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); + seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]); #endif if (cpu_num != num_possible_cpus() - 1) @@ -1249,8 +1271,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", - ((int)memory_end - (int)_stext) >> 10, - _stext, + ((int)memory_end - (int)_rambase) >> 10, + (void *)_rambase, (void *)memory_end); seq_printf(m, "\n"); diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index e0fd63e9e38..d536f35d1f4 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2009 Analog Devices Inc. 
+ * Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later */ @@ -17,6 +17,7 @@ #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/fixed_code.h> +#include <asm/syscall.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -50,6 +51,9 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p unsigned long usp = 0; int err = 0; + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + #define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) /* restore passed registers */ @@ -206,16 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, regs->r1 = (unsigned long)(&frame->info); regs->r2 = (unsigned long)(&frame->uc); - /* - * Clear the trace flag when entering the signal handler, but - * notify any tracer that was single-stepping it. The tracer - * may want to single-step inside the handler too. - */ - if (regs->syscfg & TRACE_BITS) { - regs->syscfg &= ~TRACE_BITS; - ptrace_notify(SIGTRAP); - } - return 0; give_sigsegv: @@ -247,6 +241,11 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->r0 = regs->orig_r0; regs->pc -= 2; break; + + case -ERESTART_RESTARTBLOCK: + regs->p0 = __NR_restart_syscall; + regs->pc -= 2; + break; } } @@ -315,6 +314,9 @@ asmlinkage void do_signal(struct pt_regs *regs) * clear the TIF_RESTORE_SIGMASK flag */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); + + tracehook_signal_handler(signr, &info, &ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } return; diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c index 2e7f8e10bf8..bdc1e2f0da3 100644 --- a/arch/blackfin/kernel/sys_bfin.c +++ b/arch/blackfin/kernel/sys_bfin.c @@ -47,3 +47,26 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, } EXPORT_SYMBOL(get_fb_unmapped_area); #endif + +/* Needed for legacy userspace atomic emulation */ +static DEFINE_SPINLOCK(bfin_spinlock_lock); + +#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1 +__attribute__((l1_text)) +#endif +asmlinkage int sys_bfin_spinlock(int *p) +{ + int ret, tmp = 0; + + spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. 
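+				  * (ret here follows the legacy userspace testset
+				  * convention visible below: 0 when the word at p was
+				  * clear and we set it to 1, 1 when it was already held,
+				  * or get_user()'s error code if the pointer faults)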
*/ + ret = get_user(tmp, p); + if (likely(ret == 0)) { + if (unlikely(tmp)) + ret = 1; + else + put_user(1, p); + } + spin_unlock(&bfin_spinlock_lock); + + return ret; +} diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 17c38c5b5b2..8c9a43daf80 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -21,6 +21,7 @@ #include <asm/blackfin.h> #include <asm/time.h> #include <asm/gptimers.h> +#include <asm/nmi.h> /* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) @@ -50,7 +51,11 @@ static notrace cycle_t bfin_read_cycles(struct clocksource *cs) { +#ifdef CONFIG_CPU_FREQ return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); +#else + return get_cycles(); +#endif } static struct clocksource bfin_cs_cycles = { @@ -132,7 +137,6 @@ static int __init bfin_cs_gptimer0_init(void) # define bfin_cs_gptimer0_init() #endif - #if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE) /* prefer to use cycles since it has higher rating */ notrace unsigned long long sched_clock(void) @@ -145,47 +149,8 @@ notrace unsigned long long sched_clock(void) } #endif -#ifdef CONFIG_CORE_TIMER_IRQ_L1 -__attribute__((l1_text)) -#endif -irqreturn_t timer_interrupt(int irq, void *dev_id); - -static int bfin_timer_set_next_event(unsigned long, \ - struct clock_event_device *); - -static void bfin_timer_set_mode(enum clock_event_mode, \ - struct clock_event_device *); - -static struct clock_event_device clockevent_bfin = { -#if defined(CONFIG_TICKSOURCE_GPTMR0) - .name = "bfin_gptimer0", - .rating = 300, - .irq = IRQ_TIMER0, -#else - .name = "bfin_core_timer", - .rating = 350, - .irq = IRQ_CORETMR, -#endif - .shift = 32, - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = bfin_timer_set_next_event, - .set_mode = bfin_timer_set_mode, -}; - -static struct irqaction bfin_timer_irq = { #if defined(CONFIG_TICKSOURCE_GPTMR0) - .name = "Blackfin GPTimer0", -#else - .name = "Blackfin CoreTimer", -#endif - .flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_PERCPU, - .handler = timer_interrupt, - .dev_id = &clockevent_bfin, -}; - -#if defined(CONFIG_TICKSOURCE_GPTMR0) -static int bfin_timer_set_next_event(unsigned long cycles, +static int bfin_gptmr0_set_next_event(unsigned long cycles, struct clock_event_device *evt) { disable_gptimers(TIMER0bit); @@ -196,7 +161,7 @@ static int bfin_timer_set_next_event(unsigned long cycles, return 0; } -static void bfin_timer_set_mode(enum clock_event_mode mode, +static void bfin_gptmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { switch (mode) { @@ -224,25 +189,65 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, } } -static void bfin_timer_ack(void) +static void bfin_gptmr0_ack(void) { set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0); } -static void __init bfin_timer_init(void) +static void __init bfin_gptmr0_init(void) { disable_gptimers(TIMER0bit); } -static unsigned long __init bfin_clockevent_check(void) +#ifdef CONFIG_CORE_TIMER_IRQ_L1 +__attribute__((l1_text)) +#endif +irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) { - setup_irq(IRQ_TIMER0, &bfin_timer_irq); - return get_sclk(); + struct clock_event_device *evt = dev_id; + smp_mb(); + evt->event_handler(evt); + bfin_gptmr0_ack(); + return IRQ_HANDLED; } -#else /* CONFIG_TICKSOURCE_CORETMR */ +static struct irqaction gptmr0_irq = { + .name = "Blackfin GPTimer0", + .flags = IRQF_DISABLED | IRQF_TIMER | \ + IRQF_IRQPOLL | 
IRQF_PERCPU, + .handler = bfin_gptmr0_interrupt, +}; + +static struct clock_event_device clockevent_gptmr0 = { + .name = "bfin_gptimer0", + .rating = 300, + .irq = IRQ_TIMER0, + .shift = 32, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = bfin_gptmr0_set_next_event, + .set_mode = bfin_gptmr0_set_mode, +}; -static int bfin_timer_set_next_event(unsigned long cycles, +static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt) +{ + unsigned long clock_tick; + + clock_tick = get_sclk(); + evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift); + evt->max_delta_ns = clockevent_delta2ns(-1, evt); + evt->min_delta_ns = clockevent_delta2ns(100, evt); + + evt->cpumask = cpumask_of(0); + + clockevents_register_device(evt); +} +#endif /* CONFIG_TICKSOURCE_GPTMR0 */ + +#if defined(CONFIG_TICKSOURCE_CORETMR) +/* per-cpu local core timer */ +static DEFINE_PER_CPU(struct clock_event_device, coretmr_events); + +static int bfin_coretmr_set_next_event(unsigned long cycles, struct clock_event_device *evt) { bfin_write_TCNTL(TMPWR); @@ -253,7 +258,7 @@ static int bfin_timer_set_next_event(unsigned long cycles, return 0; } -static void bfin_timer_set_mode(enum clock_event_mode mode, +static void bfin_coretmr_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { switch (mode) { @@ -285,19 +290,13 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, } } -static void bfin_timer_ack(void) -{ -} - -static void __init bfin_timer_init(void) +void bfin_coretmr_init(void) { /* power up the timer, but don't enable it just yet */ bfin_write_TCNTL(TMPWR); CSYNC(); - /* - * the TSCALE prescaler counter. - */ + /* the TSCALE prescaler counter. */ bfin_write_TSCALE(TIME_SCALE - 1); bfin_write_TPERIOD(0); bfin_write_TCOUNT(0); @@ -305,52 +304,64 @@ static void __init bfin_timer_init(void) CSYNC(); } -static unsigned long __init bfin_clockevent_check(void) +#ifdef CONFIG_CORE_TIMER_IRQ_L1 +__attribute__((l1_text)) +#endif +irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id) { - setup_irq(IRQ_CORETMR, &bfin_timer_irq); - return get_cclk() / TIME_SCALE; -} + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); -void __init setup_core_timer(void) -{ - bfin_timer_init(); - bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL); -} -#endif /* CONFIG_TICKSOURCE_GPTMR0 */ - -/* - * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick - */ -irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; smp_mb(); evt->event_handler(evt); - bfin_timer_ack(); + + touch_nmi_watchdog(); + return IRQ_HANDLED; } -static int __init bfin_clockevent_init(void) -{ - unsigned long timer_clk; - - timer_clk = bfin_clockevent_check(); +static struct irqaction coretmr_irq = { + .name = "Blackfin CoreTimer", + .flags = IRQF_DISABLED | IRQF_TIMER | \ + IRQF_IRQPOLL | IRQF_PERCPU, + .handler = bfin_coretmr_interrupt, +}; - bfin_timer_init(); +void bfin_coretmr_clockevent_init(void) +{ + unsigned long clock_tick; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); + + evt->name = "bfin_core_timer"; + evt->rating = 350; + evt->irq = -1; + evt->shift = 32; + evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + evt->set_next_event = bfin_coretmr_set_next_event; + evt->set_mode = bfin_coretmr_set_mode; + + clock_tick = get_cclk() / TIME_SCALE; + evt->mult = 
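+		/* div_sc() yields (clock_tick << shift) / NSEC_PER_SEC, so the
+		 * clockevents core converts a delta in ns to timer cycles as
+		 * (ns * mult) >> shift; the -1 below is just the largest
+		 * 32-bit programmable delta */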
div_sc(clock_tick, NSEC_PER_SEC, evt->shift); + evt->max_delta_ns = clockevent_delta2ns(-1, evt); + evt->min_delta_ns = clockevent_delta2ns(100, evt); + + evt->cpumask = cpumask_of(cpu); + + clockevents_register_device(evt); +} +#endif /* CONFIG_TICKSOURCE_CORETMR */ - clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); - clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); - clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); - clockevent_bfin.cpumask = cpumask_of(0); - clockevents_register_device(&clockevent_bfin); - return 0; +void read_persistent_clock(struct timespec *ts) +{ + time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ + ts->tv_sec = secs_since_1970; + ts->tv_nsec = 0; } void __init time_init(void) { - time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ #ifdef CONFIG_RTC_DRV_BFIN /* [#2663] hack to filter junk RTC values that would cause @@ -363,12 +374,23 @@ void __init time_init(void) } #endif - /* Initialize xtime. From now on, xtime is updated with timer interrupts */ - xtime.tv_sec = secs_since_1970; - xtime.tv_nsec = 0; - set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); - bfin_cs_cycles_init(); bfin_cs_gptimer0_init(); - bfin_clockevent_init(); + +#if defined(CONFIG_TICKSOURCE_CORETMR) + bfin_coretmr_init(); + setup_irq(IRQ_CORETMR, &coretmr_irq); + bfin_coretmr_clockevent_init(); +#endif + +#if defined(CONFIG_TICKSOURCE_GPTMR0) + bfin_gptmr0_init(); + setup_irq(IRQ_TIMER0, &gptmr0_irq); + gptmr0_irq.dev_id = &clockevent_gptmr0; + bfin_gptmr0_clockevent_init(&clockevent_gptmr0); +#endif + +#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0) +# error at least one clock event device is required +#endif } diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c index 13c1ee3e640..c9113619029 100644 --- a/arch/blackfin/kernel/time.c +++ b/arch/blackfin/kernel/time.c @@ -112,11 +112,6 @@ u32 arch_gettimeoffset(void) } #endif -static inline int set_rtc_mmss(unsigned long nowtime) -{ - return 0; -} - /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "do_timer()" routine every clocktick @@ -126,29 +121,8 @@ __attribute__((l1_text)) #endif irqreturn_t timer_interrupt(int irq, void *dummy) { - /* last time the cmos clock got updated */ - static long last_rtc_update; - write_seqlock(&xtime_lock); do_timer(1); - - /* - * If we have an externally synchronized Linux clock, then update - * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be - * called as close as possible to 500 ms before the new second starts. - */ - if (ntp_synced() && - xtime.tv_sec > last_rtc_update + 660 && - (xtime.tv_nsec / NSEC_PER_USEC) >= - 500000 - ((unsigned)TICK_SIZE) / 2 - && (xtime.tv_nsec / NSEC_PER_USEC) <= - 500000 + ((unsigned)TICK_SIZE) / 2) { - if (set_rtc_mmss(xtime.tv_sec) == 0) - last_rtc_update = xtime.tv_sec; - else - /* Do it again in 60s. 
*/ - last_rtc_update = xtime.tv_sec - 600; - } write_sequnlock(&xtime_lock); #ifdef CONFIG_IPIPE @@ -161,10 +135,15 @@ irqreturn_t timer_interrupt(int irq, void *dummy) return IRQ_HANDLED; } -void __init time_init(void) +void read_persistent_clock(struct timespec *ts) { time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ + ts->tv_sec = secs_since_1970; + ts->tv_nsec = 0; +} +void __init time_init(void) +{ #ifdef CONFIG_RTC_DRV_BFIN /* [#2663] hack to filter junk RTC values that would cause * userspace to have to deal with time values greater than @@ -176,11 +155,5 @@ void __init time_init(void) } #endif - /* Initialize xtime. From now on, xtime is updated with timer interrupts */ - xtime.tv_sec = secs_since_1970; - xtime.tv_nsec = 0; - - wall_to_monotonic.tv_sec = -xtime.tv_sec; - time_sched_init(timer_interrupt); } diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c new file mode 100644 index 00000000000..59fcdf6b013 --- /dev/null +++ b/arch/blackfin/kernel/trace.c @@ -0,0 +1,981 @@ +/* provide some functions which dump the trace buffer, in a nice way for people + * to read it, and understand what is going on + * + * Copyright 2004-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later + */ + +#include <linux/kernel.h> +#include <linux/hardirq.h> +#include <linux/thread_info.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <asm/dma.h> +#include <asm/trace.h> +#include <asm/fixed_code.h> +#include <asm/traps.h> +#include <asm/irq_handler.h> + +void decode_address(char *buf, unsigned long address) +{ + struct task_struct *p; + struct mm_struct *mm; + unsigned long flags, offset; + unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); + struct rb_node *n; + +#ifdef CONFIG_KALLSYMS + unsigned long symsize; + const char *symname; + char *modname; + char *delim = ":"; + char namebuf[128]; +#endif + + buf += sprintf(buf, "<0x%08lx> ", address); + +#ifdef CONFIG_KALLSYMS + /* look up the address and see if we are in kernel space */ + symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); + + if (symname) { + /* yeah! kernel space! */ + if (!modname) + modname = delim = ""; + sprintf(buf, "{ %s%s%s%s + 0x%lx }", + delim, modname, delim, symname, + (unsigned long)offset); + return; + } +#endif + + if (address >= FIXED_CODE_START && address < FIXED_CODE_END) { + /* Problem in fixed code section? */ + strcat(buf, "/* Maybe fixed code section */"); + return; + + } else if (address < CONFIG_BOOT_LOAD) { + /* Problem somewhere before the kernel start address */ + strcat(buf, "/* Maybe null pointer? 
*/"); + return; + + } else if (address >= COREMMR_BASE) { + strcat(buf, "/* core mmrs */"); + return; + + } else if (address >= SYSMMR_BASE) { + strcat(buf, "/* system mmrs */"); + return; + + } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) { + strcat(buf, "/* on-chip L1 ROM */"); + return; + + } else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) { + strcat(buf, "/* on-chip scratchpad */"); + return; + + } else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) { + strcat(buf, "/* unconnected memory */"); + return; + + } else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) { + strcat(buf, "/* reserved memory */"); + return; + + } else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) { + strcat(buf, "/* on-chip Data Bank A */"); + return; + + } else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) { + strcat(buf, "/* on-chip Data Bank B */"); + return; + } + + /* + * Don't walk any of the vmas if we are oopsing, it has been known + * to cause problems - corrupt vmas (kernel crashes) cause double faults + */ + if (oops_in_progress) { + strcat(buf, "/* kernel dynamic memory (maybe user-space) */"); + return; + } + + /* looks like we're off in user-land, so let's walk all the + * mappings of all our processes and see if we can't be a whee + * bit more specific + */ + write_lock_irqsave(&tasklist_lock, flags); + for_each_process(p) { + mm = (in_atomic ? p->mm : get_task_mm(p)); + if (!mm) + continue; + + if (!down_read_trylock(&mm->mmap_sem)) { + if (!in_atomic) + mmput(mm); + continue; + } + + for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { + struct vm_area_struct *vma; + + vma = rb_entry(n, struct vm_area_struct, vm_rb); + + if (address >= vma->vm_start && address < vma->vm_end) { + char _tmpbuf[256]; + char *name = p->comm; + struct file *file = vma->vm_file; + + if (file) { + char *d_name = d_path(&file->f_path, _tmpbuf, + sizeof(_tmpbuf)); + if (!IS_ERR(d_name)) + name = d_name; + } + + /* FLAT does not have its text aligned to the start of + * the map while FDPIC ELF does ... + */ + + /* before we can check flat/fdpic, we need to + * make sure current is valid + */ + if ((unsigned long)current >= FIXED_CODE_START && + !((unsigned long)current & 0x3)) { + if (current->mm && + (address > current->mm->start_code) && + (address < current->mm->end_code)) + offset = address - current->mm->start_code; + else + offset = (address - vma->vm_start) + + (vma->vm_pgoff << PAGE_SHIFT); + + sprintf(buf, "[ %s + 0x%lx ]", name, offset); + } else + sprintf(buf, "[ %s vma:0x%lx-0x%lx]", + name, vma->vm_start, vma->vm_end); + + up_read(&mm->mmap_sem); + if (!in_atomic) + mmput(mm); + + if (buf[0] == '\0') + sprintf(buf, "[ %s ] dynamic memory", name); + + goto done; + } + } + + up_read(&mm->mmap_sem); + if (!in_atomic) + mmput(mm); + } + + /* + * we were unable to find this address anywhere, + * or some MMs were skipped because they were in use. 
+ */ + sprintf(buf, "/* kernel dynamic memory */"); + +done: + write_unlock_irqrestore(&tasklist_lock, flags); +} + +#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) + +/* + * Similar to get_user, do some address checking, then dereference + * Return true on success, false on bad address + */ +bool get_mem16(unsigned short *val, unsigned short *address) +{ + unsigned long addr = (unsigned long)address; + + /* Check for odd addresses */ + if (addr & 0x1) + return false; + + switch (bfin_mem_access_type(addr, 2)) { + case BFIN_MEM_ACCESS_CORE: + case BFIN_MEM_ACCESS_CORE_ONLY: + *val = *address; + return true; + case BFIN_MEM_ACCESS_DMA: + dma_memcpy(val, address, 2); + return true; + case BFIN_MEM_ACCESS_ITEST: + isram_memcpy(val, address, 2); + return true; + default: /* invalid access */ + return false; + } +} + +bool get_instruction(unsigned int *val, unsigned short *address) +{ + unsigned long addr = (unsigned long)address; + unsigned short opcode0, opcode1; + + /* Check for odd addresses */ + if (addr & 0x1) + return false; + + /* MMR region will never have instructions */ + if (addr >= SYSMMR_BASE) + return false; + + /* Scratchpad will never have instructions */ + if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH) + return false; + + /* Data banks will never have instructions */ + if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START) + return false; + + if (!get_mem16(&opcode0, address)) + return false; + + /* was this a 32-bit instruction? If so, get the next 16 bits */ + if ((opcode0 & 0xc000) == 0xc000) { + if (!get_mem16(&opcode1, address + 1)) + return false; + *val = (opcode0 << 16) + opcode1; + } else + *val = opcode0; + + return true; +} + +#if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON) +/* + * decode the instruction if we are printing out the trace, as it + * makes things easier to follow, without running it through objdump + * Decode the change of flow, and the common load/store instructions + * which are the main cause for faults, and discontinuities in the trace + * buffer. 
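+ *
+ * Each decoder below extracts its operands the same way, as
+ *   field = (opcode >> <INSN>_<field>_bits) & <INSN>_<field>_mask,
+ * and decode_opcode() masks the opcode against its fixed high bits and
+ * compares the result with <INSN>_opcode to pick which decoder to run.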
+ */ + +#define ProgCtrl_opcode 0x0000 +#define ProgCtrl_poprnd_bits 0 +#define ProgCtrl_poprnd_mask 0xf +#define ProgCtrl_prgfunc_bits 4 +#define ProgCtrl_prgfunc_mask 0xf +#define ProgCtrl_code_bits 8 +#define ProgCtrl_code_mask 0xff + +static void decode_ProgCtrl_0(unsigned int opcode) +{ + int poprnd = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask); + int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask); + + if (prgfunc == 0 && poprnd == 0) + pr_cont("NOP"); + else if (prgfunc == 1 && poprnd == 0) + pr_cont("RTS"); + else if (prgfunc == 1 && poprnd == 1) + pr_cont("RTI"); + else if (prgfunc == 1 && poprnd == 2) + pr_cont("RTX"); + else if (prgfunc == 1 && poprnd == 3) + pr_cont("RTN"); + else if (prgfunc == 1 && poprnd == 4) + pr_cont("RTE"); + else if (prgfunc == 2 && poprnd == 0) + pr_cont("IDLE"); + else if (prgfunc == 2 && poprnd == 3) + pr_cont("CSYNC"); + else if (prgfunc == 2 && poprnd == 4) + pr_cont("SSYNC"); + else if (prgfunc == 2 && poprnd == 5) + pr_cont("EMUEXCPT"); + else if (prgfunc == 3) + pr_cont("CLI R%i", poprnd); + else if (prgfunc == 4) + pr_cont("STI R%i", poprnd); + else if (prgfunc == 5) + pr_cont("JUMP (P%i)", poprnd); + else if (prgfunc == 6) + pr_cont("CALL (P%i)", poprnd); + else if (prgfunc == 7) + pr_cont("CALL (PC + P%i)", poprnd); + else if (prgfunc == 8) + pr_cont("JUMP (PC + P%i", poprnd); + else if (prgfunc == 9) + pr_cont("RAISE %i", poprnd); + else if (prgfunc == 10) + pr_cont("EXCPT %i", poprnd); + else + pr_cont("0x%04x", opcode); + +} + +#define BRCC_opcode 0x1000 +#define BRCC_offset_bits 0 +#define BRCC_offset_mask 0x3ff +#define BRCC_B_bits 10 +#define BRCC_B_mask 0x1 +#define BRCC_T_bits 11 +#define BRCC_T_mask 0x1 +#define BRCC_code_bits 12 +#define BRCC_code_mask 0xf + +static void decode_BRCC_0(unsigned int opcode) +{ + int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask); + int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask); + + pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? 
"(BP)" : ""); +} + +#define CALLa_opcode 0xe2000000 +#define CALLa_addr_bits 0 +#define CALLa_addr_mask 0xffffff +#define CALLa_S_bits 24 +#define CALLa_S_mask 0x1 +#define CALLa_code_bits 25 +#define CALLa_code_mask 0x7f + +static void decode_CALLa_0(unsigned int opcode) +{ + int S = ((opcode >> (CALLa_S_bits - 16)) & CALLa_S_mask); + + if (S) + pr_cont("CALL pcrel"); + else + pr_cont("JUMP.L"); +} + +#define LoopSetup_opcode 0xe0800000 +#define LoopSetup_eoffset_bits 0 +#define LoopSetup_eoffset_mask 0x3ff +#define LoopSetup_dontcare_bits 10 +#define LoopSetup_dontcare_mask 0x3 +#define LoopSetup_reg_bits 12 +#define LoopSetup_reg_mask 0xf +#define LoopSetup_soffset_bits 16 +#define LoopSetup_soffset_mask 0xf +#define LoopSetup_c_bits 20 +#define LoopSetup_c_mask 0x1 +#define LoopSetup_rop_bits 21 +#define LoopSetup_rop_mask 0x3 +#define LoopSetup_code_bits 23 +#define LoopSetup_code_mask 0x1ff + +static void decode_LoopSetup_0(unsigned int opcode) +{ + int c = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask); + int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask); + int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask); + + pr_cont("LSETUP <> LC%i", c); + if ((rop & 1) == 1) + pr_cont("= P%i", reg); + if ((rop & 2) == 2) + pr_cont(" >> 0x1"); +} + +#define DspLDST_opcode 0x9c00 +#define DspLDST_reg_bits 0 +#define DspLDST_reg_mask 0x7 +#define DspLDST_i_bits 3 +#define DspLDST_i_mask 0x3 +#define DspLDST_m_bits 5 +#define DspLDST_m_mask 0x3 +#define DspLDST_aop_bits 7 +#define DspLDST_aop_mask 0x3 +#define DspLDST_W_bits 9 +#define DspLDST_W_mask 0x1 +#define DspLDST_code_bits 10 +#define DspLDST_code_mask 0x3f + +static void decode_dspLDST_0(unsigned int opcode) +{ + int i = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask); + int m = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask); + int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask); + int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask); + int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask); + + if (W == 0) { + pr_cont("R%i", reg); + switch (m) { + case 0: + pr_cont(" = "); + break; + case 1: + pr_cont(".L = "); + break; + case 2: + pr_cont(".W = "); + break; + } + } + + pr_cont("[ I%i", i); + + switch (aop) { + case 0: + pr_cont("++ ]"); + break; + case 1: + pr_cont("-- ]"); + break; + } + + if (W == 1) { + pr_cont(" = R%i", reg); + switch (m) { + case 1: + pr_cont(".L = "); + break; + case 2: + pr_cont(".W = "); + break; + } + } +} + +#define LDST_opcode 0x9000 +#define LDST_reg_bits 0 +#define LDST_reg_mask 0x7 +#define LDST_ptr_bits 3 +#define LDST_ptr_mask 0x7 +#define LDST_Z_bits 6 +#define LDST_Z_mask 0x1 +#define LDST_aop_bits 7 +#define LDST_aop_mask 0x3 +#define LDST_W_bits 9 +#define LDST_W_mask 0x1 +#define LDST_sz_bits 10 +#define LDST_sz_mask 0x3 +#define LDST_code_bits 12 +#define LDST_code_mask 0xf + +static void decode_LDST_0(unsigned int opcode) +{ + int Z = ((opcode >> LDST_Z_bits) & LDST_Z_mask); + int W = ((opcode >> LDST_W_bits) & LDST_W_mask); + int sz = ((opcode >> LDST_sz_bits) & LDST_sz_mask); + int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask); + int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask); + int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask); + + if (W == 0) + pr_cont("%s%i = ", (sz == 0 && Z == 1) ? 
"P" : "R", reg); + + switch (sz) { + case 1: + pr_cont("W"); + break; + case 2: + pr_cont("B"); + break; + } + + pr_cont("[P%i", ptr); + + switch (aop) { + case 0: + pr_cont("++"); + break; + case 1: + pr_cont("--"); + break; + } + pr_cont("]"); + + if (W == 1) + pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg); + + if (sz) { + if (Z) + pr_cont(" (X)"); + else + pr_cont(" (Z)"); + } +} + +#define LDSTii_opcode 0xa000 +#define LDSTii_reg_bit 0 +#define LDSTii_reg_mask 0x7 +#define LDSTii_ptr_bit 3 +#define LDSTii_ptr_mask 0x7 +#define LDSTii_offset_bit 6 +#define LDSTii_offset_mask 0xf +#define LDSTii_op_bit 10 +#define LDSTii_op_mask 0x3 +#define LDSTii_W_bit 12 +#define LDSTii_W_mask 0x1 +#define LDSTii_code_bit 13 +#define LDSTii_code_mask 0x7 + +static void decode_LDSTii_0(unsigned int opcode) +{ + int reg = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask); + int ptr = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask); + int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask); + int op = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask); + int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask); + + if (W == 0) { + pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "R" : "P", reg, + op == 1 || op == 2 ? "" : "W", ptr, offset); + if (op == 2) + pr_cont("(Z)"); + if (op == 3) + pr_cont("(X)"); + } else { + pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr, + offset, op == 3 ? "P" : "R", reg); + } +} + +#define LDSTidxI_opcode 0xe4000000 +#define LDSTidxI_offset_bits 0 +#define LDSTidxI_offset_mask 0xffff +#define LDSTidxI_reg_bits 16 +#define LDSTidxI_reg_mask 0x7 +#define LDSTidxI_ptr_bits 19 +#define LDSTidxI_ptr_mask 0x7 +#define LDSTidxI_sz_bits 22 +#define LDSTidxI_sz_mask 0x3 +#define LDSTidxI_Z_bits 24 +#define LDSTidxI_Z_mask 0x1 +#define LDSTidxI_W_bits 25 +#define LDSTidxI_W_mask 0x1 +#define LDSTidxI_code_bits 26 +#define LDSTidxI_code_mask 0x3f + +static void decode_LDSTidxI_0(unsigned int opcode) +{ + int Z = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask); + int W = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask); + int sz = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask); + int reg = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask); + int ptr = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask); + int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask); + + if (W == 0) + pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg); + + if (sz == 1) + pr_cont("W"); + if (sz == 2) + pr_cont("B"); + + pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "", + (offset & 0x1f) << 2); + + if (W == 0 && sz != 0) { + if (Z) + pr_cont("(X)"); + else + pr_cont("(Z)"); + } + + if (W == 1) + pr_cont("= %s%i", (sz == 0 && Z == 1) ? 
"P" : "R", reg); + +} + +static void decode_opcode(unsigned int opcode) +{ +#ifdef CONFIG_BUG + if (opcode == BFIN_BUG_OPCODE) + pr_cont("BUG"); + else +#endif + if ((opcode & 0xffffff00) == ProgCtrl_opcode) + decode_ProgCtrl_0(opcode); + else if ((opcode & 0xfffff000) == BRCC_opcode) + decode_BRCC_0(opcode); + else if ((opcode & 0xfffff000) == 0x2000) + pr_cont("JUMP.S"); + else if ((opcode & 0xfe000000) == CALLa_opcode) + decode_CALLa_0(opcode); + else if ((opcode & 0xff8000C0) == LoopSetup_opcode) + decode_LoopSetup_0(opcode); + else if ((opcode & 0xfffffc00) == DspLDST_opcode) + decode_dspLDST_0(opcode); + else if ((opcode & 0xfffff000) == LDST_opcode) + decode_LDST_0(opcode); + else if ((opcode & 0xffffe000) == LDSTii_opcode) + decode_LDSTii_0(opcode); + else if ((opcode & 0xfc000000) == LDSTidxI_opcode) + decode_LDSTidxI_0(opcode); + else if (opcode & 0xffff0000) + pr_cont("0x%08x", opcode); + else + pr_cont("0x%04x", opcode); +} + +#define BIT_MULTI_INS 0x08000000 +static void decode_instruction(unsigned short *address) +{ + unsigned int opcode; + + if (!get_instruction(&opcode, address)) + return; + + decode_opcode(opcode); + + /* If things are a 32-bit instruction, it has the possibility of being + * a multi-issue instruction (a 32-bit, and 2 16 bit instrucitions) + * This test collidates with the unlink instruction, so disallow that + */ + if ((opcode & 0xc0000000) == 0xc0000000 && + (opcode & BIT_MULTI_INS) && + (opcode & 0xe8000000) != 0xe8000000) { + pr_cont(" || "); + if (!get_instruction(&opcode, address + 2)) + return; + decode_opcode(opcode); + pr_cont(" || "); + if (!get_instruction(&opcode, address + 3)) + return; + decode_opcode(opcode); + } +} +#endif + +void dump_bfin_trace_buffer(void) +{ +#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON + int tflags, i = 0, fault = 0; + char buf[150]; + unsigned short *addr; + unsigned int cpu = raw_smp_processor_id(); +#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND + int j, index; +#endif + + trace_buffer_save(tflags); + + pr_notice("Hardware Trace:\n"); + +#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND + pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n"); +#endif + + if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { + for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) { + addr = (unsigned short *)bfin_read_TBUF(); + decode_address(buf, (unsigned long)addr); + pr_notice("%4i Target : %s\n", i, buf); + /* Normally, the faulting instruction doesn't go into + * the trace buffer, (since it doesn't commit), so + * we print out the fault address here + */ + if (!fault && addr == ((unsigned short *)evt_ivhw)) { + addr = (unsigned short *)bfin_read_TBUF(); + decode_address(buf, (unsigned long)addr); + pr_notice(" FAULT : %s ", buf); + decode_instruction(addr); + pr_cont("\n"); + fault = 1; + continue; + } + if (!fault && addr == (unsigned short *)trap && + (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) { + decode_address(buf, cpu_pda[cpu].icplb_fault_addr); + pr_notice(" FAULT : %s ", buf); + decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr); + pr_cont("\n"); + fault = 1; + } + addr = (unsigned short *)bfin_read_TBUF(); + decode_address(buf, (unsigned long)addr); + pr_notice(" Source : %s ", buf); + decode_instruction(addr); + pr_cont("\n"); + } + } + +#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND + if (trace_buff_offset) + index = trace_buff_offset / 4; + else + index = EXPAND_LEN; + + j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128; + while (j) { + decode_address(buf, software_trace_buff[index]); + pr_notice("%4i Target : 
%s\n", i, buf); + index -= 1; + if (index < 0) + index = EXPAND_LEN; + decode_address(buf, software_trace_buff[index]); + pr_notice(" Source : %s ", buf); + decode_instruction((unsigned short *)software_trace_buff[index]); + pr_cont("\n"); + index -= 1; + if (index < 0) + index = EXPAND_LEN; + j--; + i++; + } +#endif + + trace_buffer_restore(tflags); +#endif +} +EXPORT_SYMBOL(dump_bfin_trace_buffer); + +void dump_bfin_process(struct pt_regs *fp) +{ + /* We should be able to look at fp->ipend, but we don't push it on the + * stack all the time, so do this until we fix that */ + unsigned int context = bfin_read_IPEND(); + + if (oops_in_progress) + pr_emerg("Kernel OOPS in progress\n"); + + if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) + pr_notice("HW Error context\n"); + else if (context & 0x0020) + pr_notice("Deferred Exception context\n"); + else if (context & 0x3FC0) + pr_notice("Interrupt context\n"); + else if (context & 0x4000) + pr_notice("Deferred Interrupt context\n"); + else if (context & 0x8000) + pr_notice("Kernel process context\n"); + + /* Because we are crashing, and pointers could be bad, we check things + * pretty closely before we use them + */ + if ((unsigned long)current >= FIXED_CODE_START && + !((unsigned long)current & 0x3) && current->pid) { + pr_notice("CURRENT PROCESS:\n"); + if (current->comm >= (char *)FIXED_CODE_START) + pr_notice("COMM=%s PID=%d", + current->comm, current->pid); + else + pr_notice("COMM= invalid"); + + pr_cont(" CPU=%d\n", current_thread_info()->cpu); + if (!((unsigned long)current->mm & 0x3) && + (unsigned long)current->mm >= FIXED_CODE_START) { + pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n", + (void *)current->mm->start_code, + (void *)current->mm->end_code, + (void *)current->mm->start_data, + (void *)current->mm->end_data); + pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n", + (void *)current->mm->end_data, + (void *)current->mm->brk, + (void *)current->mm->start_stack); + } else + pr_notice("invalid mm\n"); + } else + pr_notice("No Valid process in current context\n"); +} + +void dump_bfin_mem(struct pt_regs *fp) +{ + unsigned short *addr, *erraddr, val = 0, err = 0; + char sti = 0, buf[6]; + + erraddr = (void *)fp->pc; + + pr_notice("return address: [0x%p]; contents of:", erraddr); + + for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10; + addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10; + addr++) { + if (!((unsigned long)addr & 0xF)) + pr_notice("0x%p: ", addr); + + if (!get_mem16(&val, addr)) { + val = 0; + sprintf(buf, "????"); + } else + sprintf(buf, "%04x", val); + + if (addr == erraddr) { + pr_cont("[%s]", buf); + err = val; + } else + pr_cont(" %s ", buf); + + /* Do any previous instructions turn on interrupts? 
*/ + if (addr <= erraddr && /* in the past */ + ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */ + val == 0x017b)) /* [SP++] = RETI */ + sti = 1; + } + + pr_cont("\n"); + + /* Hardware error interrupts can be deferred */ + if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR && + oops_in_progress)){ + pr_notice("Looks like this was a deferred error - sorry\n"); +#ifndef CONFIG_DEBUG_HWERR + pr_notice("The remaining message may be meaningless\n"); + pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n"); +#else + /* If we are handling only one peripheral interrupt + * and current mm and pid are valid, and the last error + * was in that user space process's text area + * print it out - because that is where the problem exists + */ + if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) && + (current->pid && current->mm)) { + /* And the last RETI points to the current userspace context */ + if ((fp + 1)->pc >= current->mm->start_code && + (fp + 1)->pc <= current->mm->end_code) { + pr_notice("It might be better to look around here :\n"); + pr_notice("-------------------------------------------\n"); + show_regs(fp + 1); + pr_notice("-------------------------------------------\n"); + } + } +#endif + } +} + +void show_regs(struct pt_regs *fp) +{ + char buf[150]; + struct irqaction *action; + unsigned int i; + unsigned long flags = 0; + unsigned int cpu = raw_smp_processor_id(); + unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); + + pr_notice("\n"); + if (CPUID != bfin_cpuid()) + pr_notice("Compiled for cpu family 0x%04x (Rev %d), " + "but running on:0x%04x (Rev %d)\n", + CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid()); + + pr_notice("ADSP-%s-0.%d", + CPU, bfin_compiled_revid()); + + if (bfin_compiled_revid() != bfin_revid()) + pr_cont("(Detected 0.%d)", bfin_revid()); + + pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n", + get_cclk()/1000000, get_sclk()/1000000, +#ifdef CONFIG_MPU + "mpu on" +#else + "mpu off" +#endif + ); + + pr_notice("%s", linux_banner); + + pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted()); + pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n", + (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg); + if (fp->ipend & EVT_IRPTEN) + pr_notice(" Global Interrupts Disabled (IPEND[4])\n"); + if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 | + EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR))) + pr_notice(" Peripheral interrupts masked off\n"); + if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14))) + pr_notice(" Kernel interrupts masked off\n"); + if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) { + pr_notice(" HWERRCAUSE: 0x%lx\n", + (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14); +#ifdef EBIU_ERRMST + /* If the error was from the EBIU, print it out */ + if (bfin_read_EBIU_ERRMST() & CORE_ERROR) { + pr_notice(" EBIU Error Reason : 0x%04x\n", + bfin_read_EBIU_ERRMST()); + pr_notice(" EBIU Error Address : 0x%08x\n", + bfin_read_EBIU_ERRADD()); + } +#endif + } + pr_notice(" EXCAUSE : 0x%lx\n", + fp->seqstat & SEQSTAT_EXCAUSE); + for (i = 2; i <= 15 ; i++) { + if (fp->ipend & (1 << i)) { + if (i != 4) { + decode_address(buf, bfin_read32(EVT0 + 4*i)); + pr_notice(" physical IVG%i asserted : %s\n", i, buf); + } else + pr_notice(" interrupts disabled\n"); + } + } + + /* if no interrupts are going off, don't print this out */ + if (fp->ipend & ~0x3F) { + for (i = 0; i < (NR_IRQS - 
1); i++) { + if (!in_atomic) + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); + + action = irq_desc[i].action; + if (!action) + goto unlock; + + decode_address(buf, (unsigned int)action->handler); + pr_notice(" logical irq %3d mapped : %s", i, buf); + for (action = action->next; action; action = action->next) { + decode_address(buf, (unsigned int)action->handler); + pr_cont(", %s", buf); + } + pr_cont("\n"); +unlock: + if (!in_atomic) + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); + } + } + + decode_address(buf, fp->rete); + pr_notice(" RETE: %s\n", buf); + decode_address(buf, fp->retn); + pr_notice(" RETN: %s\n", buf); + decode_address(buf, fp->retx); + pr_notice(" RETX: %s\n", buf); + decode_address(buf, fp->rets); + pr_notice(" RETS: %s\n", buf); + decode_address(buf, fp->pc); + pr_notice(" PC : %s\n", buf); + + if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && + (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { + decode_address(buf, cpu_pda[cpu].dcplb_fault_addr); + pr_notice("DCPLB_FAULT_ADDR: %s\n", buf); + decode_address(buf, cpu_pda[cpu].icplb_fault_addr); + pr_notice("ICPLB_FAULT_ADDR: %s\n", buf); + } + + pr_notice("PROCESSOR STATE:\n"); + pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", + fp->r0, fp->r1, fp->r2, fp->r3); + pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", + fp->r4, fp->r5, fp->r6, fp->r7); + pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n", + fp->p0, fp->p1, fp->p2, fp->p3); + pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n", + fp->p4, fp->p5, fp->fp, (long)fp); + pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n", + fp->lb0, fp->lt0, fp->lc0); + pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n", + fp->lb1, fp->lt1, fp->lc1); + pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n", + fp->b0, fp->l0, fp->m0, fp->i0); + pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n", + fp->b1, fp->l1, fp->m1, fp->i1); + pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n", + fp->b2, fp->l2, fp->m2, fp->i2); + pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n", + fp->b3, fp->l3, fp->m3, fp->i3); + pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n", + fp->a0w, fp->a0x, fp->a1w, fp->a1x); + + pr_notice("USP : %08lx ASTAT: %08lx\n", + rdusp(), fp->astat); + + pr_notice("\n"); +} diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index d3cbcd6bd98..59c1df75e4d 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -1,25 +1,22 @@ /* - * Copyright 2004-2009 Analog Devices Inc. + * Main exception handling logic. + * + * Copyright 2004-2010 Analog Devices Inc. 
* * Licensed under the GPL-2 or later */ #include <linux/bug.h> #include <linux/uaccess.h> -#include <linux/interrupt.h> #include <linux/module.h> -#include <linux/kallsyms.h> -#include <linux/fs.h> -#include <linux/rbtree.h> #include <asm/traps.h> -#include <asm/cacheflush.h> #include <asm/cplb.h> -#include <asm/dma.h> #include <asm/blackfin.h> #include <asm/irq_handler.h> #include <linux/irq.h> #include <asm/trace.h> #include <asm/fixed_code.h> +#include <asm/pseudo_instructions.h> #ifdef CONFIG_KGDB # include <linux/kgdb.h> @@ -62,182 +59,6 @@ void __init trap_init(void) CSYNC(); } -static void decode_address(char *buf, unsigned long address) -{ -#ifdef CONFIG_DEBUG_VERBOSE - struct task_struct *p; - struct mm_struct *mm; - unsigned long flags, offset; - unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); - struct rb_node *n; - -#ifdef CONFIG_KALLSYMS - unsigned long symsize; - const char *symname; - char *modname; - char *delim = ":"; - char namebuf[128]; -#endif - - buf += sprintf(buf, "<0x%08lx> ", address); - -#ifdef CONFIG_KALLSYMS - /* look up the address and see if we are in kernel space */ - symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); - - if (symname) { - /* yeah! kernel space! */ - if (!modname) - modname = delim = ""; - sprintf(buf, "{ %s%s%s%s + 0x%lx }", - delim, modname, delim, symname, - (unsigned long)offset); - return; - } -#endif - - if (address >= FIXED_CODE_START && address < FIXED_CODE_END) { - /* Problem in fixed code section? */ - strcat(buf, "/* Maybe fixed code section */"); - return; - - } else if (address < CONFIG_BOOT_LOAD) { - /* Problem somewhere before the kernel start address */ - strcat(buf, "/* Maybe null pointer? */"); - return; - - } else if (address >= COREMMR_BASE) { - strcat(buf, "/* core mmrs */"); - return; - - } else if (address >= SYSMMR_BASE) { - strcat(buf, "/* system mmrs */"); - return; - - } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) { - strcat(buf, "/* on-chip L1 ROM */"); - return; - } - - /* - * Don't walk any of the vmas if we are oopsing, it has been known - * to cause problems - corrupt vmas (kernel crashes) cause double faults - */ - if (oops_in_progress) { - strcat(buf, "/* kernel dynamic memory (maybe user-space) */"); - return; - } - - /* looks like we're off in user-land, so let's walk all the - * mappings of all our processes and see if we can't be a whee - * bit more specific - */ - write_lock_irqsave(&tasklist_lock, flags); - for_each_process(p) { - mm = (in_atomic ? p->mm : get_task_mm(p)); - if (!mm) - continue; - - for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { - struct vm_area_struct *vma; - - vma = rb_entry(n, struct vm_area_struct, vm_rb); - - if (address >= vma->vm_start && address < vma->vm_end) { - char _tmpbuf[256]; - char *name = p->comm; - struct file *file = vma->vm_file; - - if (file) { - char *d_name = d_path(&file->f_path, _tmpbuf, - sizeof(_tmpbuf)); - if (!IS_ERR(d_name)) - name = d_name; - } - - /* FLAT does not have its text aligned to the start of - * the map while FDPIC ELF does ... 
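+	 * execute_pseudodbg_assert()/execute_pseudodbg() are handed the
+	 * faulting opcode; if either one recognises and emulates it, we skip
+	 * the SIGILL path via traps_done.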
- */ - - /* before we can check flat/fdpic, we need to - * make sure current is valid - */ - if ((unsigned long)current >= FIXED_CODE_START && - !((unsigned long)current & 0x3)) { - if (current->mm && - (address > current->mm->start_code) && - (address < current->mm->end_code)) - offset = address - current->mm->start_code; - else - offset = (address - vma->vm_start) + - (vma->vm_pgoff << PAGE_SHIFT); - - sprintf(buf, "[ %s + 0x%lx ]", name, offset); - } else - sprintf(buf, "[ %s vma:0x%lx-0x%lx]", - name, vma->vm_start, vma->vm_end); - - if (!in_atomic) - mmput(mm); - - if (buf[0] == '\0') - sprintf(buf, "[ %s ] dynamic memory", name); - - goto done; - } - } - if (!in_atomic) - mmput(mm); - } - - /* we were unable to find this address anywhere */ - sprintf(buf, "/* kernel dynamic memory */"); - -done: - write_unlock_irqrestore(&tasklist_lock, flags); -#else - sprintf(buf, " "); -#endif -} - -asmlinkage void double_fault_c(struct pt_regs *fp) -{ -#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON - int j; - trace_buffer_save(j); -#endif - - console_verbose(); - oops_in_progress = 1; -#ifdef CONFIG_DEBUG_VERBOSE - printk(KERN_EMERG "Double Fault\n"); -#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT - if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { - unsigned int cpu = raw_smp_processor_id(); - char buf[150]; - decode_address(buf, cpu_pda[cpu].retx_doublefault); - printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", - (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); - decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); - printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); - decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); - printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); - - decode_address(buf, fp->retx); - printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); - } else -#endif - { - dump_bfin_process(fp); - dump_bfin_mem(fp); - show_regs(fp); - dump_bfin_trace_buffer(); - } -#endif - panic("Double Fault - unrecoverable event"); - -} - static int kernel_mode_regs(struct pt_regs *regs) { return regs->ipend & 0xffc0; @@ -248,9 +69,10 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON int j; #endif -#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO - unsigned int cpu = raw_smp_processor_id(); +#ifdef CONFIG_BFIN_PSEUDODBG_INSNS + int opcode; #endif + unsigned int cpu = raw_smp_processor_id(); const char *strerror = NULL; int sig = 0; siginfo_t info; @@ -382,6 +204,19 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) } } #endif +#ifdef CONFIG_BFIN_PSEUDODBG_INSNS + /* + * Support for the fake instructions, if the instruction fails, + * then just execute a illegal opcode failure (like normal). 
+ * Don't support these instructions inside the kernel + */ + if (!kernel_mode_regs(fp) && get_instruction(&opcode, (unsigned short *)fp->pc)) { + if (execute_pseudodbg_assert(fp, opcode)) + goto traps_done; + if (execute_pseudodbg(fp, opcode)) + goto traps_done; + } +#endif info.si_code = ILL_ILLOPC; sig = SIGILL; strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); @@ -639,7 +474,17 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) { info.si_signo = sig; info.si_errno = 0; - info.si_addr = (void __user *)fp->pc; + switch (trapnr) { + case VEC_CPLB_VL: + case VEC_MISALI_D: + case VEC_CPLB_M: + case VEC_CPLB_MHIT: + info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr; + break; + default: + info.si_addr = (void __user *)fp->pc; + break; + } force_sig_info(sig, &info, current); } @@ -652,659 +497,44 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) trace_buffer_restore(j); } -/* Typical exception handling routines */ - -#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) - -/* - * Similar to get_user, do some address checking, then dereference - * Return true on success, false on bad address - */ -static bool get_instruction(unsigned short *val, unsigned short *address) -{ - unsigned long addr = (unsigned long)address; - - /* Check for odd addresses */ - if (addr & 0x1) - return false; - - /* MMR region will never have instructions */ - if (addr >= SYSMMR_BASE) - return false; - - switch (bfin_mem_access_type(addr, 2)) { - case BFIN_MEM_ACCESS_CORE: - case BFIN_MEM_ACCESS_CORE_ONLY: - *val = *address; - return true; - case BFIN_MEM_ACCESS_DMA: - dma_memcpy(val, address, 2); - return true; - case BFIN_MEM_ACCESS_ITEST: - isram_memcpy(val, address, 2); - return true; - default: /* invalid access */ - return false; - } -} - -/* - * decode the instruction if we are printing out the trace, as it - * makes things easier to follow, without running it through objdump - * These are the normal instructions which cause change of flow, which - * would be at the source of the trace buffer - */ -#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON) -static void decode_instruction(unsigned short *address) -{ - unsigned short opcode; - - if (get_instruction(&opcode, address)) { - if (opcode == 0x0010) - verbose_printk("RTS"); - else if (opcode == 0x0011) - verbose_printk("RTI"); - else if (opcode == 0x0012) - verbose_printk("RTX"); - else if (opcode == 0x0013) - verbose_printk("RTN"); - else if (opcode == 0x0014) - verbose_printk("RTE"); - else if (opcode == 0x0025) - verbose_printk("EMUEXCPT"); - else if (opcode == 0x0040 && opcode <= 0x0047) - verbose_printk("STI R%i", opcode & 7); - else if (opcode >= 0x0050 && opcode <= 0x0057) - verbose_printk("JUMP (P%i)", opcode & 7); - else if (opcode >= 0x0060 && opcode <= 0x0067) - verbose_printk("CALL (P%i)", opcode & 7); - else if (opcode >= 0x0070 && opcode <= 0x0077) - verbose_printk("CALL (PC+P%i)", opcode & 7); - else if (opcode >= 0x0080 && opcode <= 0x0087) - verbose_printk("JUMP (PC+P%i)", opcode & 7); - else if (opcode >= 0x0090 && opcode <= 0x009F) - verbose_printk("RAISE 0x%x", opcode & 0xF); - else if (opcode >= 0x00A0 && opcode <= 0x00AF) - verbose_printk("EXCPT 0x%x", opcode & 0xF); - else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF)) - verbose_printk("IF !CC JUMP"); - else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff)) - verbose_printk("IF CC JUMP"); - else if (opcode >= 0x2000 && opcode <= 0x2fff) - 
verbose_printk("JUMP.S"); - else if (opcode >= 0xe080 && opcode <= 0xe0ff) - verbose_printk("LSETUP"); - else if (opcode >= 0xe200 && opcode <= 0xe2ff) - verbose_printk("JUMP.L"); - else if (opcode >= 0xe300 && opcode <= 0xe3ff) - verbose_printk("CALL pcrel"); - else - verbose_printk("0x%04x", opcode); - } - -} -#endif - -void dump_bfin_trace_buffer(void) -{ -#ifdef CONFIG_DEBUG_VERBOSE -#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON - int tflags, i = 0; - char buf[150]; - unsigned short *addr; -#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND - int j, index; -#endif - - trace_buffer_save(tflags); - - printk(KERN_NOTICE "Hardware Trace:\n"); - -#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND - printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n"); -#endif - - if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { - for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) { - decode_address(buf, (unsigned long)bfin_read_TBUF()); - printk(KERN_NOTICE "%4i Target : %s\n", i, buf); - addr = (unsigned short *)bfin_read_TBUF(); - decode_address(buf, (unsigned long)addr); - printk(KERN_NOTICE " Source : %s ", buf); - decode_instruction(addr); - printk("\n"); - } - } - -#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND - if (trace_buff_offset) - index = trace_buff_offset / 4; - else - index = EXPAND_LEN; - - j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128; - while (j) { - decode_address(buf, software_trace_buff[index]); - printk(KERN_NOTICE "%4i Target : %s\n", i, buf); - index -= 1; - if (index < 0 ) - index = EXPAND_LEN; - decode_address(buf, software_trace_buff[index]); - printk(KERN_NOTICE " Source : %s ", buf); - decode_instruction((unsigned short *)software_trace_buff[index]); - printk("\n"); - index -= 1; - if (index < 0) - index = EXPAND_LEN; - j--; - i++; - } -#endif - - trace_buffer_restore(tflags); -#endif -#endif -} -EXPORT_SYMBOL(dump_bfin_trace_buffer); - -#ifdef CONFIG_BUG -int is_valid_bugaddr(unsigned long addr) -{ - unsigned short opcode; - - if (!get_instruction(&opcode, (unsigned short *)addr)) - return 0; - - return opcode == BFIN_BUG_OPCODE; -} -#endif - -/* - * Checks to see if the address pointed to is either a - * 16-bit CALL instruction, or a 32-bit CALL instruction - */ -static bool is_bfin_call(unsigned short *addr) -{ - unsigned short opcode = 0, *ins_addr; - ins_addr = (unsigned short *)addr; - - if (!get_instruction(&opcode, ins_addr)) - return false; - - if ((opcode >= 0x0060 && opcode <= 0x0067) || - (opcode >= 0x0070 && opcode <= 0x0077)) - return true; - - ins_addr--; - if (!get_instruction(&opcode, ins_addr)) - return false; - - if (opcode >= 0xE300 && opcode <= 0xE3FF) - return true; - - return false; - -} - -void show_stack(struct task_struct *task, unsigned long *stack) -{ -#ifdef CONFIG_PRINTK - unsigned int *addr, *endstack, *fp = 0, *frame; - unsigned short *ins_addr; - char buf[150]; - unsigned int i, j, ret_addr, frame_no = 0; - - /* - * If we have been passed a specific stack, use that one otherwise - * if we have been passed a task structure, use that, otherwise - * use the stack of where the variable "stack" exists - */ - - if (stack == NULL) { - if (task) { - /* We know this is a kernel stack, so this is the start/end */ - stack = (unsigned long *)task->thread.ksp; - endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE); - } else { - /* print out the existing stack info */ - stack = (unsigned long *)&stack; - endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); - } - } else - endstack = (unsigned int *)PAGE_ALIGN((unsigned 
int)stack); - - printk(KERN_NOTICE "Stack info:\n"); - decode_address(buf, (unsigned int)stack); - printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); - - if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) { - printk(KERN_NOTICE "Invalid stack pointer\n"); - return; - } - - /* First thing is to look for a frame pointer */ - for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { - if (*addr & 0x1) - continue; - ins_addr = (unsigned short *)*addr; - ins_addr--; - if (is_bfin_call(ins_addr)) - fp = addr - 1; - - if (fp) { - /* Let's check to see if it is a frame pointer */ - while (fp >= (addr - 1) && fp < endstack - && fp && ((unsigned int) fp & 0x3) == 0) - fp = (unsigned int *)*fp; - if (fp == 0 || fp == endstack) { - fp = addr - 1; - break; - } - fp = 0; - } - } - if (fp) { - frame = fp; - printk(KERN_NOTICE " FP: (0x%p)\n", fp); - } else - frame = 0; - - /* - * Now that we think we know where things are, we - * walk the stack again, this time printing things out - * incase there is no frame pointer, we still look for - * valid return addresses - */ - - /* First time print out data, next time, print out symbols */ - for (j = 0; j <= 1; j++) { - if (j) - printk(KERN_NOTICE "Return addresses in stack:\n"); - else - printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack); - - fp = frame; - frame_no = 0; - - for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0; - addr < endstack; addr++, i++) { - - ret_addr = 0; - if (!j && i % 8 == 0) - printk(KERN_NOTICE "%p:",addr); - - /* if it is an odd address, or zero, just skip it */ - if (*addr & 0x1 || !*addr) - goto print; - - ins_addr = (unsigned short *)*addr; - - /* Go back one instruction, and see if it is a CALL */ - ins_addr--; - ret_addr = is_bfin_call(ins_addr); - print: - if (!j && stack == (unsigned long *)addr) - printk("[%08x]", *addr); - else if (ret_addr) - if (j) { - decode_address(buf, (unsigned int)*addr); - if (frame == addr) { - printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf); - continue; - } - printk(KERN_NOTICE " address : %s\n", buf); - } else - printk("<%08x>", *addr); - else if (fp == addr) { - if (j) - frame = addr+1; - else - printk("(%08x)", *addr); - - fp = (unsigned int *)*addr; - frame_no++; - - } else if (!j) - printk(" %08x ", *addr); - } - if (!j) - printk("\n"); - } -#endif -} -EXPORT_SYMBOL(show_stack); - -void dump_stack(void) +asmlinkage void double_fault_c(struct pt_regs *fp) { - unsigned long stack; #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON - int tflags; + int j; + trace_buffer_save(j); #endif - trace_buffer_save(tflags); - dump_bfin_trace_buffer(); - show_stack(current, &stack); - trace_buffer_restore(tflags); -} -EXPORT_SYMBOL(dump_stack); -void dump_bfin_process(struct pt_regs *fp) -{ + console_verbose(); + oops_in_progress = 1; #ifdef CONFIG_DEBUG_VERBOSE - /* We should be able to look at fp->ipend, but we don't push it on the - * stack all the time, so do this until we fix that */ - unsigned int context = bfin_read_IPEND(); - - if (oops_in_progress) - verbose_printk(KERN_EMERG "Kernel OOPS in progress\n"); - - if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) - verbose_printk(KERN_NOTICE "HW Error context\n"); - else if (context & 0x0020) - verbose_printk(KERN_NOTICE "Deferred Exception context\n"); - else if (context & 0x3FC0) - verbose_printk(KERN_NOTICE "Interrupt context\n"); - else if (context & 0x4000) - verbose_printk(KERN_NOTICE "Deferred Interrupt context\n"); - else 
if (context & 0x8000) - verbose_printk(KERN_NOTICE "Kernel process context\n"); - - /* Because we are crashing, and pointers could be bad, we check things - * pretty closely before we use them - */ - if ((unsigned long)current >= FIXED_CODE_START && - !((unsigned long)current & 0x3) && current->pid) { - verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n"); - if (current->comm >= (char *)FIXED_CODE_START) - verbose_printk(KERN_NOTICE "COMM=%s PID=%d", - current->comm, current->pid); - else - verbose_printk(KERN_NOTICE "COMM= invalid"); + printk(KERN_EMERG "Double Fault\n"); +#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT + if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { + unsigned int cpu = raw_smp_processor_id(); + char buf[150]; + decode_address(buf, cpu_pda[cpu].retx_doublefault); + printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", + (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); + decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); + printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); + decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); + printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); - printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu); - if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START) - verbose_printk(KERN_NOTICE - "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" - " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n", - (void *)current->mm->start_code, - (void *)current->mm->end_code, - (void *)current->mm->start_data, - (void *)current->mm->end_data, - (void *)current->mm->end_data, - (void *)current->mm->brk, - (void *)current->mm->start_stack); - else - verbose_printk(KERN_NOTICE "invalid mm\n"); + decode_address(buf, fp->retx); + printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); } else - verbose_printk(KERN_NOTICE - "No Valid process in current context\n"); -#endif -} - -void dump_bfin_mem(struct pt_regs *fp) -{ -#ifdef CONFIG_DEBUG_VERBOSE - unsigned short *addr, *erraddr, val = 0, err = 0; - char sti = 0, buf[6]; - - erraddr = (void *)fp->pc; - - verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr); - - for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10; - addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10; - addr++) { - if (!((unsigned long)addr & 0xF)) - verbose_printk(KERN_NOTICE "0x%p: ", addr); - - if (!get_instruction(&val, addr)) { - val = 0; - sprintf(buf, "????"); - } else - sprintf(buf, "%04x", val); - - if (addr == erraddr) { - verbose_printk("[%s]", buf); - err = val; - } else - verbose_printk(" %s ", buf); - - /* Do any previous instructions turn on interrupts? 
*/ - if (addr <= erraddr && /* in the past */ - ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */ - val == 0x017b)) /* [SP++] = RETI */ - sti = 1; - } - - verbose_printk("\n"); - - /* Hardware error interrupts can be deferred */ - if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR && - oops_in_progress)){ - verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n"); -#ifndef CONFIG_DEBUG_HWERR - verbose_printk(KERN_NOTICE -"The remaining message may be meaningless\n" -"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n"); -#else - /* If we are handling only one peripheral interrupt - * and current mm and pid are valid, and the last error - * was in that user space process's text area - * print it out - because that is where the problem exists - */ - if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) && - (current->pid && current->mm)) { - /* And the last RETI points to the current userspace context */ - if ((fp + 1)->pc >= current->mm->start_code && - (fp + 1)->pc <= current->mm->end_code) { - verbose_printk(KERN_NOTICE "It might be better to look around here : \n"); - verbose_printk(KERN_NOTICE "-------------------------------------------\n"); - show_regs(fp + 1); - verbose_printk(KERN_NOTICE "-------------------------------------------\n"); - } - } -#endif - } -#endif -} - -void show_regs(struct pt_regs *fp) -{ -#ifdef CONFIG_DEBUG_VERBOSE - char buf [150]; - struct irqaction *action; - unsigned int i; - unsigned long flags = 0; - unsigned int cpu = raw_smp_processor_id(); - unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); - - verbose_printk(KERN_NOTICE "\n"); - if (CPUID != bfin_cpuid()) - verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), " - "but running on:0x%04x (Rev %d)\n", - CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid()); - - verbose_printk(KERN_NOTICE "ADSP-%s-0.%d", - CPU, bfin_compiled_revid()); - - if (bfin_compiled_revid() != bfin_revid()) - verbose_printk("(Detected 0.%d)", bfin_revid()); - - verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n", - get_cclk()/1000000, get_sclk()/1000000, -#ifdef CONFIG_MPU - "mpu on" -#else - "mpu off" -#endif - ); - - verbose_printk(KERN_NOTICE "%s", linux_banner); - - verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted()); - verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n", - (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg); - if (fp->ipend & EVT_IRPTEN) - verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n"); - if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 | - EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR))) - verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n"); - if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14))) - verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n"); - if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) { - verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n", - (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14); -#ifdef EBIU_ERRMST - /* If the error was from the EBIU, print it out */ - if (bfin_read_EBIU_ERRMST() & CORE_ERROR) { - verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n", - bfin_read_EBIU_ERRMST()); - verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n", - bfin_read_EBIU_ERRADD()); - } #endif + { + dump_bfin_process(fp); + dump_bfin_mem(fp); + show_regs(fp); + 
dump_bfin_trace_buffer(); } - verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n", - fp->seqstat & SEQSTAT_EXCAUSE); - for (i = 2; i <= 15 ; i++) { - if (fp->ipend & (1 << i)) { - if (i != 4) { - decode_address(buf, bfin_read32(EVT0 + 4*i)); - verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf); - } else - verbose_printk(KERN_NOTICE " interrupts disabled\n"); - } - } - - /* if no interrupts are going off, don't print this out */ - if (fp->ipend & ~0x3F) { - for (i = 0; i < (NR_IRQS - 1); i++) { - if (!in_atomic) - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); - - action = irq_desc[i].action; - if (!action) - goto unlock; - - decode_address(buf, (unsigned int)action->handler); - verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf); - for (action = action->next; action; action = action->next) { - decode_address(buf, (unsigned int)action->handler); - verbose_printk(", %s", buf); - } - verbose_printk("\n"); -unlock: - if (!in_atomic) - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); - } - } - - decode_address(buf, fp->rete); - verbose_printk(KERN_NOTICE " RETE: %s\n", buf); - decode_address(buf, fp->retn); - verbose_printk(KERN_NOTICE " RETN: %s\n", buf); - decode_address(buf, fp->retx); - verbose_printk(KERN_NOTICE " RETX: %s\n", buf); - decode_address(buf, fp->rets); - verbose_printk(KERN_NOTICE " RETS: %s\n", buf); - decode_address(buf, fp->pc); - verbose_printk(KERN_NOTICE " PC : %s\n", buf); - - if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && - (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { - decode_address(buf, cpu_pda[cpu].dcplb_fault_addr); - verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf); - decode_address(buf, cpu_pda[cpu].icplb_fault_addr); - verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf); - } - - verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n"); - verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", - fp->r0, fp->r1, fp->r2, fp->r3); - verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", - fp->r4, fp->r5, fp->r6, fp->r7); - verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n", - fp->p0, fp->p1, fp->p2, fp->p3); - verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n", - fp->p4, fp->p5, fp->fp, (long)fp); - verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n", - fp->lb0, fp->lt0, fp->lc0); - verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n", - fp->lb1, fp->lt1, fp->lc1); - verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n", - fp->b0, fp->l0, fp->m0, fp->i0); - verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n", - fp->b1, fp->l1, fp->m1, fp->i1); - verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n", - fp->b2, fp->l2, fp->m2, fp->i2); - verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n", - fp->b3, fp->l3, fp->m3, fp->i3); - verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n", - fp->a0w, fp->a0x, fp->a1w, fp->a1x); - - verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n", - rdusp(), fp->astat); - - verbose_printk(KERN_NOTICE "\n"); #endif -} - -#ifdef CONFIG_SYS_BFIN_SPINLOCK_L1 -asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text)); -#endif - -static DEFINE_SPINLOCK(bfin_spinlock_lock); - -asmlinkage int sys_bfin_spinlock(int *p) -{ - int ret, tmp = 0; - - spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. 
*/ - ret = get_user(tmp, p); - if (likely(ret == 0)) { - if (unlikely(tmp)) - ret = 1; - else - put_user(1, p); - } - spin_unlock(&bfin_spinlock_lock); - return ret; -} - -int bfin_request_exception(unsigned int exception, void (*handler)(void)) -{ - void (*curr_handler)(void); - - if (exception > 0x3F) - return -EINVAL; - - curr_handler = ex_table[exception]; - - if (curr_handler != ex_replaceable) - return -EBUSY; - - ex_table[exception] = handler; + panic("Double Fault - unrecoverable event"); - return 0; } -EXPORT_SYMBOL(bfin_request_exception); - -int bfin_free_exception(unsigned int exception, void (*handler)(void)) -{ - void (*curr_handler)(void); - - if (exception > 0x3F) - return -EINVAL; - - curr_handler = ex_table[exception]; - if (curr_handler != handler) - return -EBUSY; - - ex_table[exception] = ex_replaceable; - - return 0; -} -EXPORT_SYMBOL(bfin_free_exception); void panic_cplb_error(int cplb_panic, struct pt_regs *fp) { @@ -1329,3 +559,23 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp) dump_stack(); panic("Unrecoverable event"); } + +#ifdef CONFIG_BUG +int is_valid_bugaddr(unsigned long addr) +{ + unsigned int opcode; + + if (!get_instruction(&opcode, (unsigned short *)addr)) + return 0; + + return opcode == BFIN_BUG_OPCODE; +} +#endif + +/* stub this out */ +#ifndef CONFIG_DEBUG_VERBOSE +void show_regs(struct pt_regs *fp) +{ + +} +#endif diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 66799e763dc..984c7817239 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -15,7 +15,12 @@ _jiffies = _jiffies_64; SECTIONS { +#ifdef CONFIG_RAMKERNEL . = CONFIG_BOOT_LOAD; +#else + . = CONFIG_ROM_BASE; +#endif + /* Neither the text, ro_data or bss section need to be aligned * So pack them back to back */ @@ -31,6 +36,12 @@ SECTIONS LOCK_TEXT IRQENTRY_TEXT KPROBES_TEXT +#ifdef CONFIG_ROMKERNEL + __sinittext = .; + INIT_TEXT + __einittext = .; + EXIT_TEXT +#endif *(.text.*) *(.fixup) @@ -50,8 +61,14 @@ SECTIONS /* Just in case the first read only is a 32-bit access */ RO_DATA(4) + __rodata_end = .; +#ifdef CONFIG_ROMKERNEL + . = CONFIG_BOOT_LOAD; + .bss : AT(__rodata_end) +#else .bss : +#endif { . = ALIGN(4); ___bss_start = .; @@ -67,7 +84,11 @@ SECTIONS ___bss_stop = .; } +#if defined(CONFIG_ROMKERNEL) + .data : AT(LOADADDR(.bss) + SIZEOF(.bss)) +#else .data : +#endif { __sdata = .; /* This gets done first, so the glob doesn't suck it in */ @@ -94,6 +115,8 @@ SECTIONS __edata = .; } + __data_lma = LOADADDR(.data); + __data_len = SIZEOF(.data); /* The init section should be last, so when we free it, it goes into * the general memory pool, and (hopefully) will decrease fragmentation @@ -103,25 +126,58 @@ SECTIONS . = ALIGN(PAGE_SIZE); ___init_begin = .; +#ifdef CONFIG_RAMKERNEL INIT_TEXT_SECTION(PAGE_SIZE) - . = ALIGN(16); - INIT_DATA_SECTION(16) - PERCPU(4) - /* we have to discard exit text and such at runtime, not link time, to + /* We have to discard exit text and such at runtime, not link time, to * handle embedded cross-section references (alt instructions, bug - * table, eh_frame, etc...) + * table, eh_frame, etc...). We need all of our .text up front and + * .data after it for PCREL call issues. */ .exit.text : { EXIT_TEXT } + + . 
= ALIGN(16); + INIT_DATA_SECTION(16) + PERCPU(4) + .exit.data : { EXIT_DATA } .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) +#else + .init.data : AT(__data_lma + __data_len) + { + __sinitdata = .; + INIT_DATA + INIT_SETUP(16) + INIT_CALLS + CON_INITCALL + SECURITY_INITCALL + INIT_RAM_FS + + . = ALIGN(4); + ___per_cpu_load = .; + ___per_cpu_start = .; + *(.data.percpu.first) + *(.data.percpu.page_aligned) + *(.data.percpu) + *(.data.percpu.shared_aligned) + ___per_cpu_end = .; + + EXIT_DATA + __einitdata = .; + } + __init_data_lma = LOADADDR(.init.data); + __init_data_len = SIZEOF(.init.data); + __init_data_end = .; + + .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len) +#endif { . = ALIGN(4); __stext_l1 = .; @@ -202,7 +258,11 @@ SECTIONS /* Force trailing alignment of our init section so that when we * free our init memory, we don't leave behind a partial page. */ +#ifdef CONFIG_RAMKERNEL . = __l2_lma + __l2_len; +#else + . = __init_data_end; +#endif . = ALIGN(PAGE_SIZE); ___init_end = .; |
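
The show_stack() walker removed above guesses return addresses by taking each even, non-zero word on the stack and testing whether the 16-bit instruction just before the address it points to decodes as a CALL (the is_bfin_call() check). A minimal userspace sketch of that heuristic is below; looks_like_call() is a hypothetical stand-in for the real opcode decoder, not the kernel's test.

/*
 * Illustration only: walk candidate stack slots and, for each even,
 * non-zero value, look at the 16-bit word just before it to decide
 * whether it could be the return address of a CALL.  The kernel uses
 * is_bfin_call(), which decodes real Blackfin opcodes; looks_like_call()
 * here is a placeholder that never matches.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for is_bfin_call(); always answers "no". */
static int looks_like_call(const uint16_t *insn)
{
	(void)insn;
	return 0;
}

static void scan_for_return_addrs(const uint32_t *stack, size_t nwords)
{
	for (size_t i = 0; i < nwords; i++) {
		uint32_t val = stack[i];

		/* Odd or zero values cannot be Blackfin code addresses. */
		if (!val || (val & 0x1))
			continue;

		/* The instruction *before* the stored address is the CALL. */
		if (looks_like_call((const uint16_t *)(uintptr_t)val - 1))
			printf("slot %zu: possible return address 0x%08x\n",
			       i, (unsigned)val);
	}
}

int main(void)
{
	uint32_t fake_stack[] = { 0x0, 0x3, 0x2000cafe, 0xffa00123 };

	scan_for_return_addrs(fake_stack,
			      sizeof(fake_stack) / sizeof(fake_stack[0]));
	printf("scan done\n");
	return 0;
}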
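
dump_bfin_mem() applies a related heuristic: it scans the opcodes leading up to the faulting PC for anything that re-enables interrupts — an STI (encoded 0x0040..0x0047) or a "[SP++] = RETI" pop (0x017b), per the constants above — because a hardware error raised while interrupts were off is delivered late, so the reported PC can be misleading. A self-contained sketch of that check, using a made-up instruction stream:

/*
 * Flag opcodes that would re-enable interrupts before the fault point,
 * using the same encodings the diff checks for.  The instruction stream
 * and fault position here are invented for the demo.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int reenables_interrupts(uint16_t opcode)
{
	return (opcode >= 0x0040 && opcode <= 0x0047) ||	/* STI */
	       opcode == 0x017b;				/* [SP++] = RETI */
}

int main(void)
{
	const uint16_t insns[] = { 0x1234, 0x0041, 0x5678 };
	size_t fault_index = 2;	/* pretend the last opcode faulted */

	for (size_t i = 0; i <= fault_index; i++)
		if (reenables_interrupts(insns[i]))
			printf("opcode %04x at slot %zu re-enabled interrupts; "
			       "the error may have been deferred\n",
			       (unsigned)insns[i], i);
	return 0;
}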
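
The CONFIG_DEBUG_DOUBLEFAULT_PRINT output added to double_fault_c() depends on state captured when the first exception was taken: the kernel stashes SEQSTAT, RETX and the CPLB fault addresses in per-CPU fields (cpu_pda[].retx_doublefault and friends) so the double-fault path can still say where the original exception came from. The sketch below only illustrates that record-then-report shape; the struct, function names and the 0x3f EXCAUSE mask are assumptions for the demo, not the kernel's definitions.

/* Save a few registers at the first exception; report them on a second one. */
#include <stdio.h>
#include <stdint.h>

struct fault_scratch {
	uint32_t seqstat;	/* cause of the first exception */
	uint32_t retx;		/* address it was taken at */
	uint32_t dcplb_addr;	/* data CPLB fault address, if any */
	uint32_t icplb_addr;	/* instruction CPLB fault address, if any */
};

static struct fault_scratch scratch[1];	/* one slot per CPU in the real thing */

static void record_first_exception(unsigned cpu, uint32_t seqstat,
				   uint32_t retx, uint32_t dcplb, uint32_t icplb)
{
	scratch[cpu] = (struct fault_scratch){ seqstat, retx, dcplb, icplb };
}

static void report_double_fault(unsigned cpu)
{
	printf("double fault while handling EXCAUSE 0x%x at 0x%08x\n",
	       (unsigned)(scratch[cpu].seqstat & 0x3f),	/* EXCAUSE mask assumed */
	       (unsigned)scratch[cpu].retx);
}

int main(void)
{
	record_first_exception(0, 0x26, 0x2000abcd, 0, 0);
	report_double_fault(0);
	return 0;
}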
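
The new CONFIG_BUG helper only treats a trapping address as a BUG() site when the instruction stored there can be read back and equals BFIN_BUG_OPCODE. A minimal userspace sketch of that shape follows; fetch_insn() and FAKE_BUG_OPCODE are placeholders for the kernel's get_instruction() and BFIN_BUG_OPCODE, whose actual encoding is not shown in this diff.

/* Validate a candidate BUG() address by reading back its opcode. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FAKE_BUG_OPCODE 0xdead	/* placeholder value, not the real opcode */

/* Stand-in for get_instruction(): copy 16 bits and report success. */
static int fetch_insn(uint16_t *opcode, const uint16_t *addr)
{
	memcpy(opcode, addr, sizeof(*opcode));
	return 1;
}

static int is_valid_bugaddr_sketch(const uint16_t *addr)
{
	uint16_t opcode;

	if (!fetch_insn(&opcode, addr))
		return 0;	/* unreadable memory is never a BUG() site */

	return opcode == FAKE_BUG_OPCODE;
}

int main(void)
{
	uint16_t code[] = { 0x1234, FAKE_BUG_OPCODE };

	printf("slot 0: %d, slot 1: %d\n",
	       is_valid_bugaddr_sketch(&code[0]),
	       is_valid_bugaddr_sketch(&code[1]));
	return 0;
}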
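
In vmlinux.lds.S, the CONFIG_ROMKERNEL layout places .bss and .data at RAM run addresses starting at CONFIG_BOOT_LOAD while their load addresses stay in flash via the AT(...) clauses, and it exports __data_lma/__data_len so startup code can copy .data into place before C code touches initialized data. The sketch below only illustrates that copy-then-clear pattern with plain arrays standing in for the linker-provided symbols; the kernel's own relocation happens in its early startup path and is not shown here.

/* Copy .data from its load (ROM) address to its run (RAM) address, then clear .bss. */
#include <stdio.h>
#include <string.h>

/*
 * Stand-ins for what the script provides on a real ROM kernel:
 * __data_lma/__data_len describe the ROM copy of .data, __sdata is its
 * RAM run address, ___bss_start/___bss_stop bound .bss.
 */
static const char rom_copy_of_data[] = { 'R', 'O', 'M', 0 };
static char ram_data[sizeof(rom_copy_of_data)];
static char fake_bss[8];

int main(void)
{
	/* 1. Relocate initialized data from its ROM image to RAM. */
	memcpy(ram_data, rom_copy_of_data, sizeof(rom_copy_of_data));

	/* 2. Zero-fill .bss in place; it has no load image at all. */
	memset(fake_bss, 0, sizeof(fake_bss));

	printf("relocated .data: %s\n", ram_data);
	return 0;
}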