From 6ffac1e90a17ea0aded5c581204397421eec91b6 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Thu, 24 Jul 2008 18:07:56 -0700 Subject: x64, fpu: fix possible FPU leakage in error conditions On Thu, Jul 24, 2008 at 03:43:44PM -0700, Linus Torvalds wrote: > So how about this patch as a starting point? This is the RightThing(tm) to > do regardless, and if it then makes it easier to do some other cleanups, > we should do it first. What do you think? restore_fpu_checking() calls init_fpu() in error conditions. While this is wrong (as our main intention is to clear the fpu state of the thread), this was benign before commit 92d140e21f1 ("x86: fix taking DNA during 64bit sigreturn"). Post commit 92d140e21f1, live FPU registers may not belong to this process in this error scenario. In the error condition for restore_fpu_checking() (especially during the 64bit signal return), we are doing init_fpu(), which saves the live FPU register state (possibly belonging to some other process context) into the thread struct (through unlazy_fpu() in init_fpu()). This is wrong and can leak the FPU data. For the signal handler restore error condition in restore_i387(), clear the fpu state present in the thread struct (before ultimately sending a SIGSEGV for badframe). For the paranoid error condition check in math_state_restore(), send a SIGSEGV if we fail to restore the state. Signed-off-by: Suresh Siddha Cc: Cc: Linus Torvalds Signed-off-by: Ingo Molnar --- include/asm-x86/i387.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index 96fa8449ff1..0048fb77afc 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@ -62,8 +62,6 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) #else : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0)); #endif - if (unlikely(err)) - init_fpu(current); return err; } -- cgit v1.2.3-70-g09d2 From e4e4e534faa3c2be4e165ce414f44b76ada7208c Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 14 Apr 2008 08:50:02 +0200 Subject: sched clock: revert various sched_clock() changes Found an interactivity problem on a quad core test-system - simple CPU loops would occasionally delay the system in an unacceptable way. After much debugging with Peter Zijlstra it turned out that the problem is caused by the string of sched_clock() changes - they caused the CPU clock to jump backwards a bit - which confuses the scheduler arithmetic (which is unsigned for performance reasons). So revert: # c300ba2: sched_clock: and multiplier for TSC to gtod drift # c0c8773: sched_clock: only update deltas with local reads. # af52a90: sched_clock: stop maximum check on NO HZ # f7cce27: sched_clock: widen the max and min time This solves the interactivity problems.
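For reference, the clamping that the revert restores in __update_sched_clock() boils down to the sketch below (illustrative only: the helper name is not from the patch, and locking plus the scd->prev_raw/tick_jiffies bookkeeping are omitted):

	static u64 clamp_to_gtod_window(u64 clock, s64 delta,
					u64 tick_gtod, long delta_jiffies)
	{
		/* gtod-derived window the per-cpu clock must stay inside */
		u64 min_clock = tick_gtod + delta_jiffies * TICK_NSEC;

		if (delta < 0) {
			clock++;			/* filter out backward motion */
		} else {
			u64 max_clock = min_clock + TICK_NSEC;

			if (clock + delta > max_clock) {
				if (clock < max_clock)
					clock = max_clock;	/* clip an oversized step */
				else
					clock++;		/* but stay monotonic */
			} else {
				clock += delta;
			}
		}

		if (clock < min_clock)
			clock = min_clock;		/* never fall behind the gtod window */

		return clock;
	}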
Signed-off-by: Ingo Molnar Acked-by: Peter Zijlstra Acked-by: Mike Galbraith --- include/linux/sched.h | 17 +------- kernel/sched_clock.c | 109 ++++++----------------------------------------- kernel/time/tick-sched.c | 2 - 3 files changed, 14 insertions(+), 114 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 5270d449ff9..ea436bc1a0e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1572,28 +1572,13 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } - -#ifdef CONFIG_NO_HZ -static inline void sched_clock_tick_stop(int cpu) -{ -} - -static inline void sched_clock_tick_start(int cpu) -{ -} -#endif - -#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ +#else extern void sched_clock_init(void); extern u64 sched_clock_cpu(int cpu); extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); -#ifdef CONFIG_NO_HZ -extern void sched_clock_tick_stop(int cpu); -extern void sched_clock_tick_start(int cpu); #endif -#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 5a2dc7d8fd9..9a7844158ae 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -44,11 +44,6 @@ unsigned long long __attribute__((weak)) sched_clock(void) #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -#define MULTI_SHIFT 15 -/* Max is double, Min is 1/2 */ -#define MAX_MULTI (2LL << MULTI_SHIFT) -#define MIN_MULTI (1LL << (MULTI_SHIFT-1)) - struct sched_clock_data { /* * Raw spinlock - this is a special case: this might be called @@ -62,10 +57,6 @@ struct sched_clock_data { u64 tick_raw; u64 tick_gtod; u64 clock; - s64 multi; -#ifdef CONFIG_NO_HZ - int check_max; -#endif }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); @@ -97,53 +88,18 @@ void sched_clock_init(void) scd->tick_raw = 0; scd->tick_gtod = ktime_now; scd->clock = ktime_now; - scd->multi = 1 << MULTI_SHIFT; -#ifdef CONFIG_NO_HZ - scd->check_max = 1; -#endif } sched_clock_running = 1; } -#ifdef CONFIG_NO_HZ -/* - * The dynamic ticks makes the delta jiffies inaccurate. This - * prevents us from checking the maximum time update. - * Disable the maximum check during stopped ticks. - */ -void sched_clock_tick_stop(int cpu) -{ - struct sched_clock_data *scd = cpu_sdc(cpu); - - scd->check_max = 0; -} - -void sched_clock_tick_start(int cpu) -{ - struct sched_clock_data *scd = cpu_sdc(cpu); - - scd->check_max = 1; -} - -static int check_max(struct sched_clock_data *scd) -{ - return scd->check_max; -} -#else -static int check_max(struct sched_clock_data *scd) -{ - return 1; -} -#endif /* CONFIG_NO_HZ */ - /* * update the percpu scd from the raw @now value * * - filter out backward motion * - use jiffies to generate a min,max window to clip the raw values */ -static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time) +static void __update_sched_clock(struct sched_clock_data *scd, u64 now) { unsigned long now_jiffies = jiffies; long delta_jiffies = now_jiffies - scd->tick_jiffies; @@ -152,31 +108,16 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim s64 delta = now - scd->prev_raw; WARN_ON_ONCE(!irqs_disabled()); - - /* - * At schedule tick the clock can be just under the gtod. We don't - * want to push it too prematurely. 
- */ - min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC); - if (min_clock > TICK_NSEC) - min_clock -= TICK_NSEC / 2; + min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC; if (unlikely(delta < 0)) { clock++; goto out; } - /* - * The clock must stay within a jiffie of the gtod. - * But since we may be at the start of a jiffy or the end of one - * we add another jiffy buffer. - */ - max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC; - - delta *= scd->multi; - delta >>= MULTI_SHIFT; + max_clock = min_clock + TICK_NSEC; - if (unlikely(clock + delta > max_clock) && check_max(scd)) { + if (unlikely(clock + delta > max_clock)) { if (clock < max_clock) clock = max_clock; else @@ -189,12 +130,9 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *tim if (unlikely(clock < min_clock)) clock = min_clock; - if (time) - *time = clock; - else { - scd->prev_raw = now; - scd->clock = clock; - } + scd->prev_raw = now; + scd->tick_jiffies = now_jiffies; + scd->clock = clock; } static void lock_double_clock(struct sched_clock_data *data1, @@ -238,26 +176,21 @@ u64 sched_clock_cpu(int cpu) now -= scd->tick_gtod; __raw_spin_unlock(&my_scd->lock); - - __update_sched_clock(scd, now, &clock); - - __raw_spin_unlock(&scd->lock); - } else { __raw_spin_lock(&scd->lock); - __update_sched_clock(scd, now, NULL); - clock = scd->clock; - __raw_spin_unlock(&scd->lock); } + __update_sched_clock(scd, now); + clock = scd->clock; + + __raw_spin_unlock(&scd->lock); + return clock; } void sched_clock_tick(void) { struct sched_clock_data *scd = this_scd(); - unsigned long now_jiffies = jiffies; - s64 mult, delta_gtod, delta_raw; u64 now, now_gtod; if (unlikely(!sched_clock_running)) @@ -269,29 +202,14 @@ void sched_clock_tick(void) now = sched_clock(); __raw_spin_lock(&scd->lock); - __update_sched_clock(scd, now, NULL); + __update_sched_clock(scd, now); /* * update tick_gtod after __update_sched_clock() because that will * already observe 1 new jiffy; adding a new tick_gtod to that would * increase the clock 2 jiffies. 
*/ - delta_gtod = now_gtod - scd->tick_gtod; - delta_raw = now - scd->tick_raw; - - if ((long)delta_raw > 0) { - mult = delta_gtod << MULTI_SHIFT; - do_div(mult, delta_raw); - scd->multi = mult; - if (scd->multi > MAX_MULTI) - scd->multi = MAX_MULTI; - else if (scd->multi < MIN_MULTI) - scd->multi = MIN_MULTI; - } else - scd->multi = 1 << MULTI_SHIFT; - scd->tick_raw = now; scd->tick_gtod = now_gtod; - scd->tick_jiffies = now_jiffies; __raw_spin_unlock(&scd->lock); } @@ -321,7 +239,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) __raw_spin_lock(&scd->lock); scd->prev_raw = now; scd->clock += delta_ns; - scd->multi = 1 << MULTI_SHIFT; __raw_spin_unlock(&scd->lock); touch_softlockup_watchdog(); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 825b4c00fe4..f5da526424a 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle) ts->tick_stopped = 1; ts->idle_jiffies = last_jiffies; rcu_enter_nohz(); - sched_clock_tick_stop(cpu); } /* @@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void) select_nohz_load_balancer(0); now = ktime_get(); tick_do_update_jiffies64(now); - sched_clock_tick_start(cpu); cpu_clear(cpu, nohz_cpu_mask); /* -- cgit v1.2.3-70-g09d2 From 419ca3f13532793b81aff09f80c60af3eacbb43d Mon Sep 17 00:00:00 2001 From: David Miller Date: Tue, 29 Jul 2008 21:45:03 -0700 Subject: lockdep: fix combinatorial explosion in lock subgraph traversal When we traverse the graph, either forwards or backwards, we are interested in whether a certain property exists somewhere in a node reachable in the graph. Therefore it is never necessary to traverse through a node more than once to get a correct answer to the given query. Take advantage of this property using a global ID counter so that we need not clear all the markers in all the lock_class entries before doing a traversal. A new ID is choosen when we start to traverse, and we continue through a lock_class only if it's ID hasn't been marked with the new value yet. This short-circuiting is essential especially for high CPU count systems. The scheduler has a runqueue per cpu, and needs to take two runqueue locks at a time, which leads to long chains of backwards and forwards subgraphs from these runqueue lock nodes. Without the short-circuit implemented here, a graph traversal on a runqueue lock can take up to (1 << (N - 1)) checks on a system with N cpus. For anything more than 16 cpus or so, lockdep will eventually bring the machine to a complete standstill. Signed-off-by: David S. 
Miller Acked-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 1 + kernel/lockdep.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/lockdep_internals.h | 3 ++ kernel/lockdep_proc.c | 34 ++---------------- 4 files changed, 93 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2486eb4edbf..1bfdc30bb0a 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -89,6 +89,7 @@ struct lock_class { struct lockdep_subclass_key *key; unsigned int subclass; + unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: diff --git a/kernel/lockdep.c b/kernel/lockdep.c index d38a6436297..6999e64fc24 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -372,6 +372,19 @@ unsigned int nr_process_chains; unsigned int max_lockdep_depth; unsigned int max_recursion_depth; +static unsigned int lockdep_dependency_gen_id; + +static bool lockdep_dependency_visit(struct lock_class *source, + unsigned int depth) +{ + if (!depth) + lockdep_dependency_gen_id++; + if (source->dep_gen_id == lockdep_dependency_gen_id) + return true; + source->dep_gen_id = lockdep_dependency_gen_id; + return false; +} + #ifdef CONFIG_DEBUG_LOCKDEP /* * We cannot printk in early bootup code. Not even early_printk() @@ -558,6 +571,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth) { struct lock_list *entry; + if (lockdep_dependency_visit(class, depth)) + return; + if (DEBUG_LOCKS_WARN_ON(depth >= 20)) return; @@ -959,6 +975,67 @@ static int noinline print_infinite_recursion_bug(void) return 0; } +unsigned long __lockdep_count_forward_deps(struct lock_class *class, + unsigned int depth) +{ + struct lock_list *entry; + unsigned long ret = 1; + + if (lockdep_dependency_visit(class, depth)) + return 0; + + /* + * Recurse this class's dependency list: + */ + list_for_each_entry(entry, &class->locks_after, entry) + ret += __lockdep_count_forward_deps(entry->class, depth + 1); + + return ret; +} + +unsigned long lockdep_count_forward_deps(struct lock_class *class) +{ + unsigned long ret, flags; + + local_irq_save(flags); + __raw_spin_lock(&lockdep_lock); + ret = __lockdep_count_forward_deps(class, 0); + __raw_spin_unlock(&lockdep_lock); + local_irq_restore(flags); + + return ret; +} + +unsigned long __lockdep_count_backward_deps(struct lock_class *class, + unsigned int depth) +{ + struct lock_list *entry; + unsigned long ret = 1; + + if (lockdep_dependency_visit(class, depth)) + return 0; + /* + * Recurse this class's dependency list: + */ + list_for_each_entry(entry, &class->locks_before, entry) + ret += __lockdep_count_backward_deps(entry->class, depth + 1); + + return ret; +} + +unsigned long lockdep_count_backward_deps(struct lock_class *class) +{ + unsigned long ret, flags; + + local_irq_save(flags); + __raw_spin_lock(&lockdep_lock); + ret = __lockdep_count_backward_deps(class, 0); + __raw_spin_unlock(&lockdep_lock); + local_irq_restore(flags); + + return ret; +} + /* * Prove that the dependency graph starting at can not * lead to . Print an error and return 0 if it does. 
@@ -968,6 +1045,9 @@ check_noncircular(struct lock_class *source, unsigned int depth) { struct lock_list *entry; + if (lockdep_dependency_visit(source, depth)) + return 1; + debug_atomic_inc(&nr_cyclic_check_recursions); if (depth > max_recursion_depth) max_recursion_depth = depth; @@ -1011,6 +1091,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth) struct lock_list *entry; int ret; + if (lockdep_dependency_visit(source, depth)) + return 1; + if (depth > max_recursion_depth) max_recursion_depth = depth; if (depth >= RECURSION_LIMIT) @@ -1050,6 +1133,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) struct lock_list *entry; int ret; + if (lockdep_dependency_visit(source, depth)) + return 1; + if (!__raw_spin_is_locked(&lockdep_lock)) return DEBUG_LOCKS_WARN_ON(1); diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index c3600a091a2..68d44ec77ab 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h @@ -53,6 +53,9 @@ extern unsigned int nr_process_chains; extern unsigned int max_lockdep_depth; extern unsigned int max_recursion_depth; +extern unsigned long lockdep_count_forward_deps(struct lock_class *); +extern unsigned long lockdep_count_backward_deps(struct lock_class *); + #ifdef CONFIG_DEBUG_LOCKDEP /* * Various lockdep statistics: diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index 9b0e940e254..6252ff799d1 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v) { } -static unsigned long count_forward_deps(struct lock_class *class) -{ - struct lock_list *entry; - unsigned long ret = 1; - - /* - * Recurse this class's dependency list: - */ - list_for_each_entry(entry, &class->locks_after, entry) - ret += count_forward_deps(entry->class); - - return ret; -} - -static unsigned long count_backward_deps(struct lock_class *class) -{ - struct lock_list *entry; - unsigned long ret = 1; - - /* - * Recurse this class's dependency list: - */ - list_for_each_entry(entry, &class->locks_before, entry) - ret += count_backward_deps(entry->class); - - return ret; -} - static void print_name(struct seq_file *m, struct lock_class *class) { char str[128]; @@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v) #ifdef CONFIG_DEBUG_LOCKDEP seq_printf(m, " OPS:%8ld", class->ops); #endif - nr_forward_deps = count_forward_deps(class); + nr_forward_deps = lockdep_count_forward_deps(class); seq_printf(m, " FD:%5ld", nr_forward_deps); - nr_backward_deps = count_backward_deps(class); + nr_backward_deps = lockdep_count_backward_deps(class); seq_printf(m, " BD:%5ld", nr_backward_deps); get_usage_chars(class, &c1, &c2, &c3, &c4); @@ -350,7 +322,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) nr_hardirq_read_unsafe++; - sum_forward_deps += count_forward_deps(class); + sum_forward_deps += lockdep_count_forward_deps(class); } #ifdef CONFIG_DEBUG_LOCKDEP DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); -- cgit v1.2.3-70-g09d2 From 5595cffc8248e4672c5803547445e85e4053c8fc Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 5 Aug 2008 09:28:47 +0300 Subject: SLUB: dynamic per-cache MIN_PARTIAL This patch changes the static MIN_PARTIAL to a dynamic per-cache ->min_partial value that is calculated from object size. The bigger the object size, the more pages we keep on the partial list. 
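Concretely, the new rule is just a clamped logarithm of the object size; a sketch of what init_kmem_cache_node() computes in the hunk further below (the helper name is illustrative; MIN_PARTIAL and MAX_PARTIAL are SLUB's existing bounds):

	/*
	 * Keep roughly log2(object size) pages per node on the partial list,
	 * bounded by MIN_PARTIAL/MAX_PARTIAL; e.g. ilog2(192) == 7 for a
	 * 192-byte cache.
	 */
	static unsigned long calculate_min_partial(struct kmem_cache *s)
	{
		unsigned long min = ilog2(s->size);

		if (min < MIN_PARTIAL)
			min = MIN_PARTIAL;
		else if (min > MAX_PARTIAL)
			min = MAX_PARTIAL;

		return min;
	}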
I tested SLAB, SLUB, and SLUB with this patch on Jens Axboe's 'netio' example script of the fio benchmarking tool. The script stresses the networking subsystem which should also give a fairly good beating of kmalloc() et al. To run the test yourself, first clone the fio repository: git clone git://git.kernel.dk/fio.git and then run the following command n times on your machine: time ./fio examples/netio The results on my 2-way 64-bit x86 machine are as follows: [ the minimum, maximum, and average are captured from 50 individual runs ] real time (seconds) min max avg sd SLAB 22.76 23.38 22.98 0.17 SLUB 22.80 25.78 23.46 0.72 SLUB (dynamic) 22.74 23.54 23.00 0.20 sys time (seconds) min max avg sd SLAB 6.90 8.28 7.70 0.28 SLUB 7.42 16.95 8.89 2.28 SLUB (dynamic) 7.17 8.64 7.73 0.29 user time (seconds) min max avg sd SLAB 36.89 38.11 37.50 0.29 SLUB 30.85 37.99 37.06 1.67 SLUB (dynamic) 36.75 38.07 37.59 0.32 As you can see from the above numbers, this patch brings SLUB to the same level as SLAB for this particular workload fixing a ~2% regression. I'd expect this change to help similar workloads that allocate a lot of objects that are close to the size of a page. Cc: Matthew Wilcox Cc: Andrew Morton Acked-by: Christoph Lameter Signed-off-by: Pekka Enberg --- include/linux/slub_def.h | 1 + mm/slub.c | 26 +++++++++++++++++++------- 2 files changed, 20 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5bad61a93f6..2f5c16b1aac 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -46,6 +46,7 @@ struct kmem_cache_cpu { struct kmem_cache_node { spinlock_t list_lock; /* Protect partial list and nr_partial */ unsigned long nr_partial; + unsigned long min_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG atomic_long_t nr_slabs; diff --git a/mm/slub.c b/mm/slub.c index c26d4c36fba..4f5b9614945 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1329,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags) n = get_node(s, zone_to_nid(zone)); if (n && cpuset_zone_allowed_hardwall(zone, flags) && - n->nr_partial > MIN_PARTIAL) { + n->nr_partial > n->min_partial) { page = get_partial_node(n); if (page) return page; @@ -1381,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail) slab_unlock(page); } else { stat(c, DEACTIVATE_EMPTY); - if (n->nr_partial < MIN_PARTIAL) { + if (n->nr_partial < n->min_partial) { /* * Adding an empty slab to the partial slabs in order * to avoid page allocator overhead. This slab needs @@ -1913,9 +1913,21 @@ static void init_kmem_cache_cpu(struct kmem_cache *s, #endif } -static void init_kmem_cache_node(struct kmem_cache_node *n) +static void +init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) { n->nr_partial = 0; + + /* + * The larger the object size is, the more pages we want on the partial + * list to avoid pounding the page allocator excessively. 
+ */ + n->min_partial = ilog2(s->size); + if (n->min_partial < MIN_PARTIAL) + n->min_partial = MIN_PARTIAL; + else if (n->min_partial > MAX_PARTIAL) + n->min_partial = MAX_PARTIAL; + spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG @@ -2087,7 +2099,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags, init_object(kmalloc_caches, n, 1); init_tracking(kmalloc_caches, n); #endif - init_kmem_cache_node(n); + init_kmem_cache_node(n, kmalloc_caches); inc_slabs_node(kmalloc_caches, node, page->objects); /* @@ -2144,7 +2156,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) } s->node[node] = n; - init_kmem_cache_node(n); + init_kmem_cache_node(n, s); } return 1; } @@ -2155,7 +2167,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s) static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) { - init_kmem_cache_node(&s->local_node); + init_kmem_cache_node(&s->local_node, s); return 1; } #endif @@ -2889,7 +2901,7 @@ static int slab_mem_going_online_callback(void *arg) ret = -ENOMEM; goto out; } - init_kmem_cache_node(n); + init_kmem_cache_node(n, s); s->node[nid] = n; } out: -- cgit v1.2.3-70-g09d2 From 0f0625d895bc5b3c3d7352486a94e5a75f10fd35 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 5 Aug 2008 17:10:58 +0800 Subject: Blackfin arch: remove useless mtd defines in uClinux dont bother protecting the mtd defines as anything that incorrectly uses it will get an error during link time anyways ... this prevents large pointless rebuilds of most files whenever the uclinux mtd map changes state Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- include/asm-blackfin/bfin-global.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index 320aa5e167e..9fbbee61f98 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h @@ -122,9 +122,8 @@ extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], _ebss_l2[], _l2_lma_start[]; -#ifdef CONFIG_MTD_UCLINUX +/* only used when CONFIG_MTD_UCLINUX */ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; -#endif #endif -- cgit v1.2.3-70-g09d2 From 4a88d0ce494034fbb8dd0076d80e71b38abf5748 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Tue, 5 Aug 2008 17:38:41 +0800 Subject: Blackfin arch: Functional power management support Merge VR Regulator Hibernate wakeups into set_irq_wake for internal interrupts. 
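In practice a driver keeps using the generic IRQ wake API and the arch code picks the matching VR_CTL wake bit; a rough sketch of the intended flow (IRQ_CAN0_RX is just an example wake source and the function name is illustrative):

	#include <linux/kernel.h>
	#include <linux/interrupt.h>

	/* Driver side: nothing Blackfin-specific is needed anymore. */
	static int example_enable_can_wakeup(void)
	{
		int err = enable_irq_wake(IRQ_CAN0_RX);

		if (err)
			printk(KERN_WARNING "CAN0 RX cannot wake the system\n");
		return err;
	}

	/*
	 * Arch side (see bfin_internal_set_wake() below): the IRQ is enabled
	 * in the right SIC_IWRx register and the corresponding VR_CTL wakeup
	 * bit (CANWE here) is accumulated in vr_wakeup, which pm.c ORs into
	 * the mask handed to do_hibernate().
	 */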
Signed-off-by: Michael Hennerich Signed-off-by: Bryan Wu --- arch/blackfin/Kconfig | 36 ------------------------ arch/blackfin/mach-common/ints-priority.c | 46 +++++++++++++++++++++++++++++-- arch/blackfin/mach-common/pm.c | 18 +----------- include/asm-blackfin/bfin-global.h | 1 + 4 files changed, 45 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 5a097c46bc4..cc2add7e39e 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -933,13 +933,6 @@ endchoice comment "Possible Suspend Mem / Hibernate Wake-Up Sources" depends on PM -config PM_BFIN_WAKE_RTC - bool "Allow Wake-Up from RESET and on-chip RTC" - depends on PM - default n - help - Enable RTC Wake-Up (Voltage Regulator Power-Up) - config PM_BFIN_WAKE_PH6 bool "Allow Wake-Up from on-chip PHY or PH6 GP" depends on PM && (BF52x || BF534 || BF536 || BF537) @@ -947,41 +940,12 @@ config PM_BFIN_WAKE_PH6 help Enable PHY and PH6 GP Wake-Up (Voltage Regulator Power-Up) -config PM_BFIN_WAKE_CAN - bool "Allow Wake-Up from on-chip CAN0/1" - depends on PM && (BF54x || BF534 || BF536 || BF537) - default n - help - Enable CAN0/1 Wake-Up (Voltage Regulator Power-Up) - config PM_BFIN_WAKE_GP bool "Allow Wake-Up from GPIOs" depends on PM && BF54x default n help Enable General-Purpose Wake-Up (Voltage Regulator Power-Up) - -config PM_BFIN_WAKE_USB - bool "Allow Wake-Up from on-chip USB" - depends on PM && (BF54x || BF52x) - default n - help - Enable USB Wake-Up (Voltage Regulator Power-Up) - -config PM_BFIN_WAKE_KEYPAD - bool "Allow Wake-Up from on-chip Keypad" - depends on PM && BF54x - default n - help - Enable Keypad Wake-Up (Voltage Regulator Power-Up) - -config PM_BFIN_WAKE_ROTARY - bool "Allow Wake-Up from on-chip Rotary" - depends on PM && BF54x - default n - help - Enable Rotary Wake-Up (Voltage Regulator Power-Up) - endmenu menu "CPU Frequency scaling" diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 64d746114e4..e713b9db867 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -71,6 +71,7 @@ atomic_t num_spurious; #ifdef CONFIG_PM unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */ +unsigned vr_wakeup; #endif struct ivgx { @@ -184,17 +185,56 @@ static void bfin_internal_unmask_irq(unsigned int irq) #ifdef CONFIG_PM int bfin_internal_set_wake(unsigned int irq, unsigned int state) { - unsigned bank, bit; + unsigned bank, bit, wakeup = 0; unsigned long flags; bank = SIC_SYSIRQ(irq) / 32; bit = SIC_SYSIRQ(irq) % 32; + switch (irq) { +#ifdef IRQ_RTC + case IRQ_RTC: + wakeup |= WAKE; + break; +#endif +#ifdef IRQ_CAN0_RX + case IRQ_CAN0_RX: + wakeup |= CANWE; + break; +#endif +#ifdef IRQ_CAN1_RX + case IRQ_CAN1_RX: + wakeup |= CANWE; + break; +#endif +#ifdef IRQ_USB_INT0 + case IRQ_USB_INT0: + wakeup |= USBWE; + break; +#endif +#ifdef IRQ_KEY + case IRQ_KEY: + wakeup |= KPADWE; + break; +#endif +#ifdef IRQ_CNT + case IRQ_CNT: + wakeup |= ROTWE; + break; +#endif + default: + break; + } + local_irq_save(flags); - if (state) + if (state) { bfin_sic_iwr[bank] |= (1 << bit); - else + vr_wakeup |= wakeup; + + } else { bfin_sic_iwr[bank] &= ~(1 << bit); + vr_wakeup &= ~wakeup; + } local_irq_restore(flags); diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c index 4fe6a2366b1..143134b852e 100644 --- a/arch/blackfin/mach-common/pm.c +++ b/arch/blackfin/mach-common/pm.c @@ -229,28 +229,12 @@ int bfin_pm_suspend_mem_enter(void) wakeup = 
bfin_read_VR_CTL() & ~FREQ; wakeup |= SCKELOW; - /* FIXME: merge this somehow with set_irq_wake */ -#ifdef CONFIG_PM_BFIN_WAKE_RTC - wakeup |= WAKE; -#endif #ifdef CONFIG_PM_BFIN_WAKE_PH6 wakeup |= PHYWE; #endif -#ifdef CONFIG_PM_BFIN_WAKE_CAN - wakeup |= CANWE; -#endif #ifdef CONFIG_PM_BFIN_WAKE_GP wakeup |= GPWE; #endif -#ifdef CONFIG_PM_BFIN_WAKE_USB - wakeup |= USBWE; -#endif -#ifdef CONFIG_PM_BFIN_WAKE_KEYPAD - wakeup |= KPADWE; -#endif -#ifdef CONFIG_PM_BFIN_WAKE_ROTARY - wakeup |= ROTWE; -#endif local_irq_save(flags); @@ -268,7 +252,7 @@ int bfin_pm_suspend_mem_enter(void) icache_disable(); bf53x_suspend_l1_mem(memptr); - do_hibernate(wakeup); /* Goodbye */ + do_hibernate(wakeup | vr_wakeup); /* Goodbye */ bf53x_resume_l1_mem(memptr); diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index 9fbbee61f98..93ae5335e8a 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h @@ -113,6 +113,7 @@ extern const char bfin_board_name[]; extern unsigned long wall_jiffies; extern unsigned long bfin_sic_iwr[]; +extern unsigned vr_wakeup; extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */ extern struct file_operations dpmc_fops; extern unsigned long _ramstart, _ramend, _rambase; -- cgit v1.2.3-70-g09d2 From 778307d372555f979cf6cef112a6d7fbff056cd9 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Wed, 6 Aug 2008 17:05:20 +0800 Subject: Blackfin arch: remove support for Anomaly 05000125 as it doesnt exist on any supported processor/silicon Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- arch/blackfin/mach-bf527/head.S | 19 ------ arch/blackfin/mach-bf533/head.S | 18 ----- arch/blackfin/mach-bf537/head.S | 39 +---------- arch/blackfin/mach-bf561/head.S | 17 ----- arch/blackfin/mach-common/Makefile | 2 +- arch/blackfin/mach-common/cacheinit.S | 77 ---------------------- include/asm-blackfin/mach-common/cdef_LPBlackfin.h | 8 --- 7 files changed, 2 insertions(+), 178 deletions(-) delete mode 100644 arch/blackfin/mach-common/cacheinit.S (limited to 'include') diff --git a/arch/blackfin/mach-bf527/head.S b/arch/blackfin/mach-bf527/head.S index fe05cc1ef17..a16a2657528 100644 --- a/arch/blackfin/mach-bf527/head.S +++ b/arch/blackfin/mach-bf527/head.S @@ -105,17 +105,8 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENICPLB; R0 = R0 & R1; - - /* Anomaly 05000125 */ -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif /* Turn off the dcache */ p0.l = LO(DMEM_CONTROL); @@ -123,18 +114,8 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENDCPLB; R0 = R0 & R1; - - /* Anomaly 05000125 */ -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif - #if defined(CONFIG_BF527) p0.h = hi(EMAC_SYSTAT); diff --git a/arch/blackfin/mach-bf533/head.S b/arch/blackfin/mach-bf533/head.S index c671e8549b1..fb49169c0e7 100644 --- a/arch/blackfin/mach-bf533/head.S +++ b/arch/blackfin/mach-bf533/head.S @@ -116,17 +116,8 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENICPLB; R0 = R0 & R1; - - /* Anomaly 05000125 */ -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif /* Turn off the dcache */ p0.l = LO(DMEM_CONTROL); @@ -134,17 +125,8 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENDCPLB; R0 = R0 & R1; - - /* Anomaly 05000125 */ -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif /* Initialise UART - when booting from u-boot, the UART is not disabled * so if we dont 
initalize here, our serial console gets hosed */ diff --git a/arch/blackfin/mach-bf537/head.S b/arch/blackfin/mach-bf537/head.S index 6b019eaee0b..5bc89bbb89d 100644 --- a/arch/blackfin/mach-bf537/head.S +++ b/arch/blackfin/mach-bf537/head.S @@ -105,17 +105,8 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENICPLB; R0 = R0 & R1; - - /* Anomaly 05000125 */ -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif /* Turn off the dcache */ p0.l = LO(DMEM_CONTROL); @@ -123,48 +114,20 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENDCPLB; R0 = R0 & R1; - - /* Anomaly 05000125 */ -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif /* Initialise General-Purpose I/O Modules on BF537 */ - /* Rev 0.0 Anomaly 05000212 - PORTx_FER, - * PORT_MUX Registers Do Not accept "writes" correctly: - */ p0.h = hi(BFIN_PORT_MUX); p0.l = lo(BFIN_PORT_MUX); -#if ANOMALY_05000212 - R0.L = W[P0]; /* Read */ - SSYNC; -#endif R0 = (PGDE_UART | PFTE_UART)(Z); -#if ANOMALY_05000212 - W[P0] = R0.L; /* Write */ - SSYNC; -#endif W[P0] = R0.L; /* Enable both UARTS */ SSYNC; + /* Enable peripheral function of PORTF for UART0 and UART1 */ p0.h = hi(PORTF_FER); p0.l = lo(PORTF_FER); -#if ANOMALY_05000212 - R0.L = W[P0]; /* Read */ - SSYNC; -#endif R0 = 0x000F(Z); -#if ANOMALY_05000212 - W[P0] = R0.L; /* Write */ - SSYNC; -#endif - /* Enable peripheral function of PORTF for UART0 and UART1 */ W[P0] = R0.L; SSYNC; diff --git a/arch/blackfin/mach-bf561/head.S b/arch/blackfin/mach-bf561/head.S index cf1a2dff01e..0a1443b6b46 100644 --- a/arch/blackfin/mach-bf561/head.S +++ b/arch/blackfin/mach-bf561/head.S @@ -105,16 +105,8 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENICPLB; R0 = R0 & R1; - -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif /* Turn off the dcache */ p0.l = LO(DMEM_CONTROL); @@ -122,17 +114,8 @@ ENTRY(__start) R1 = [p0]; R0 = ~ENDCPLB; R0 = R0 & R1; - - /* Anomaly 05000125 */ -#if ANOMALY_05000125 - CLI R2; - SSYNC; -#endif [p0] = R0; SSYNC; -#if ANOMALY_05000125 - STI R2; -#endif /* Initialise UART - when booting from u-boot, the UART is not disabled * so if we dont initalize here, our serial console gets hosed */ diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile index 422bfee34ad..5e6b20e423d 100644 --- a/arch/blackfin/mach-common/Makefile +++ b/arch/blackfin/mach-common/Makefile @@ -3,7 +3,7 @@ # obj-y := \ - cache.o cacheinit.o entry.o \ + cache.o entry.o \ interrupt.o lock.o irqpanic.o arch_checks.o ints-priority.o obj-$(CONFIG_PM) += pm.o dpmc_modes.o diff --git a/arch/blackfin/mach-common/cacheinit.S b/arch/blackfin/mach-common/cacheinit.S deleted file mode 100644 index 22fada0c1cb..00000000000 --- a/arch/blackfin/mach-common/cacheinit.S +++ /dev/null @@ -1,77 +0,0 @@ -/* - * File: arch/blackfin/mach-common/cacheinit.S - * Based on: - * Author: LG Soft India - * - * Created: ? - * Description: cache initialization - * - * Modified: - * Copyright 2004-2006 Analog Devices Inc. - * - * Bugs: Enter bugs at http://blackfin.uclinux.org/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see the file COPYING, or write - * to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* This function sets up the data and instruction cache. The - * tables like icplb table, dcplb table and Page Descriptor table - * are defined in cplbtab.h. You can configure those tables for - * your suitable requirements - */ - -#include -#include - -.text - -#if ANOMALY_05000125 -#if defined(CONFIG_BFIN_ICACHE) -ENTRY(_bfin_write_IMEM_CONTROL) - - /* Enable Instruction Cache */ - P0.l = LO(IMEM_CONTROL); - P0.h = HI(IMEM_CONTROL); - - /* Anomaly 05000125 */ - CLI R1; - SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ - .align 8; - [P0] = R0; - SSYNC; - STI R1; - RTS; - -ENDPROC(_bfin_write_IMEM_CONTROL) -#endif - -#if defined(CONFIG_BFIN_DCACHE) -ENTRY(_bfin_write_DMEM_CONTROL) - P0.l = LO(DMEM_CONTROL); - P0.h = HI(DMEM_CONTROL); - - CLI R1; - SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */ - .align 8; - [P0] = R0; - SSYNC; - STI R1; - RTS; - -ENDPROC(_bfin_write_DMEM_CONTROL) -#endif - -#endif diff --git a/include/asm-blackfin/mach-common/cdef_LPBlackfin.h b/include/asm-blackfin/mach-common/cdef_LPBlackfin.h index ede210eca4e..d39c396f850 100644 --- a/include/asm-blackfin/mach-common/cdef_LPBlackfin.h +++ b/include/asm-blackfin/mach-common/cdef_LPBlackfin.h @@ -39,11 +39,7 @@ #define bfin_read_SRAM_BASE_ADDRESS() bfin_read32(SRAM_BASE_ADDRESS) #define bfin_write_SRAM_BASE_ADDRESS(val) bfin_write32(SRAM_BASE_ADDRESS,val) #define bfin_read_DMEM_CONTROL() bfin_read32(DMEM_CONTROL) -#if ANOMALY_05000125 -extern void bfin_write_DMEM_CONTROL(unsigned int val); -#else #define bfin_write_DMEM_CONTROL(val) bfin_write32(DMEM_CONTROL,val) -#endif #define bfin_read_DCPLB_STATUS() bfin_read32(DCPLB_STATUS) #define bfin_write_DCPLB_STATUS(val) bfin_write32(DCPLB_STATUS,val) #define bfin_read_DCPLB_FAULT_ADDR() bfin_read32(DCPLB_FAULT_ADDR) @@ -129,11 +125,7 @@ extern void bfin_write_DMEM_CONTROL(unsigned int val); #define DTEST_DATA3 0xFFE0040C */ #define bfin_read_IMEM_CONTROL() bfin_read32(IMEM_CONTROL) -#if ANOMALY_05000125 -extern void bfin_write_IMEM_CONTROL(unsigned int val); -#else #define bfin_write_IMEM_CONTROL(val) bfin_write32(IMEM_CONTROL,val) -#endif #define bfin_read_ICPLB_STATUS() bfin_read32(ICPLB_STATUS) #define bfin_write_ICPLB_STATUS(val) bfin_write32(ICPLB_STATUS,val) #define bfin_read_ICPLB_FAULT_ADDR() bfin_read32(ICPLB_FAULT_ADDR) -- cgit v1.2.3-70-g09d2 From 07aa7be5708afb3d9afa68f6f853c98e51bc64b3 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Wed, 13 Aug 2008 16:16:11 +0800 Subject: Blackfin arch: convert L2 defines to be the same as the L1 defines Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- arch/blackfin/kernel/cplb-nompu/cplbinit.c | 6 +----- arch/blackfin/kernel/setup.c | 14 +++++++------- arch/blackfin/kernel/traps.c | 2 +- arch/blackfin/kernel/vmlinux.lds.S | 10 ++++------ arch/blackfin/mm/blackfin_sram.c | 12 ++++++------ include/asm-blackfin/mach-bf527/mem_map.h | 5 +++++ include/asm-blackfin/mach-bf533/mem_map.h | 5 +++++ include/asm-blackfin/mach-bf537/mem_map.h | 5 +++++ 8 files changed, 34 insertions(+), 25 deletions(-) 
(limited to 'include') diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c index 224e7cc30bc..728f708d398 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c @@ -164,17 +164,13 @@ static struct cplb_desc cplb_data[] = { .name = "Asynchronous Memory Banks", }, { -#ifdef L2_START .start = L2_START, .end = L2_START + L2_LENGTH, .psize = SIZE_1M, .attr = SWITCH_T | I_CPLB | D_CPLB, .i_conf = L2_MEMORY, .d_conf = L2_MEMORY, - .valid = 1, -#else - .valid = 0, -#endif + .valid = (L2_LENGTH > 0), .name = "L2 Memory", }, { diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 15967e7578c..936c06d820d 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -131,14 +131,14 @@ void __init bf53x_relocate_l1_mem(void) dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + l1_data_a_length, l1_data_b_length); -#ifdef L2_LENGTH - l2_length = _ebss_l2 - _stext_l2; - if (l2_length > L2_LENGTH) - panic("L2 SRAM Overflow\n"); + if (L2_LENGTH != 0) { + l2_length = _ebss_l2 - _stext_l2; + if (l2_length > L2_LENGTH) + panic("L2 SRAM Overflow\n"); - /* Copy _stext_l2 to _edata_l2 to L2 SRAM */ - dma_memcpy(_stext_l2, _l2_lma_start, l2_length); -#endif + /* Copy _stext_l2 to _edata_l2 to L2 SRAM */ + dma_memcpy(_stext_l2, _l2_lma_start, l2_length); + } } /* add_memory_region to memmap */ diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index ad922ab9154..62a47d67d87 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -567,7 +567,7 @@ bool get_instruction(unsigned short *val, unsigned short *address) * we don't read something in the async space that can hang forever */ if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) || -#ifdef L2_START +#if L2_LENGTH != 0 (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) || #endif (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) || diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index d062597e621..7d12c6692a6 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -102,7 +102,7 @@ SECTIONS #if !L1_DATA_B_LENGTH *(.l1.data.B) #endif -#ifndef L2_LENGTH +#if !L2_LENGTH . = ALIGN(32); *(.data_l2.cacheline_aligned) *(.l2.data) @@ -212,20 +212,19 @@ SECTIONS __ebss_b_l1 = .; } -#ifdef L2_LENGTH __l2_lma_start = .; .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1)) { . = ALIGN(4); __stext_l2 = .; - *(.l1.text) + *(.l2.text) . = ALIGN(4); __etext_l2 = .; . = ALIGN(4); __sdata_l2 = .; - *(.l1.data) + *(.l2.data) __edata_l2 = .; . = ALIGN(32); @@ -233,11 +232,10 @@ SECTIONS . = ALIGN(4); __sbss_l2 = .; - *(.l1.bss) + *(.l2.bss) . = ALIGN(4); __ebss_l2 = .; } -#endif /* Force trailing alignment of our init section so that when we * free our init memory, we don't leave behind a partial page. 
diff --git a/arch/blackfin/mm/blackfin_sram.c b/arch/blackfin/mm/blackfin_sram.c index 5af3c31c936..9d2be43ac3d 100644 --- a/arch/blackfin/mm/blackfin_sram.c +++ b/arch/blackfin/mm/blackfin_sram.c @@ -66,7 +66,7 @@ static struct sram_piece free_l1_data_B_sram_head, used_l1_data_B_sram_head; static struct sram_piece free_l1_inst_sram_head, used_l1_inst_sram_head; #endif -#ifdef L2_LENGTH +#if L2_LENGTH != 0 static struct sram_piece free_l2_sram_head, used_l2_sram_head; #endif @@ -175,7 +175,7 @@ static void __init l1_inst_sram_init(void) static void __init l2_sram_init(void) { -#ifdef L2_LENGTH +#if L2_LENGTH != 0 free_l2_sram_head.next = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); if (!free_l2_sram_head.next) { @@ -367,7 +367,7 @@ int sram_free(const void *addr) && addr < (void *)(L1_DATA_B_START + L1_DATA_B_LENGTH)) return l1_data_B_sram_free(addr); #endif -#ifdef L2_LENGTH +#if L2_LENGTH != 0 else if (addr >= (void *)L2_START && addr < (void *)(L2_START + L2_LENGTH)) return l2_sram_free(addr); @@ -604,7 +604,7 @@ int l1sram_free(const void *addr) void *l2_sram_alloc(size_t size) { -#ifdef L2_LENGTH +#if L2_LENGTH != 0 unsigned flags; void *addr; @@ -640,7 +640,7 @@ EXPORT_SYMBOL(l2_sram_zalloc); int l2_sram_free(const void *addr) { -#ifdef L2_LENGTH +#if L2_LENGTH != 0 unsigned flags; int ret; @@ -779,7 +779,7 @@ static int sram_proc_read(char *buf, char **start, off_t offset, int count, &free_l1_inst_sram_head, &used_l1_inst_sram_head)) goto not_done; #endif -#ifdef L2_LENGTH +#if L2_LENGTH != 0 if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head, &used_l2_sram_head)) goto not_done; diff --git a/include/asm-blackfin/mach-bf527/mem_map.h b/include/asm-blackfin/mach-bf527/mem_map.h index 193082deaa4..ef46dc991cd 100644 --- a/include/asm-blackfin/mach-bf527/mem_map.h +++ b/include/asm-blackfin/mach-bf527/mem_map.h @@ -89,6 +89,11 @@ #define BFIN_DSUPBANKS 0 #endif /*CONFIG_BFIN_DCACHE */ +/* Level 2 Memory - none */ + +#define L2_START 0 +#define L2_LENGTH 0 + /* Scratch Pad Memory */ #define L1_SCRATCH_START 0xFFB00000 diff --git a/include/asm-blackfin/mach-bf533/mem_map.h b/include/asm-blackfin/mach-bf533/mem_map.h index bd30b6f3be0..581fc6eea78 100644 --- a/include/asm-blackfin/mach-bf533/mem_map.h +++ b/include/asm-blackfin/mach-bf533/mem_map.h @@ -158,6 +158,11 @@ #endif +/* Level 2 Memory - none */ + +#define L2_START 0 +#define L2_LENGTH 0 + /* Scratch Pad Memory */ #define L1_SCRATCH_START 0xFFB00000 diff --git a/include/asm-blackfin/mach-bf537/mem_map.h b/include/asm-blackfin/mach-bf537/mem_map.h index 5c6726d6f3b..5078b669431 100644 --- a/include/asm-blackfin/mach-bf537/mem_map.h +++ b/include/asm-blackfin/mach-bf537/mem_map.h @@ -166,6 +166,11 @@ #endif +/* Level 2 Memory - none */ + +#define L2_START 0 +#define L2_LENGTH 0 + /* Scratch Pad Memory */ #define L1_SCRATCH_START 0xFFB00000 -- cgit v1.2.3-70-g09d2 From 56f5f59052bb662a77d5ffd6cbe5861a2ef2407c Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Wed, 6 Aug 2008 17:55:32 +0800 Subject: Blackfin arch: Fix Bug - System with EMAC driver enabled - Core not idling - Disable all bits in SIC_IWR unless we are going into a real (DPMC) power saving mode. Any Interrupt can wake the core form it's idle state. - Remove deep sleep mode as it is not going to be used anywhere: We support sleep, sleep deeper and hibernate. 
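The resulting convention, sketched below, is that no wake bits stay set during normal operation and the desired wake sources are passed explicitly only when a real DPMC state is entered (the mask variables are placeholders; the prototypes are the ones kept in asm/dpmc.h):

	/* Normal operation (boot/resume paths in the patch below): */
	bfin_write_SIC_IWR(IWR_DISABLE_ALL);

	/* Entering a DPMC power-saving state: hand over the wake mask. */
	sleep_deeper(iwr0_mask, iwr1_mask, iwr2_mask);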
Signed-off-by: Michael Hennerich Signed-off-by: Bryan Wu --- arch/blackfin/mach-bf527/head.S | 7 ---- arch/blackfin/mach-bf533/head.S | 7 ---- arch/blackfin/mach-bf537/head.S | 7 ---- arch/blackfin/mach-bf548/head.S | 7 ---- arch/blackfin/mach-common/dpmc_modes.S | 56 ------------------------------- arch/blackfin/mach-common/ints-priority.c | 8 ++--- arch/blackfin/mach-common/pm.c | 8 ++--- include/asm-blackfin/dpmc.h | 1 - 8 files changed, 8 insertions(+), 93 deletions(-) (limited to 'include') diff --git a/arch/blackfin/mach-bf527/head.S b/arch/blackfin/mach-bf527/head.S index af20183d0d9..2cc46f8fa9a 100644 --- a/arch/blackfin/mach-bf527/head.S +++ b/arch/blackfin/mach-bf527/head.S @@ -183,13 +183,6 @@ ENTRY(_start_dma_code) [P2] = R1; SSYNC; - p0.h = hi(SIC_IWR0); - p0.l = lo(SIC_IWR0); - r0.l = lo(IWR_ENABLE_ALL); - r0.h = hi(IWR_ENABLE_ALL); - [p0] = r0; - SSYNC; - RTS; ENDPROC(_start_dma_code) #endif /* CONFIG_BFIN_KERNEL_CLOCK */ diff --git a/arch/blackfin/mach-bf533/head.S b/arch/blackfin/mach-bf533/head.S index 6603967367e..184296bee3c 100644 --- a/arch/blackfin/mach-bf533/head.S +++ b/arch/blackfin/mach-bf533/head.S @@ -177,13 +177,6 @@ ENTRY(_start_dma_code) [P2] = R1; SSYNC; - p0.h = hi(SIC_IWR); - p0.l = lo(SIC_IWR); - r0.l = lo(IWR_ENABLE_ALL); - r0.h = hi(IWR_ENABLE_ALL); - [p0] = r0; - SSYNC; - RTS; ENDPROC(_start_dma_code) #endif /* CONFIG_BFIN_KERNEL_CLOCK */ diff --git a/arch/blackfin/mach-bf537/head.S b/arch/blackfin/mach-bf537/head.S index 6a02e472587..c02c8ce2d96 100644 --- a/arch/blackfin/mach-bf537/head.S +++ b/arch/blackfin/mach-bf537/head.S @@ -197,13 +197,6 @@ ENTRY(_start_dma_code) [P2] = R1; SSYNC; - p0.h = hi(SIC_IWR); - p0.l = lo(SIC_IWR); - r0.l = lo(IWR_ENABLE_ALL); - r0.h = hi(IWR_ENABLE_ALL); - [p0] = r0; - SSYNC; - RTS; ENDPROC(_start_dma_code) #endif /* CONFIG_BFIN_KERNEL_CLOCK */ diff --git a/arch/blackfin/mach-bf548/head.S b/arch/blackfin/mach-bf548/head.S index cf94e1e222b..0b18196df86 100644 --- a/arch/blackfin/mach-bf548/head.S +++ b/arch/blackfin/mach-bf548/head.S @@ -201,13 +201,6 @@ ENTRY(_start_dma_code) SSYNC; #endif - p0.h = hi(SIC_IWR0); - p0.l = lo(SIC_IWR0); - r0.l = lo(IWR_ENABLE_ALL); - r0.h = hi(IWR_ENABLE_ALL); - [p0] = r0; - SSYNC; - RTS; ENDPROC(_start_dma_code) #endif /* CONFIG_BFIN_KERNEL_CLOCK */ diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S index 5e3f1d8a4fb..838b0b2ce9a 100644 --- a/arch/blackfin/mach-common/dpmc_modes.S +++ b/arch/blackfin/mach-common/dpmc_modes.S @@ -78,62 +78,6 @@ ENTRY(_hibernate_mode) jump .Lforever; ENDPROC(_hibernate_mode) -ENTRY(_deep_sleep) - [--SP] = ( R7:0, P5:0 ); - [--SP] = RETS; - - CLI R4; - - R0 = IWR_ENABLE(0); - R1 = IWR_DISABLE_ALL; - R2 = IWR_DISABLE_ALL; - - call _set_sic_iwr; - - call _set_dram_srfs; - - /* Clear all the interrupts,bits sticky */ - R0 = 0xFFFF (Z); - call _set_rtc_istat - - P0.H = hi(PLL_CTL); - P0.L = lo(PLL_CTL); - R0 = W[P0](z); - BITSET (R0, 5); - W[P0] = R0.L; - - call _test_pll_locked; - - SSYNC; - IDLE; - - call _unset_dram_srfs; - - call _test_pll_locked; - - R0 = IWR_ENABLE(0); - R1 = IWR_DISABLE_ALL; - R2 = IWR_DISABLE_ALL; - - call _set_sic_iwr; - - P0.H = hi(PLL_CTL); - P0.L = lo(PLL_CTL); - R0 = w[p0](z); - BITCLR (R0, 3); - BITCLR (R0, 5); - BITCLR (R0, 8); - w[p0] = R0; - IDLE; - call _test_pll_locked; - - STI R4; - - RETS = [SP++]; - ( R7:0, P5:0 ) = [SP++]; - RTS; -ENDPROC(_deep_sleep) - ENTRY(_sleep_deeper) [--SP] = ( R7:0, P5:0 ); [--SP] = RETS; diff --git a/arch/blackfin/mach-common/ints-priority.c 
b/arch/blackfin/mach-common/ints-priority.c index e713b9db867..4271ef3f201 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -1068,13 +1068,13 @@ int __init init_arch_irq(void) IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) - bfin_write_SIC_IWR0(IWR_ENABLE_ALL); - bfin_write_SIC_IWR1(IWR_ENABLE_ALL); + bfin_write_SIC_IWR0(IWR_DISABLE_ALL); + bfin_write_SIC_IWR1(IWR_DISABLE_ALL); # ifdef CONFIG_BF54x - bfin_write_SIC_IWR2(IWR_ENABLE_ALL); + bfin_write_SIC_IWR2(IWR_DISABLE_ALL); # endif #else - bfin_write_SIC_IWR(IWR_ENABLE_ALL); + bfin_write_SIC_IWR(IWR_DISABLE_ALL); #endif return 0; diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c index 143134b852e..a17ace3e0e4 100644 --- a/arch/blackfin/mach-common/pm.c +++ b/arch/blackfin/mach-common/pm.c @@ -83,13 +83,13 @@ void bfin_pm_suspend_standby_enter(void) bfin_pm_standby_restore(); #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) - bfin_write_SIC_IWR0(IWR_ENABLE_ALL); - bfin_write_SIC_IWR1(IWR_ENABLE_ALL); + bfin_write_SIC_IWR0(IWR_DISABLE_ALL); + bfin_write_SIC_IWR1(IWR_DISABLE_ALL); # ifdef CONFIG_BF54x - bfin_write_SIC_IWR2(IWR_ENABLE_ALL); + bfin_write_SIC_IWR2(IWR_DISABLE_ALL); # endif #else - bfin_write_SIC_IWR(IWR_ENABLE_ALL); + bfin_write_SIC_IWR(IWR_DISABLE_ALL); #endif local_irq_restore(flags); diff --git a/include/asm-blackfin/dpmc.h b/include/asm-blackfin/dpmc.h index de28e6e018b..96e8208f929 100644 --- a/include/asm-blackfin/dpmc.h +++ b/include/asm-blackfin/dpmc.h @@ -11,7 +11,6 @@ #ifndef __ASSEMBLY__ void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); -void deep_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); void hibernate_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); void sleep_deeper(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); void do_hibernate(int wakeup); -- cgit v1.2.3-70-g09d2 From bba81165867313766534dd31603de51bdd36ef9b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 30 Jul 2008 12:07:04 -0700 Subject: PCI: make pci_register_driver() a macro alpha: CC [M] drivers/usb/gadget/u_ether.o In file included from include/asm/dma-mapping.h:7, from include/linux/dma-mapping.h:52, from include/linux/dmaengine.h:29, from include/linux/skbuff.h:29, from include/linux/if_ether.h:114, from include/linux/etherdevice.h:27, from drivers/usb/gadget/u_ether.c:29: include/linux/pci.h: In function 'pci_register_driver': include/linux/pci.h:673: error: 'KBUILD_MODNAME' undeclared (first use in this function) include/linux/pci.h:673: error: (Each undeclared identifier is reported only once include/linux/pci.h:673: error: for each function it appears in.) Sam says: The problem is that u_ether.o is used by two modules so when we build it KBUILD_MODNAME is not defined because kbuild does not know what value to use. And in pci.h we have the following inline: static inline int __must_check pci_register_driver(struct pci_driver *driver) { return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME); } And alpha uses dma-mapping.h to nullify a number of functions that seem to require something from pci.h. Making it a macro fixes this particular problem. However, the underlying issue of a file using KBUILD_MODNAME and being shared between multiple modules is *not* addressed. I guess the answer there is "don't do that". 
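The reason a macro works where the static inline could not: kbuild only defines KBUILD_MODNAME while compiling the objects of the module being built, so the name has to be expanded at the driver's call site rather than inside pci.h. A sketch of the effect in a driver (the foo_* names are purely illustrative):

	/*
	 * Built as foo.ko, this unit is compiled with KBUILD_MODNAME set to
	 * "foo", so the call below expands to
	 * __pci_register_driver(&foo_driver, THIS_MODULE, "foo").
	 */
	static struct pci_driver foo_driver = {
		.name		= "foo",
		.id_table	= foo_pci_ids,
		.probe		= foo_probe,
		.remove		= foo_remove,
	};

	static int __init foo_init(void)
	{
		return pci_register_driver(&foo_driver);
	}
	module_init(foo_init);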
Cc: Sam Ravnborg Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Jesse Barnes --- include/linux/pci.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/pci.h b/include/linux/pci.h index 825be3878f6..b0269492c34 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -680,10 +680,12 @@ void pci_enable_bridges(struct pci_bus *bus); /* Proper probing supporting hot-pluggable devices */ int __must_check __pci_register_driver(struct pci_driver *, struct module *, const char *mod_name); -static inline int __must_check pci_register_driver(struct pci_driver *driver) -{ - return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME); -} + +/* + * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define pci_register_driver(driver) \ + __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) void pci_unregister_driver(struct pci_driver *dev); void pci_remove_behind_bridge(struct pci_dev *dev); -- cgit v1.2.3-70-g09d2 From 7bed523a95425b70af7a59df61d5adb422ef2038 Mon Sep 17 00:00:00 2001 From: "akpm@linux-foundation.org" Date: Tue, 5 Aug 2008 14:07:53 -0700 Subject: PCI: remove duplicate symbol from pci_ids.h pci.ids.h: remove a duplicated symbol Cc: Doug Thompson Signed-off-by: Grant Coady Signed-off-by: Andrew Morton Signed-off-by: Jesse Barnes --- include/linux/pci_ids.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 35a78415acc..9ec2bcce8e8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2177,8 +2177,6 @@ #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 -#define PCI_VENDOR_ID_RDC 0x17f3 - #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 -- cgit v1.2.3-70-g09d2 From 5a6c9b60b4cc15b22d3102b0033e5cb842125456 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 8 Aug 2008 00:14:24 +0200 Subject: PCI PM: Export pci_pme_active to drivers Export pci_pme_active() to drivers, so that they can clear the PME_status bit and disable PME# for their devices without involving ACPI. Signed-off-by: Rafael J. Wysocki Signed-off-by: Jesse Barnes --- drivers/pci/pci.c | 3 ++- include/linux/pci.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 0a3d856833f..c9884bba22d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1060,7 +1060,7 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) * The caller must verify that the device is capable of generating PME# before * calling this function with @enable equal to 'true'. 
*/ -static void pci_pme_active(struct pci_dev *dev, bool enable) +void pci_pme_active(struct pci_dev *dev, bool enable) { u16 pmcsr; @@ -1941,6 +1941,7 @@ EXPORT_SYMBOL(pci_set_power_state); EXPORT_SYMBOL(pci_save_state); EXPORT_SYMBOL(pci_restore_state); EXPORT_SYMBOL(pci_pme_capable); +EXPORT_SYMBOL(pci_pme_active); EXPORT_SYMBOL(pci_enable_wake); EXPORT_SYMBOL(pci_target_state); EXPORT_SYMBOL(pci_prepare_to_sleep); diff --git a/include/linux/pci.h b/include/linux/pci.h index b0269492c34..c0e14008a3c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -641,6 +641,7 @@ int pci_restore_state(struct pci_dev *dev); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); +void pci_pme_active(struct pci_dev *dev, bool enable); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); pci_power_t pci_target_state(struct pci_dev *dev); int pci_prepare_to_sleep(struct pci_dev *dev); -- cgit v1.2.3-70-g09d2 From a7448db4826efb097e94f601f4cd9b37053e81bf Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Tue, 5 Aug 2008 14:50:11 +0200 Subject: avr32: Remove include/asm-avr32/arch-at32ap Since all users have been converted over to use , there's no need for the arch-at32ap directory and associated symlink anymore. Signed-off-by: Haavard Skinnemoen --- arch/avr32/Makefile | 14 ---- include/asm-avr32/arch-at32ap/at32ap700x.h | 49 ------------ include/asm-avr32/arch-at32ap/board.h | 121 ----------------------------- include/asm-avr32/arch-at32ap/cpu.h | 35 --------- include/asm-avr32/arch-at32ap/gpio.h | 45 ----------- include/asm-avr32/arch-at32ap/init.h | 18 ----- include/asm-avr32/arch-at32ap/io.h | 39 ---------- include/asm-avr32/arch-at32ap/irq.h | 14 ---- include/asm-avr32/arch-at32ap/pm.h | 51 ------------ include/asm-avr32/arch-at32ap/portmux.h | 29 ------- include/asm-avr32/arch-at32ap/smc.h | 113 --------------------------- include/asm-avr32/arch-at32ap/sram.h | 30 ------- 12 files changed, 558 deletions(-) delete mode 100644 include/asm-avr32/arch-at32ap/at32ap700x.h delete mode 100644 include/asm-avr32/arch-at32ap/board.h delete mode 100644 include/asm-avr32/arch-at32ap/cpu.h delete mode 100644 include/asm-avr32/arch-at32ap/gpio.h delete mode 100644 include/asm-avr32/arch-at32ap/init.h delete mode 100644 include/asm-avr32/arch-at32ap/io.h delete mode 100644 include/asm-avr32/arch-at32ap/irq.h delete mode 100644 include/asm-avr32/arch-at32ap/pm.h delete mode 100644 include/asm-avr32/arch-at32ap/portmux.h delete mode 100644 include/asm-avr32/arch-at32ap/smc.h delete mode 100644 include/asm-avr32/arch-at32ap/sram.h (limited to 'include') diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile index 5b46433d53a..c9e1f0b47fd 100644 --- a/arch/avr32/Makefile +++ b/arch/avr32/Makefile @@ -39,20 +39,6 @@ core-y += arch/avr32/mm/ drivers-$(CONFIG_OPROFILE) += arch/avr32/oprofile/ libs-y += arch/avr32/lib/ -archincdir-$(CONFIG_PLATFORM_AT32AP) := arch-at32ap - -include/asm-avr32/.arch: $(wildcard include/config/platform/*.h) include/config/auto.conf - @echo ' SYMLINK include/asm-avr32/arch -> include/asm-avr32/$(archincdir-y)' -ifneq ($(KBUILD_SRC),) - $(Q)mkdir -p include/asm-avr32 - $(Q)ln -fsn $(srctree)/include/asm-avr32/$(archincdir-y) include/asm-avr32/arch -else - $(Q)ln -fsn $(archincdir-y) include/asm-avr32/arch -endif - @touch $@ - -archprepare: include/asm-avr32/.arch - CLEAN_FILES += include/asm-avr32/.arch 
include/asm-avr32/arch BOOT_TARGETS := vmlinux.elf vmlinux.bin uImage uImage.srec diff --git a/include/asm-avr32/arch-at32ap/at32ap700x.h b/include/asm-avr32/arch-at32ap/at32ap700x.h deleted file mode 100644 index d18a3053be0..00000000000 --- a/include/asm-avr32/arch-at32ap/at32ap700x.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Pin definitions for AT32AP7000. - * - * Copyright (C) 2006 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_ARCH_AT32AP700X_H__ -#define __ASM_ARCH_AT32AP700X_H__ - -#define GPIO_PERIPH_A 0 -#define GPIO_PERIPH_B 1 - -/* - * Pin numbers identifying specific GPIO pins on the chip. They can - * also be converted to IRQ numbers by passing them through - * gpio_to_irq(). - */ -#define GPIO_PIOA_BASE (0) -#define GPIO_PIOB_BASE (GPIO_PIOA_BASE + 32) -#define GPIO_PIOC_BASE (GPIO_PIOB_BASE + 32) -#define GPIO_PIOD_BASE (GPIO_PIOC_BASE + 32) -#define GPIO_PIOE_BASE (GPIO_PIOD_BASE + 32) - -#define GPIO_PIN_PA(N) (GPIO_PIOA_BASE + (N)) -#define GPIO_PIN_PB(N) (GPIO_PIOB_BASE + (N)) -#define GPIO_PIN_PC(N) (GPIO_PIOC_BASE + (N)) -#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N)) -#define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N)) - - -/* - * DMAC peripheral hardware handshaking interfaces, used with dw_dmac - */ -#define DMAC_MCI_RX 0 -#define DMAC_MCI_TX 1 -#define DMAC_DAC_TX 2 -#define DMAC_AC97_A_RX 3 -#define DMAC_AC97_A_TX 4 -#define DMAC_AC97_B_RX 5 -#define DMAC_AC97_B_TX 6 -#define DMAC_DMAREQ_0 7 -#define DMAC_DMAREQ_1 8 -#define DMAC_DMAREQ_2 9 -#define DMAC_DMAREQ_3 10 - -#endif /* __ASM_ARCH_AT32AP700X_H__ */ diff --git a/include/asm-avr32/arch-at32ap/board.h b/include/asm-avr32/arch-at32ap/board.h deleted file mode 100644 index e60e9076544..00000000000 --- a/include/asm-avr32/arch-at32ap/board.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Platform data definitions. - */ -#ifndef __ASM_ARCH_BOARD_H -#define __ASM_ARCH_BOARD_H - -#include - -#define GPIO_PIN_NONE (-1) - -/* - * Clock rates for various on-board oscillators. The number of entries - * in this array is chip-dependent. - */ -extern unsigned long at32_board_osc_rates[]; - -/* Add basic devices: system manager, interrupt controller, portmuxes, etc. */ -void at32_add_system_devices(void); - -#define ATMEL_MAX_UART 4 -extern struct platform_device *atmel_default_console_device; - -struct atmel_uart_data { - short use_dma_tx; /* use transmit DMA? */ - short use_dma_rx; /* use receive DMA? 
*/ - void __iomem *regs; /* virtual base address, if any */ -}; -void at32_map_usart(unsigned int hw_id, unsigned int line); -struct platform_device *at32_add_device_usart(unsigned int id); - -struct eth_platform_data { - u32 phy_mask; - u8 is_rmii; -}; -struct platform_device * -at32_add_device_eth(unsigned int id, struct eth_platform_data *data); - -struct spi_board_info; -struct platform_device * -at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n); - -struct atmel_lcdfb_info; -struct platform_device * -at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data, - unsigned long fbmem_start, unsigned long fbmem_len, - unsigned int pin_config); - -struct usba_platform_data; -struct platform_device * -at32_add_device_usba(unsigned int id, struct usba_platform_data *data); - -struct ide_platform_data { - u8 cs; -}; -struct platform_device * -at32_add_device_ide(unsigned int id, unsigned int extint, - struct ide_platform_data *data); - -/* mask says which PWM channels to mux */ -struct platform_device *at32_add_device_pwm(u32 mask); - -/* depending on what's hooked up, not all SSC pins will be used */ -#define ATMEL_SSC_TK 0x01 -#define ATMEL_SSC_TF 0x02 -#define ATMEL_SSC_TD 0x04 -#define ATMEL_SSC_TX (ATMEL_SSC_TK | ATMEL_SSC_TF | ATMEL_SSC_TD) - -#define ATMEL_SSC_RK 0x10 -#define ATMEL_SSC_RF 0x20 -#define ATMEL_SSC_RD 0x40 -#define ATMEL_SSC_RX (ATMEL_SSC_RK | ATMEL_SSC_RF | ATMEL_SSC_RD) - -struct platform_device * -at32_add_device_ssc(unsigned int id, unsigned int flags); - -struct i2c_board_info; -struct platform_device *at32_add_device_twi(unsigned int id, - struct i2c_board_info *b, - unsigned int n); - -struct mci_platform_data; -struct platform_device * -at32_add_device_mci(unsigned int id, struct mci_platform_data *data); - -struct ac97c_platform_data { - unsigned short dma_rx_periph_id; - unsigned short dma_tx_periph_id; - unsigned short dma_controller_id; - int reset_pin; -}; -struct platform_device * -at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data); - -struct platform_device *at32_add_device_abdac(unsigned int id); -struct platform_device *at32_add_device_psif(unsigned int id); - -struct cf_platform_data { - int detect_pin; - int reset_pin; - int vcc_pin; - int ready_pin; - u8 cs; -}; -struct platform_device * -at32_add_device_cf(unsigned int id, unsigned int extint, - struct cf_platform_data *data); - -/* NAND / SmartMedia */ -struct atmel_nand_data { - int enable_pin; /* chip enable */ - int det_pin; /* card detect */ - int rdy_pin; /* ready/busy */ - u8 ale; /* address line number connected to ALE */ - u8 cle; /* address line number connected to CLE */ - u8 bus_width_16; /* buswidth is 16 bit */ - struct mtd_partition *(*partition_info)(int size, int *num_partitions); -}; -struct platform_device * -at32_add_device_nand(unsigned int id, struct atmel_nand_data *data); - -#endif /* __ASM_ARCH_BOARD_H */ diff --git a/include/asm-avr32/arch-at32ap/cpu.h b/include/asm-avr32/arch-at32ap/cpu.h deleted file mode 100644 index 44d0bfa1f40..00000000000 --- a/include/asm-avr32/arch-at32ap/cpu.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * AVR32 and (fake) AT91 CPU identification - * - * Copyright (C) 2007 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_ARCH_CPU_H -#define __ASM_ARCH_CPU_H - -/* - * Only AT32AP7000 is defined for now. 
We can identify the specific - * chip at runtime, but I'm not sure if it's really worth it. - */ -#ifdef CONFIG_CPU_AT32AP700X -# define cpu_is_at32ap7000() (1) -#else -# define cpu_is_at32ap7000() (0) -#endif - -/* - * Since this is AVR32, we will never run on any AT91 CPU. But these - * definitions may reduce clutter in common drivers. - */ -#define cpu_is_at91rm9200() (0) -#define cpu_is_at91sam9xe() (0) -#define cpu_is_at91sam9260() (0) -#define cpu_is_at91sam9261() (0) -#define cpu_is_at91sam9263() (0) -#define cpu_is_at91sam9rl() (0) -#define cpu_is_at91cap9() (0) - -#endif /* __ASM_ARCH_CPU_H */ diff --git a/include/asm-avr32/arch-at32ap/gpio.h b/include/asm-avr32/arch-at32ap/gpio.h deleted file mode 100644 index 0180f584ef0..00000000000 --- a/include/asm-avr32/arch-at32ap/gpio.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef __ASM_AVR32_ARCH_GPIO_H -#define __ASM_AVR32_ARCH_GPIO_H - -#include -#include - - -/* Some GPIO chips can manage IRQs; some can't. The exact numbers can - * be changed if needed, but for the moment they're not configurable. - */ -#define ARCH_NR_GPIOS (NR_GPIO_IRQS + 2 * 32) - - -/* Arch-neutral GPIO API, supporting both "native" and external GPIOs. */ -#include - -static inline int gpio_get_value(unsigned int gpio) -{ - return __gpio_get_value(gpio); -} - -static inline void gpio_set_value(unsigned int gpio, int value) -{ - __gpio_set_value(gpio, value); -} - -static inline int gpio_cansleep(unsigned int gpio) -{ - return __gpio_cansleep(gpio); -} - - -static inline int gpio_to_irq(unsigned int gpio) -{ - if (gpio < NR_GPIO_IRQS) - return gpio + GPIO_IRQ_BASE; - return -EINVAL; -} - -static inline int irq_to_gpio(unsigned int irq) -{ - return irq - GPIO_IRQ_BASE; -} - -#endif /* __ASM_AVR32_ARCH_GPIO_H */ diff --git a/include/asm-avr32/arch-at32ap/init.h b/include/asm-avr32/arch-at32ap/init.h deleted file mode 100644 index bc40e3d4615..00000000000 --- a/include/asm-avr32/arch-at32ap/init.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * AT32AP platform initialization calls. - * - * Copyright (C) 2006 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __ASM_AVR32_AT32AP_INIT_H__ -#define __ASM_AVR32_AT32AP_INIT_H__ - -void setup_platform(void); -void setup_board(void); - -void at32_setup_serial_console(unsigned int usart_id); - -#endif /* __ASM_AVR32_AT32AP_INIT_H__ */ diff --git a/include/asm-avr32/arch-at32ap/io.h b/include/asm-avr32/arch-at32ap/io.h deleted file mode 100644 index 4ec6abc68ea..00000000000 --- a/include/asm-avr32/arch-at32ap/io.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef __ASM_AVR32_ARCH_AT32AP_IO_H -#define __ASM_AVR32_ARCH_AT32AP_IO_H - -/* For "bizarre" halfword swapping */ -#include - -#if defined(CONFIG_AP700X_32_BIT_SMC) -# define __swizzle_addr_b(addr) (addr ^ 3UL) -# define __swizzle_addr_w(addr) (addr ^ 2UL) -# define __swizzle_addr_l(addr) (addr) -# define ioswabb(a, x) (x) -# define ioswabw(a, x) (x) -# define ioswabl(a, x) (x) -# define __mem_ioswabb(a, x) (x) -# define __mem_ioswabw(a, x) swab16(x) -# define __mem_ioswabl(a, x) swab32(x) -#elif defined(CONFIG_AP700X_16_BIT_SMC) -# define __swizzle_addr_b(addr) (addr ^ 1UL) -# define __swizzle_addr_w(addr) (addr) -# define __swizzle_addr_l(addr) (addr) -# define ioswabb(a, x) (x) -# define ioswabw(a, x) (x) -# define ioswabl(a, x) swahw32(x) -# define __mem_ioswabb(a, x) (x) -# define __mem_ioswabw(a, x) swab16(x) -# define __mem_ioswabl(a, x) swahb32(x) -#else -# define __swizzle_addr_b(addr) (addr) -# define __swizzle_addr_w(addr) (addr) -# define __swizzle_addr_l(addr) (addr) -# define ioswabb(a, x) (x) -# define ioswabw(a, x) swab16(x) -# define ioswabl(a, x) swab32(x) -# define __mem_ioswabb(a, x) (x) -# define __mem_ioswabw(a, x) (x) -# define __mem_ioswabl(a, x) (x) -#endif - -#endif /* __ASM_AVR32_ARCH_AT32AP_IO_H */ diff --git a/include/asm-avr32/arch-at32ap/irq.h b/include/asm-avr32/arch-at32ap/irq.h deleted file mode 100644 index 608e350368c..00000000000 --- a/include/asm-avr32/arch-at32ap/irq.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __ASM_AVR32_ARCH_IRQ_H -#define __ASM_AVR32_ARCH_IRQ_H - -#define EIM_IRQ_BASE NR_INTERNAL_IRQS -#define NR_EIM_IRQS 32 -#define AT32_EXTINT(n) (EIM_IRQ_BASE + (n)) - -#define GPIO_IRQ_BASE (EIM_IRQ_BASE + NR_EIM_IRQS) -#define NR_GPIO_CTLR (5 /*internal*/ + 1 /*external*/) -#define NR_GPIO_IRQS (NR_GPIO_CTLR * 32) - -#define NR_IRQS (GPIO_IRQ_BASE + NR_GPIO_IRQS) - -#endif /* __ASM_AVR32_ARCH_IRQ_H */ diff --git a/include/asm-avr32/arch-at32ap/pm.h b/include/asm-avr32/arch-at32ap/pm.h deleted file mode 100644 index 979b355b77b..00000000000 --- a/include/asm-avr32/arch-at32ap/pm.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * AVR32 AP Power Management. - * - * Copyright (C) 2008 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_AVR32_ARCH_PM_H -#define __ASM_AVR32_ARCH_PM_H - -/* Possible arguments to the "sleep" instruction */ -#define CPU_SLEEP_IDLE 0 -#define CPU_SLEEP_FROZEN 1 -#define CPU_SLEEP_STANDBY 2 -#define CPU_SLEEP_STOP 3 -#define CPU_SLEEP_STATIC 5 - -#ifndef __ASSEMBLY__ -extern void cpu_enter_idle(void); -extern void cpu_enter_standby(unsigned long sdramc_base); - -extern bool disable_idle_sleep; - -static inline void cpu_disable_idle_sleep(void) -{ - disable_idle_sleep = true; -} - -static inline void cpu_enable_idle_sleep(void) -{ - disable_idle_sleep = false; -} - -static inline void cpu_idle_sleep(void) -{ - /* - * If we're using the COUNT and COMPARE registers for - * timekeeping, we can't use the IDLE state. 
- */ - if (disable_idle_sleep) - cpu_relax(); - else - cpu_enter_idle(); -} - -void intc_set_suspend_handler(unsigned long offset); -#endif - -#endif /* __ASM_AVR32_ARCH_PM_H */ diff --git a/include/asm-avr32/arch-at32ap/portmux.h b/include/asm-avr32/arch-at32ap/portmux.h deleted file mode 100644 index b1abe6b4e4e..00000000000 --- a/include/asm-avr32/arch-at32ap/portmux.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * AT32 portmux interface. - * - * Copyright (C) 2006 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_ARCH_PORTMUX_H__ -#define __ASM_ARCH_PORTMUX_H__ - -/* - * Set up pin multiplexing, called from board init only. - * - * The following flags determine the initial state of the pin. - */ -#define AT32_GPIOF_PULLUP 0x00000001 /* (not-OUT) Enable pull-up */ -#define AT32_GPIOF_OUTPUT 0x00000002 /* (OUT) Enable output driver */ -#define AT32_GPIOF_HIGH 0x00000004 /* (OUT) Set output high */ -#define AT32_GPIOF_DEGLITCH 0x00000008 /* (IN) Filter glitches */ -#define AT32_GPIOF_MULTIDRV 0x00000010 /* Enable multidriver option */ - -void at32_select_periph(unsigned int pin, unsigned int periph, - unsigned long flags); -void at32_select_gpio(unsigned int pin, unsigned long flags); -void at32_reserve_pin(unsigned int pin); - -#endif /* __ASM_ARCH_PORTMUX_H__ */ diff --git a/include/asm-avr32/arch-at32ap/smc.h b/include/asm-avr32/arch-at32ap/smc.h deleted file mode 100644 index c98eea44a70..00000000000 --- a/include/asm-avr32/arch-at32ap/smc.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Static Memory Controller for AT32 chips - * - * Copyright (C) 2006 Atmel Corporation - * - * Inspired by the OMAP2 General-Purpose Memory Controller interface - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ARCH_AT32AP_SMC_H -#define __ARCH_AT32AP_SMC_H - -/* - * All timing parameters are in nanoseconds. - */ -struct smc_timing { - /* Delay from address valid to assertion of given strobe */ - int ncs_read_setup; - int nrd_setup; - int ncs_write_setup; - int nwe_setup; - - /* Pulse length of given strobe */ - int ncs_read_pulse; - int nrd_pulse; - int ncs_write_pulse; - int nwe_pulse; - - /* Total cycle length of given operation */ - int read_cycle; - int write_cycle; - - /* Minimal recovery times, will extend cycle if needed */ - int ncs_read_recover; - int nrd_recover; - int ncs_write_recover; - int nwe_recover; -}; - -/* - * All timing parameters are in clock cycles. 
- */ -struct smc_config { - - /* Delay from address valid to assertion of given strobe */ - u8 ncs_read_setup; - u8 nrd_setup; - u8 ncs_write_setup; - u8 nwe_setup; - - /* Pulse length of given strobe */ - u8 ncs_read_pulse; - u8 nrd_pulse; - u8 ncs_write_pulse; - u8 nwe_pulse; - - /* Total cycle length of given operation */ - u8 read_cycle; - u8 write_cycle; - - /* Bus width in bytes */ - u8 bus_width; - - /* - * 0: Data is sampled on rising edge of NCS - * 1: Data is sampled on rising edge of NRD - */ - unsigned int nrd_controlled:1; - - /* - * 0: Data is driven on falling edge of NCS - * 1: Data is driven on falling edge of NWR - */ - unsigned int nwe_controlled:1; - - /* - * 0: NWAIT is disabled - * 1: Reserved - * 2: NWAIT is frozen mode - * 3: NWAIT in ready mode - */ - unsigned int nwait_mode:2; - - /* - * 0: Byte select access type - * 1: Byte write access type - */ - unsigned int byte_write:1; - - /* - * Number of clock cycles before data is released after - * the rising edge of the read controlling signal - * - * Total cycles from SMC is tdf_cycles + 1 - */ - unsigned int tdf_cycles:4; - - /* - * 0: TDF optimization disabled - * 1: TDF optimization enabled - */ - unsigned int tdf_mode:1; -}; - -extern void smc_set_timing(struct smc_config *config, - const struct smc_timing *timing); - -extern int smc_set_configuration(int cs, const struct smc_config *config); -extern struct smc_config *smc_get_configuration(int cs); - -#endif /* __ARCH_AT32AP_SMC_H */ diff --git a/include/asm-avr32/arch-at32ap/sram.h b/include/asm-avr32/arch-at32ap/sram.h deleted file mode 100644 index 4838dae7601..00000000000 --- a/include/asm-avr32/arch-at32ap/sram.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Simple SRAM allocator - * - * Copyright (C) 2008 Atmel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_AVR32_ARCH_SRAM_H -#define __ASM_AVR32_ARCH_SRAM_H - -#include - -extern struct gen_pool *sram_pool; - -static inline unsigned long sram_alloc(size_t len) -{ - if (!sram_pool) - return 0UL; - - return gen_pool_alloc(sram_pool, len); -} - -static inline void sram_free(unsigned long addr, size_t len) -{ - return gen_pool_free(sram_pool, addr, len); -} - -#endif /* __ASM_AVR32_ARCH_SRAM_H */ -- cgit v1.2.3-70-g09d2 From 6f088f1d215be5250582b974f83f0e3aa6ad3a28 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Sat, 9 Aug 2008 13:44:58 +0200 Subject: [ARM] Move include/asm-arm/plat-orion to arch/arm/plat-orion/include/plat This patch performs the equivalent include directory shuffle for plat-orion, and fixes up all users. 
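For an individual driver the conversion is a one-line include change. Purely as an illustration (orion_nand.c is one such user; the spelling of each header follows from the file moves listed above, old tree under include/asm-arm/plat-orion/, new tree under arch/arm/plat-orion/include/plat/):

-#include <asm/plat-orion/orion_nand.h>
+#include <plat/orion_nand.h>
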
Signed-off-by: Lennert Buytenhek --- arch/arm/mach-kirkwood/common.c | 8 +++--- arch/arm/mach-kirkwood/irq.c | 2 +- arch/arm/mach-kirkwood/pcie.c | 2 +- arch/arm/mach-kirkwood/rd88f6281-setup.c | 2 +- arch/arm/mach-loki/common.c | 4 +-- arch/arm/mach-loki/irq.c | 2 +- arch/arm/mach-mv78xx0/common.c | 8 +++--- arch/arm/mach-mv78xx0/irq.c | 2 +- arch/arm/mach-mv78xx0/pcie.c | 2 +- arch/arm/mach-orion5x/common.c | 6 ++-- arch/arm/mach-orion5x/db88f5281-setup.c | 2 +- arch/arm/mach-orion5x/irq.c | 2 +- arch/arm/mach-orion5x/kurobox_pro-setup.c | 2 +- arch/arm/mach-orion5x/pci.c | 2 +- arch/arm/mm/cache-feroceon-l2.c | 2 +- .../plat-orion/include/plat/cache-feroceon-l2.h | 11 ++++++++ arch/arm/plat-orion/include/plat/ehci-orion.h | 19 +++++++++++++ arch/arm/plat-orion/include/plat/irq.h | 17 ++++++++++++ arch/arm/plat-orion/include/plat/mv_xor.h | 30 ++++++++++++++++++++ arch/arm/plat-orion/include/plat/orion_nand.h | 25 +++++++++++++++++ arch/arm/plat-orion/include/plat/pcie.h | 32 ++++++++++++++++++++++ arch/arm/plat-orion/include/plat/time.h | 17 ++++++++++++ arch/arm/plat-orion/irq.c | 2 +- arch/arm/plat-orion/pcie.c | 2 +- drivers/dma/mv_xor.c | 2 +- drivers/mtd/nand/orion_nand.c | 2 +- drivers/usb/host/ehci-orion.c | 2 +- include/asm-arm/plat-orion/cache-feroceon-l2.h | 11 -------- include/asm-arm/plat-orion/ehci-orion.h | 19 ------------- include/asm-arm/plat-orion/irq.h | 17 ------------ include/asm-arm/plat-orion/mv_xor.h | 28 ------------------- include/asm-arm/plat-orion/orion_nand.h | 25 ----------------- include/asm-arm/plat-orion/pcie.h | 32 ---------------------- include/asm-arm/plat-orion/time.h | 17 ------------ 34 files changed, 180 insertions(+), 178 deletions(-) create mode 100644 arch/arm/plat-orion/include/plat/cache-feroceon-l2.h create mode 100644 arch/arm/plat-orion/include/plat/ehci-orion.h create mode 100644 arch/arm/plat-orion/include/plat/irq.h create mode 100644 arch/arm/plat-orion/include/plat/mv_xor.h create mode 100644 arch/arm/plat-orion/include/plat/orion_nand.h create mode 100644 arch/arm/plat-orion/include/plat/pcie.h create mode 100644 arch/arm/plat-orion/include/plat/time.h delete mode 100644 include/asm-arm/plat-orion/cache-feroceon-l2.h delete mode 100644 include/asm-arm/plat-orion/ehci-orion.h delete mode 100644 include/asm-arm/plat-orion/irq.h delete mode 100644 include/asm-arm/plat-orion/mv_xor.h delete mode 100644 include/asm-arm/plat-orion/orion_nand.h delete mode 100644 include/asm-arm/plat-orion/pcie.h delete mode 100644 include/asm-arm/plat-orion/time.h (limited to 'include') diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index 0e509b8ad56..02edd0dccc4 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -20,10 +20,10 @@ #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "common.h" /***************************************************************************** diff --git a/arch/arm/mach-kirkwood/irq.c b/arch/arm/mach-kirkwood/irq.c index 302bb2cf666..5790643ffe0 100644 --- a/arch/arm/mach-kirkwood/irq.c +++ b/arch/arm/mach-kirkwood/irq.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "common.h" void __init kirkwood_init_irq(void) diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c index 8282d0ff84b..2195fa31f6b 100644 --- a/arch/arm/mach-kirkwood/pcie.c +++ b/arch/arm/mach-kirkwood/pcie.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include 
"common.h" diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index d8a43018c7d..d96487a0f18 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include "common.h" static struct mtd_partition rd88f6281_nand_parts[] = { diff --git a/arch/arm/mach-loki/common.c b/arch/arm/mach-loki/common.c index e20cdbca1eb..c0d2d9d12e7 100644 --- a/arch/arm/mach-loki/common.c +++ b/arch/arm/mach-loki/common.c @@ -19,8 +19,8 @@ #include #include #include -#include -#include +#include +#include #include "common.h" /***************************************************************************** diff --git a/arch/arm/mach-loki/irq.c b/arch/arm/mach-loki/irq.c index d839af91fe0..5a487930cb2 100644 --- a/arch/arm/mach-loki/irq.c +++ b/arch/arm/mach-loki/irq.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "common.h" void __init loki_init_irq(void) diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index e633f9cb239..953a26c469c 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c @@ -18,10 +18,10 @@ #include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include "common.h" diff --git a/arch/arm/mach-mv78xx0/irq.c b/arch/arm/mach-mv78xx0/irq.c index 3198abf54c9..28248d37b99 100644 --- a/arch/arm/mach-mv78xx0/irq.c +++ b/arch/arm/mach-mv78xx0/irq.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "common.h" void __init mv78xx0_init_irq(void) diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c index b78e1443159..430ea84d587 100644 --- a/arch/arm/mach-mv78xx0/pcie.c +++ b/arch/arm/mach-mv78xx0/pcie.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include "common.h" struct pcie_port { diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 168eeacaa4c..b44f26d7613 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -26,9 +26,9 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include "common.h" /***************************************************************************** diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c index 48ce6d0e002..ff13e9060b1 100644 --- a/arch/arm/mach-orion5x/db88f5281-setup.c +++ b/arch/arm/mach-orion5x/db88f5281-setup.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include "common.h" #include "mpp.h" diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c index cc2a017fd2a..2545ff9e583 100644 --- a/arch/arm/mach-orion5x/irq.c +++ b/arch/arm/mach-orion5x/irq.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "common.h" /***************************************************************************** diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c index 0caaaac74bc..45dfc9f99d8 100644 --- a/arch/arm/mach-orion5x/kurobox_pro-setup.c +++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include "common.h" #include "mpp.h" diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c index 256a4f68093..fbceecc4b7e 100644 --- a/arch/arm/mach-orion5x/pci.c +++ b/arch/arm/mach-orion5x/pci.c @@ -14,7 +14,7 @@ #include #include 
#include -#include +#include #include "common.h" /***************************************************************************** diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index 20eec4ba173..7b5a25d8157 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -14,7 +14,7 @@ #include #include -#include +#include /* diff --git a/arch/arm/plat-orion/include/plat/cache-feroceon-l2.h b/arch/arm/plat-orion/include/plat/cache-feroceon-l2.h new file mode 100644 index 00000000000..06f982d5569 --- /dev/null +++ b/arch/arm/plat-orion/include/plat/cache-feroceon-l2.h @@ -0,0 +1,11 @@ +/* + * arch/arm/plat-orion/include/plat/cache-feroceon-l2.h + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +extern void __init feroceon_l2_init(int l2_wt_override); diff --git a/arch/arm/plat-orion/include/plat/ehci-orion.h b/arch/arm/plat-orion/include/plat/ehci-orion.h new file mode 100644 index 00000000000..64343051095 --- /dev/null +++ b/arch/arm/plat-orion/include/plat/ehci-orion.h @@ -0,0 +1,19 @@ +/* + * arch/arm/plat-orion/include/plat/ehci-orion.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __PLAT_EHCI_ORION_H +#define __PLAT_EHCI_ORION_H + +#include + +struct orion_ehci_data { + struct mbus_dram_target_info *dram; +}; + + +#endif diff --git a/arch/arm/plat-orion/include/plat/irq.h b/arch/arm/plat-orion/include/plat/irq.h new file mode 100644 index 00000000000..f05eeab9496 --- /dev/null +++ b/arch/arm/plat-orion/include/plat/irq.h @@ -0,0 +1,17 @@ +/* + * arch/arm/plat-orion/include/plat/irq.h + * + * Marvell Orion SoC IRQ handling. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __PLAT_IRQ_H +#define __PLAT_IRQ_H + +void orion_irq_init(unsigned int irq_start, void __iomem *maskaddr); + + +#endif diff --git a/arch/arm/plat-orion/include/plat/mv_xor.h b/arch/arm/plat-orion/include/plat/mv_xor.h new file mode 100644 index 00000000000..bd5f3bdb4ae --- /dev/null +++ b/arch/arm/plat-orion/include/plat/mv_xor.h @@ -0,0 +1,30 @@ +/* + * arch/arm/plat-orion/include/plat/mv_xor.h + * + * Marvell XOR platform device data definition file. + */ + +#ifndef __PLAT_MV_XOR_H +#define __PLAT_MV_XOR_H + +#include +#include + +#define MV_XOR_SHARED_NAME "mv_xor_shared" +#define MV_XOR_NAME "mv_xor" + +struct mbus_dram_target_info; + +struct mv_xor_platform_shared_data { + struct mbus_dram_target_info *dram; +}; + +struct mv_xor_platform_data { + struct platform_device *shared; + int hw_id; + dma_cap_mask_t cap_mask; + size_t pool_size; +}; + + +#endif diff --git a/arch/arm/plat-orion/include/plat/orion_nand.h b/arch/arm/plat-orion/include/plat/orion_nand.h new file mode 100644 index 00000000000..d6a4cfa3778 --- /dev/null +++ b/arch/arm/plat-orion/include/plat/orion_nand.h @@ -0,0 +1,25 @@ +/* + * arch/arm/plat-orion/include/plat/orion_nand.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __PLAT_ORION_NAND_H +#define __PLAT_ORION_NAND_H + +/* + * Device bus NAND private data + */ +struct orion_nand_data { + struct mtd_partition *parts; + u32 nr_parts; + u8 ale; /* address line number connected to ALE */ + u8 cle; /* address line number connected to CLE */ + u8 width; /* buswidth */ + u8 chip_delay; +}; + + +#endif diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h new file mode 100644 index 00000000000..3ebfef72b4e --- /dev/null +++ b/arch/arm/plat-orion/include/plat/pcie.h @@ -0,0 +1,32 @@ +/* + * arch/arm/plat-orion/include/plat/pcie.h + * + * Marvell Orion SoC PCIe handling. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __PLAT_PCIE_H +#define __PLAT_PCIE_H + +u32 orion_pcie_dev_id(void __iomem *base); +u32 orion_pcie_rev(void __iomem *base); +int orion_pcie_link_up(void __iomem *base); +int orion_pcie_x4_mode(void __iomem *base); +int orion_pcie_get_local_bus_nr(void __iomem *base); +void orion_pcie_set_local_bus_nr(void __iomem *base, int nr); +void orion_pcie_setup(void __iomem *base, + struct mbus_dram_target_info *dram); +int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val); +int orion_pcie_rd_conf_tlp(void __iomem *base, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val); +int orion_pcie_rd_conf_wa(void __iomem *wa_base, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val); +int orion_pcie_wr_conf(void __iomem *base, struct pci_bus *bus, + u32 devfn, int where, int size, u32 val); + + +#endif diff --git a/arch/arm/plat-orion/include/plat/time.h b/arch/arm/plat-orion/include/plat/time.h new file mode 100644 index 00000000000..c06ca35f361 --- /dev/null +++ b/arch/arm/plat-orion/include/plat/time.h @@ -0,0 +1,17 @@ +/* + * arch/arm/plat-orion/include/plat/time.h + * + * Marvell Orion SoC time handling. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __PLAT_TIME_H +#define __PLAT_TIME_H + +void orion_time_init(unsigned int irq, unsigned int tclk); + + +#endif diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c index fe66a183516..3f9d34fc738 100644 --- a/arch/arm/plat-orion/irq.c +++ b/arch/arm/plat-orion/irq.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include static void orion_irq_mask(u32 irq) { diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c index ca32c60e14d..883902fead8 100644 --- a/arch/arm/plat-orion/pcie.c +++ b/arch/arm/plat-orion/pcie.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include /* * PCIe unit register offsets. 
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index a4e4494663b..0328da020a1 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include "mv_xor.h" static void mv_xor_issue_pending(struct dma_chan *chan); diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 64002488c6e..917cf8d3ae9 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #ifdef CONFIG_MTD_CMDLINE_PARTS static const char *part_probes[] = { "cmdlinepart", NULL }; diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 5fbdc14e63b..5416cf96900 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #define rdl(off) __raw_readl(hcd->regs + (off)) #define wrl(off, val) __raw_writel((val), hcd->regs + (off)) diff --git a/include/asm-arm/plat-orion/cache-feroceon-l2.h b/include/asm-arm/plat-orion/cache-feroceon-l2.h deleted file mode 100644 index ba4e016d3ec..00000000000 --- a/include/asm-arm/plat-orion/cache-feroceon-l2.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * include/asm-arm/plat-orion/cache-feroceon-l2.h - * - * Copyright (C) 2008 Marvell Semiconductor - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -extern void __init feroceon_l2_init(int l2_wt_override); diff --git a/include/asm-arm/plat-orion/ehci-orion.h b/include/asm-arm/plat-orion/ehci-orion.h deleted file mode 100644 index 785705651e2..00000000000 --- a/include/asm-arm/plat-orion/ehci-orion.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * include/asm-arm/plat-orion/ehci-orion.h - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __ASM_PLAT_ORION_EHCI_ORION_H -#define __ASM_PLAT_ORION_EHCI_ORION_H - -#include - -struct orion_ehci_data { - struct mbus_dram_target_info *dram; -}; - - -#endif diff --git a/include/asm-arm/plat-orion/irq.h b/include/asm-arm/plat-orion/irq.h deleted file mode 100644 index 94aeed919d5..00000000000 --- a/include/asm-arm/plat-orion/irq.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * include/asm-arm/plat-orion/irq.h - * - * Marvell Orion SoC IRQ handling. - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __ASM_PLAT_ORION_IRQ_H -#define __ASM_PLAT_ORION_IRQ_H - -void orion_irq_init(unsigned int irq_start, void __iomem *maskaddr); - - -#endif diff --git a/include/asm-arm/plat-orion/mv_xor.h b/include/asm-arm/plat-orion/mv_xor.h deleted file mode 100644 index c349e8ff5cc..00000000000 --- a/include/asm-arm/plat-orion/mv_xor.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Marvell XOR platform device data definition file. 
- */ - -#ifndef __ASM_PLAT_ORION_MV_XOR_H -#define __ASM_PLAT_ORION_MV_XOR_H - -#include -#include - -#define MV_XOR_SHARED_NAME "mv_xor_shared" -#define MV_XOR_NAME "mv_xor" - -struct mbus_dram_target_info; - -struct mv_xor_platform_shared_data { - struct mbus_dram_target_info *dram; -}; - -struct mv_xor_platform_data { - struct platform_device *shared; - int hw_id; - dma_cap_mask_t cap_mask; - size_t pool_size; -}; - - -#endif diff --git a/include/asm-arm/plat-orion/orion_nand.h b/include/asm-arm/plat-orion/orion_nand.h deleted file mode 100644 index ad4ce94c199..00000000000 --- a/include/asm-arm/plat-orion/orion_nand.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * include/asm-arm/plat-orion/orion_nand.h - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __ASM_PLAT_ORION_ORION_NAND_H -#define __ASM_PLAT_ORION_ORION_NAND_H - -/* - * Device bus NAND private data - */ -struct orion_nand_data { - struct mtd_partition *parts; - u32 nr_parts; - u8 ale; /* address line number connected to ALE */ - u8 cle; /* address line number connected to CLE */ - u8 width; /* buswidth */ - u8 chip_delay; -}; - - -#endif diff --git a/include/asm-arm/plat-orion/pcie.h b/include/asm-arm/plat-orion/pcie.h deleted file mode 100644 index e61b7bd97af..00000000000 --- a/include/asm-arm/plat-orion/pcie.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * include/asm-arm/plat-orion/pcie.h - * - * Marvell Orion SoC PCIe handling. - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __ASM_PLAT_ORION_PCIE_H -#define __ASM_PLAT_ORION_PCIE_H - -u32 orion_pcie_dev_id(void __iomem *base); -u32 orion_pcie_rev(void __iomem *base); -int orion_pcie_link_up(void __iomem *base); -int orion_pcie_x4_mode(void __iomem *base); -int orion_pcie_get_local_bus_nr(void __iomem *base); -void orion_pcie_set_local_bus_nr(void __iomem *base, int nr); -void orion_pcie_setup(void __iomem *base, - struct mbus_dram_target_info *dram); -int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val); -int orion_pcie_rd_conf_tlp(void __iomem *base, struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val); -int orion_pcie_rd_conf_wa(void __iomem *wa_base, struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val); -int orion_pcie_wr_conf(void __iomem *base, struct pci_bus *bus, - u32 devfn, int where, int size, u32 val); - - -#endif diff --git a/include/asm-arm/plat-orion/time.h b/include/asm-arm/plat-orion/time.h deleted file mode 100644 index 0e85cc8f44d..00000000000 --- a/include/asm-arm/plat-orion/time.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * include/asm-arm/plat-orion/time.h - * - * Marvell Orion SoC time handling. - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
- */ - -#ifndef __ASM_PLAT_ORION_TIME_H -#define __ASM_PLAT_ORION_TIME_H - -void orion_time_init(unsigned int irq, unsigned int tclk); - - -#endif -- cgit v1.2.3-70-g09d2 From 631c9a8a79b9b94eef56afa3bff107ce27c4c2bb Mon Sep 17 00:00:00 2001 From: Krzysztof Helt Date: Fri, 8 Aug 2008 21:34:10 +0100 Subject: [ARM] S3C24XX: Compilation fix if s3c2410 is not selected This patch fixes compilation error if no s3c2410 processor is selected but the s3c244x is selected. The function s3c2410_baseclk_add() is now available for all Samsung cpus. Signed-off-by: Krzysztof Helt [ben-linux@fluff.org: Whitespace and description fixups] Signed-off-by: Ben Dooks --- include/asm-arm/plat-s3c24xx/s3c2410.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-arm/plat-s3c24xx/s3c2410.h b/include/asm-arm/plat-s3c24xx/s3c2410.h index 36de0b83587..3cd1ec677b3 100644 --- a/include/asm-arm/plat-s3c24xx/s3c2410.h +++ b/include/asm-arm/plat-s3c24xx/s3c2410.h @@ -21,11 +21,11 @@ extern void s3c2410_init_uarts(struct s3c2410_uartcfg *cfg, int no); extern void s3c2410_init_clocks(int xtal); -extern int s3c2410_baseclk_add(void); - #else #define s3c2410_init_clocks NULL #define s3c2410_init_uarts NULL #define s3c2410_map_io NULL #define s3c2410_init NULL #endif + +extern int s3c2410_baseclk_add(void); -- cgit v1.2.3-70-g09d2 From d3a2f71853ce543c5515d4982e202751e15b0b6d Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Thu, 31 Jul 2008 20:44:28 +0200 Subject: mfd: TMIO MMC structures and accessors. Signed-off-by: Ian Molton Signed-off-by: Samuel Ortiz --- include/linux/mfd/tmio.h | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 9438d8c9ac1..ec612e66391 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -1,6 +1,21 @@ #ifndef MFD_TMIO_H #define MFD_TMIO_H +#define tmio_ioread8(addr) readb(addr) +#define tmio_ioread16(addr) readw(addr) +#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) +#define tmio_ioread32(addr) \ + (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) + +#define tmio_iowrite8(val, addr) writeb((val), (addr)) +#define tmio_iowrite16(val, addr) writew((val), (addr)) +#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) +#define tmio_iowrite32(val, addr) \ + do { \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ + } while (0) + /* * data for the NAND controller */ @@ -10,8 +25,4 @@ struct tmio_nand_data { unsigned int num_partitions; }; -#define TMIO_NAND_CONFIG "tmio-nand-config" -#define TMIO_NAND_CONTROL "tmio-nand-control" -#define TMIO_NAND_IRQ "tmio-nand" - #endif -- cgit v1.2.3-70-g09d2 From 1f192015ca5b2f4d0a79c191f03f64e72fd8fc29 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Tue, 15 Jul 2008 15:09:43 +0100 Subject: mfd: driver for the T7L66XB TMIO SoC This patchset provides support for the core functinality of the T7L66XB SoC from Toshiba. Supported in this patchset is the IRQ MUX, MMC controller and NAND flash controller. 
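A board port hooks the chip up through struct t7l66xb_platform_data (added below in include/linux/mfd/t7l66xb.h) plus one memory window and one interrupt line, which is the shape t7l66xb_probe() expects. The sketch below is illustrative only: the base address, IRQ numbers and all EXAMPLE_*/example_* names are hypothetical placeholders, not part of this patch; only the "t7l66xb" device name, the resource layout and the platform-data fields come from the driver itself.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/t7l66xb.h>

#define EXAMPLE_T7L66XB_PHYS		0x10000000	/* hypothetical chip-select base */
#define EXAMPLE_T7L66XB_IRQ		17		/* hypothetical board interrupt line */
#define EXAMPLE_T7L66XB_IRQ_BASE	96		/* hypothetical base for the 8 muxed IRQs */

static struct tmio_nand_data example_nand_data = {
	.num_partitions	= 0,	/* hypothetical: a real board would list its MTD partitions */
};

static struct t7l66xb_platform_data example_t7l66xb_pdata = {
	.irq_base	= EXAMPLE_T7L66XB_IRQ_BASE,
	.nand_data	= &example_nand_data,
};

static struct resource example_t7l66xb_resources[] = {
	{
		/* register window covering the core SCR, MMC and NAND cells */
		.start	= EXAMPLE_T7L66XB_PHYS,
		.end	= EXAMPLE_T7L66XB_PHYS + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= EXAMPLE_T7L66XB_IRQ,
		.end	= EXAMPLE_T7L66XB_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_t7l66xb_device = {
	.name		= "t7l66xb",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_t7l66xb_pdata,
	},
	.num_resources	= ARRAY_SIZE(example_t7l66xb_resources),
	.resource	= example_t7l66xb_resources,
};

The board file would register example_t7l66xb_device during machine init; the MMC and NAND cells then inherit their register windows and muxed interrupts from this parent device through mfd_add_devices().
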
Signed-off-by: Ian Molton Signed-off-by: Samuel Ortiz --- drivers/mfd/Kconfig | 6 + drivers/mfd/Makefile | 1 + drivers/mfd/t7l66xb.c | 409 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/t7l66xb.h | 36 ++++ 4 files changed, 452 insertions(+) create mode 100644 drivers/mfd/t7l66xb.c create mode 100644 include/linux/mfd/t7l66xb.h (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 883e7ea31de..fc7c919693b 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -50,6 +50,12 @@ config HTC_PASIC3 HTC Magician devices, respectively. Actual functionality is handled by the leds-pasic3 and ds1wm drivers. +config MFD_T7L66XB + bool "Support Toshiba T7L66XB" + select MFD_CORE + help + Support for Toshiba Mobile IO Controller T7L66XB + config MFD_TC6393XB bool "Support Toshiba TC6393XB" depends on GPIOLIB && ARM diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 33daa2f45dd..3531ad2a276 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o +obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o obj-$(CONFIG_MFD_CORE) += mfd-core.o diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c new file mode 100644 index 00000000000..5be42054f73 --- /dev/null +++ b/drivers/mfd/t7l66xb.c @@ -0,0 +1,409 @@ +/* + * + * Toshiba T7L66XB core mfd support + * + * Copyright (c) 2005, 2007, 2008 Ian Molton + * Copyright (c) 2008 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * T7L66 features: + * + * Supported in this driver: + * SD/MMC + * SM/NAND flash controller + * + * As yet not supported + * GPIO interface (on NAND pins) + * Serial interface + * TFT 'interface converter' + * PCMCIA interface logic + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + T7L66XB_CELL_NAND, + T7L66XB_CELL_MMC, +}; + +#define SCR_REVID 0x08 /* b Revision ID */ +#define SCR_IMR 0x42 /* b Interrupt Mask */ +#define SCR_DEV_CTL 0xe0 /* b Device control */ +#define SCR_ISR 0xe1 /* b Interrupt Status */ +#define SCR_GPO_OC 0xf0 /* b GPO output control */ +#define SCR_GPO_OS 0xf1 /* b GPO output enable */ +#define SCR_GPI_S 0xf2 /* w GPI status */ +#define SCR_APDC 0xf8 /* b Active pullup down ctrl */ + +#define SCR_DEV_CTL_USB BIT(0) /* USB enable */ +#define SCR_DEV_CTL_MMC BIT(1) /* MMC enable */ + +/*--------------------------------------------------------------------------*/ + +struct t7l66xb { + void __iomem *scr; + /* Lock to protect registers requiring read/modify/write ops. 
*/ + spinlock_t lock; + + struct resource rscr; + int irq; + int irq_base; +}; + +/*--------------------------------------------------------------------------*/ + +static int t7l66xb_mmc_enable(struct platform_device *mmc) +{ + struct platform_device *dev = to_platform_device(mmc->dev.parent); + struct t7l66xb_platform_data *pdata = dev->dev.platform_data; + struct t7l66xb *t7l66xb = platform_get_drvdata(dev); + unsigned long flags; + u8 dev_ctl; + + if (pdata->enable_clk32k) + pdata->enable_clk32k(dev); + + spin_lock_irqsave(&t7l66xb->lock, flags); + + dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL); + dev_ctl |= SCR_DEV_CTL_MMC; + tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL); + + spin_unlock_irqrestore(&t7l66xb->lock, flags); + + return 0; +} + +static int t7l66xb_mmc_disable(struct platform_device *mmc) +{ + struct platform_device *dev = to_platform_device(mmc->dev.parent); + struct t7l66xb_platform_data *pdata = dev->dev.platform_data; + struct t7l66xb *t7l66xb = platform_get_drvdata(dev); + unsigned long flags; + u8 dev_ctl; + + spin_lock_irqsave(&t7l66xb->lock, flags); + + dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL); + dev_ctl &= ~SCR_DEV_CTL_MMC; + tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL); + + spin_unlock_irqrestore(&t7l66xb->lock, flags); + + if (pdata->disable_clk32k) + pdata->disable_clk32k(dev); + + return 0; +} + +/*--------------------------------------------------------------------------*/ + +const static struct resource t7l66xb_mmc_resources[] = { + { + .start = 0x800, + .end = 0x9ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 0x200, + .end = 0x2ff, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_T7L66XB_MMC, + .end = IRQ_T7L66XB_MMC, + .flags = IORESOURCE_IRQ, + }, +}; + +const static struct resource t7l66xb_nand_resources[] = { + { + .start = 0xc00, + .end = 0xc07, + .flags = IORESOURCE_MEM, + }, + { + .start = 0x0100, + .end = 0x01ff, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_T7L66XB_NAND, + .end = IRQ_T7L66XB_NAND, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell t7l66xb_cells[] = { + [T7L66XB_CELL_MMC] = { + .name = "tmio-mmc", + .enable = t7l66xb_mmc_enable, + .disable = t7l66xb_mmc_disable, + .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources), + .resources = t7l66xb_mmc_resources, + }, + [T7L66XB_CELL_NAND] = { + .name = "tmio-nand", + .num_resources = ARRAY_SIZE(t7l66xb_nand_resources), + .resources = t7l66xb_nand_resources, + }, +}; + +/*--------------------------------------------------------------------------*/ + +/* Handle the T7L66XB interrupt mux */ +static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) +{ + struct t7l66xb *t7l66xb = get_irq_data(irq); + unsigned int isr; + unsigned int i, irq_base; + + irq_base = t7l66xb->irq_base; + + while ((isr = tmio_ioread8(t7l66xb->scr + SCR_ISR) & + ~tmio_ioread8(t7l66xb->scr + SCR_IMR))) + for (i = 0; i < T7L66XB_NR_IRQS; i++) + if (isr & (1 << i)) + generic_handle_irq(irq_base + i); +} + +static void t7l66xb_irq_mask(unsigned int irq) +{ + struct t7l66xb *t7l66xb = get_irq_chip_data(irq); + unsigned long flags; + u8 imr; + + spin_lock_irqsave(&t7l66xb->lock, flags); + imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); + imr |= 1 << (irq - t7l66xb->irq_base); + tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); + spin_unlock_irqrestore(&t7l66xb->lock, flags); +} + +static void t7l66xb_irq_unmask(unsigned int irq) +{ + struct t7l66xb *t7l66xb = get_irq_chip_data(irq); + unsigned long flags; + u8 imr; + + spin_lock_irqsave(&t7l66xb->lock, flags); + imr = 
tmio_ioread8(t7l66xb->scr + SCR_IMR); + imr &= ~(1 << (irq - t7l66xb->irq_base)); + tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); + spin_unlock_irqrestore(&t7l66xb->lock, flags); +} + +static struct irq_chip t7l66xb_chip = { + .name = "t7l66xb", + .ack = t7l66xb_irq_mask, + .mask = t7l66xb_irq_mask, + .unmask = t7l66xb_irq_unmask, +}; + +/*--------------------------------------------------------------------------*/ + +/* Install the IRQ handler */ +static void t7l66xb_attach_irq(struct platform_device *dev) +{ + struct t7l66xb *t7l66xb = platform_get_drvdata(dev); + unsigned int irq, irq_base; + + irq_base = t7l66xb->irq_base; + + for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { + set_irq_chip(irq, &t7l66xb_chip); + set_irq_chip_data(irq, t7l66xb); + set_irq_handler(irq, handle_level_irq); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); +#endif + } + + set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING); + set_irq_data(t7l66xb->irq, t7l66xb); + set_irq_chained_handler(t7l66xb->irq, t7l66xb_irq); +} + +static void t7l66xb_detach_irq(struct platform_device *dev) +{ + struct t7l66xb *t7l66xb = platform_get_drvdata(dev); + unsigned int irq, irq_base; + + irq_base = t7l66xb->irq_base; + + set_irq_chained_handler(t7l66xb->irq, NULL); + set_irq_data(t7l66xb->irq, NULL); + + for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + set_irq_chip(irq, NULL); + set_irq_chip_data(irq, NULL); + } +} + +/*--------------------------------------------------------------------------*/ + +#ifdef CONFIG_PM +static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state) +{ + struct t7l66xb_platform_data *pdata = dev->dev.platform_data; + + if (pdata && pdata->suspend) + pdata->suspend(dev); + + return 0; +} + +static int t7l66xb_resume(struct platform_device *dev) +{ + struct t7l66xb_platform_data *pdata = dev->dev.platform_data; + + if (pdata && pdata->resume) + pdata->resume(dev); + + return 0; +} +#else +#define t7l66xb_suspend NULL +#define t7l66xb_resume NULL +#endif + +/*--------------------------------------------------------------------------*/ + +static int t7l66xb_probe(struct platform_device *dev) +{ + struct t7l66xb_platform_data *pdata = dev->dev.platform_data; + struct t7l66xb *t7l66xb; + struct resource *iomem, *rscr; + int ret; + + iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!iomem) + return -EINVAL; + + t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL); + if (!t7l66xb) + return -ENOMEM; + + spin_lock_init(&t7l66xb->lock); + + platform_set_drvdata(dev, t7l66xb); + + ret = platform_get_irq(dev, 0); + if (ret >= 0) + t7l66xb->irq = ret; + else + goto err_noirq; + + t7l66xb->irq_base = pdata->irq_base; + + rscr = &t7l66xb->rscr; + rscr->name = "t7l66xb-core"; + rscr->start = iomem->start; + rscr->end = iomem->start + 0xff; + rscr->flags = IORESOURCE_MEM; + + ret = request_resource(iomem, rscr); + if (ret) + goto err_request_scr; + + t7l66xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1); + if (!t7l66xb->scr) { + ret = -ENOMEM; + goto err_ioremap; + } + + if (pdata && pdata->enable) + pdata->enable(dev); + + /* Mask all interrupts */ + tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR); + + printk(KERN_INFO "%s rev %d @ 0x%08lx, irq %d\n", + dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID), + (unsigned long)iomem->start, t7l66xb->irq); + + t7l66xb_attach_irq(dev); + + t7l66xb_cells[T7L66XB_CELL_NAND].driver_data = pdata->nand_data; + + ret = mfd_add_devices(dev, t7l66xb_cells, 
ARRAY_SIZE(t7l66xb_cells), + iomem, t7l66xb->irq_base); + + if (!ret) + return 0; + + t7l66xb_detach_irq(dev); + iounmap(t7l66xb->scr); +err_ioremap: + release_resource(&t7l66xb->rscr); +err_noirq: +err_request_scr: + kfree(t7l66xb); + return ret; +} + +static int t7l66xb_remove(struct platform_device *dev) +{ + struct t7l66xb_platform_data *pdata = dev->dev.platform_data; + struct t7l66xb *t7l66xb = platform_get_drvdata(dev); + int ret; + + ret = pdata->disable(dev); + + t7l66xb_detach_irq(dev); + iounmap(t7l66xb->scr); + release_resource(&t7l66xb->rscr); + mfd_remove_devices(dev); + platform_set_drvdata(dev, NULL); + kfree(t7l66xb); + + return ret; + +} + +static struct platform_driver t7l66xb_platform_driver = { + .driver = { + .name = "t7l66xb", + .owner = THIS_MODULE, + }, + .suspend = t7l66xb_suspend, + .resume = t7l66xb_resume, + .probe = t7l66xb_probe, + .remove = t7l66xb_remove, +}; + +/*--------------------------------------------------------------------------*/ + +static int __init t7l66xb_init(void) +{ + int retval = 0; + + retval = platform_driver_register(&t7l66xb_platform_driver); + return retval; +} + +static void __exit t7l66xb_exit(void) +{ + platform_driver_unregister(&t7l66xb_platform_driver); +} + +module_init(t7l66xb_init); +module_exit(t7l66xb_exit); + +MODULE_DESCRIPTION("Toshiba T7L66XB core driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Ian Molton"); +MODULE_ALIAS("platform:t7l66xb"); diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h new file mode 100644 index 00000000000..e83c7f2036f --- /dev/null +++ b/include/linux/mfd/t7l66xb.h @@ -0,0 +1,36 @@ +/* + * This file contains the definitions for the T7L66XB + * + * (C) Copyright 2005 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef MFD_T7L66XB_H +#define MFD_T7L66XB_H + +#include +#include + +struct t7l66xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* The base for subdevice irqs */ + + struct tmio_nand_data *nand_data; +}; + + +#define IRQ_T7L66XB_MMC (1) +#define IRQ_T7L66XB_NAND (3) + +#define T7L66XB_NR_IRQS 8 + +#endif -- cgit v1.2.3-70-g09d2 From cbdfb426392557d49b1a0e7cb59b16c20dc42955 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Tue, 15 Jul 2008 15:12:52 +0100 Subject: mfd: driver for the TC6387XB TMIO controller. This patch adds support for the TC6387XB. Unlike other TMIO devices this one has only one subdevice and no interrupt mux, however using the MFD framework allows it to share the TMIO MMC driver. 
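As with the T7L66XB above, a board describes this chip with one MEM plus one IRQ resource under the "tc6387xb" device name; the only extra per-board glue is the pair of 32 kHz clock callbacks that the MMC cell's enable/disable hooks call through the platform data. A minimal illustrative sketch, in which the example_* helpers and their bodies are hypothetical board code rather than anything defined by this patch:

#include <linux/platform_device.h>
#include <linux/mfd/tc6387xb.h>

static int example_clk32k_on(struct platform_device *dev)
{
	/* hypothetical: a real board would ungate its 32 kHz clock here */
	return 0;
}

static void example_clk32k_off(struct platform_device *dev)
{
	/* hypothetical: gate the 32 kHz clock again */
}

static struct tc6387xb_platform_data example_tc6387xb_pdata = {
	.enable_clk32k	= example_clk32k_on,
	.disable_clk32k	= example_clk32k_off,
};

tc6387xb_mmc_enable() and tc6387xb_mmc_disable() pick these callbacks up from dev->dev.platform_data whenever the MMC cell is powered up or down.
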
Signed-off-by: Ian Molton Signed-off-by: Samuel Ortiz --- drivers/mfd/Kconfig | 6 ++ drivers/mfd/Makefile | 1 + drivers/mfd/tc6387xb.c | 172 +++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/tc6387xb.h | 23 ++++++ 4 files changed, 202 insertions(+) create mode 100644 drivers/mfd/tc6387xb.c create mode 100644 include/linux/mfd/tc6387xb.h (limited to 'include') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index fc7c919693b..5beff5b7ef2 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -56,6 +56,12 @@ config MFD_T7L66XB help Support for Toshiba Mobile IO Controller T7L66XB +config MFD_TC6387XB + bool "Support Toshiba TC6387XB" + select MFD_CORE + help + Support for Toshiba Mobile IO Controller TC6387XB + config MFD_TC6393XB bool "Support Toshiba TC6393XB" depends on GPIOLIB && ARM diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 3531ad2a276..03ad239ecef 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o +obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o obj-$(CONFIG_MFD_CORE) += mfd-core.o diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c new file mode 100644 index 00000000000..03718feda4d --- /dev/null +++ b/drivers/mfd/tc6387xb.c @@ -0,0 +1,172 @@ +/* + * Toshiba TC6387XB support + * Copyright (c) 2005 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains TC6387XB base support. + * + */ + +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PM +static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state) +{ + struct tc6387xb_platform_data *pdata = platform_get_drvdata(dev); + + if (pdata && pdata->suspend) + pdata->suspend(dev); + + return 0; +} + +static int tc6387xb_resume(struct platform_device *dev) +{ + struct tc6387xb_platform_data *pdata = platform_get_drvdata(dev); + + if (pdata && pdata->resume) + pdata->resume(dev); + + return 0; +} +#else +#define tc6387xb_suspend NULL +#define tc6387xb_resume NULL +#endif + +/*--------------------------------------------------------------------------*/ + +static int tc6387xb_mmc_enable(struct platform_device *mmc) +{ + struct platform_device *dev = to_platform_device(mmc->dev.parent); + struct tc6387xb_platform_data *tc6387xb = dev->dev.platform_data; + + if (tc6387xb->enable_clk32k) + tc6387xb->enable_clk32k(dev); + + return 0; +} + +static int tc6387xb_mmc_disable(struct platform_device *mmc) +{ + struct platform_device *dev = to_platform_device(mmc->dev.parent); + struct tc6387xb_platform_data *tc6387xb = dev->dev.platform_data; + + if (tc6387xb->disable_clk32k) + tc6387xb->disable_clk32k(dev); + + return 0; +} + +/*--------------------------------------------------------------------------*/ + +static struct resource tc6387xb_mmc_resources[] = { + { + .start = 0x800, + .end = 0x9ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 0x200, + .end = 0x2ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 0, + .end = 0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell tc6387xb_cells[] = { + { + .name = "tmio-mmc", + .enable = tc6387xb_mmc_enable, + .disable = tc6387xb_mmc_disable, + .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources), + .resources = tc6387xb_mmc_resources, + }, +}; + +static 
int tc6387xb_probe(struct platform_device *dev) +{ + struct tc6387xb_platform_data *data = platform_get_drvdata(dev); + struct resource *iomem; + int irq, ret; + + iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!iomem) { + ret = -EINVAL; + goto err_resource; + } + + ret = platform_get_irq(dev, 0); + if (ret >= 0) + irq = ret; + else + goto err_resource; + + if (data && data->enable) + data->enable(dev); + + printk(KERN_INFO "Toshiba tc6387xb initialised\n"); + + ret = mfd_add_devices(dev, tc6387xb_cells, + ARRAY_SIZE(tc6387xb_cells), iomem, irq); + + if (!ret) + return 0; + +err_resource: + return ret; +} + +static int tc6387xb_remove(struct platform_device *dev) +{ + struct tc6387xb_platform_data *data = platform_get_drvdata(dev); + + if (data && data->disable) + data->disable(dev); + + /* FIXME - free the resources! */ + + return 0; +} + + +static struct platform_driver tc6387xb_platform_driver = { + .driver = { + .name = "tc6387xb", + }, + .probe = tc6387xb_probe, + .remove = tc6387xb_remove, + .suspend = tc6387xb_suspend, + .resume = tc6387xb_resume, +}; + + +static int __init tc6387xb_init(void) +{ + return platform_driver_register(&tc6387xb_platform_driver); +} + +static void __exit tc6387xb_exit(void) +{ + platform_driver_unregister(&tc6387xb_platform_driver); +} + +module_init(tc6387xb_init); +module_exit(tc6387xb_exit); + +MODULE_DESCRIPTION("Toshiba TC6387XB core driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Ian Molton"); +MODULE_ALIAS("platform:tc6387xb"); diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h new file mode 100644 index 00000000000..fa06e0610b8 --- /dev/null +++ b/include/linux/mfd/tc6387xb.h @@ -0,0 +1,23 @@ +/* + * This file contains the definitions for the TC6387XB + * + * (C) Copyright 2005 Ian Molton + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + */ +#ifndef MFD_TC6387XB_H +#define MFD_TC6387XB_H + +struct tc6387xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); +}; + +#endif -- cgit v1.2.3-70-g09d2 From 25d6cbd840d958aada29a342c9ee370590ff7b21 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Sun, 10 Aug 2008 23:32:07 +0200 Subject: mfd: tc6393 cleanup and update This patchset cleans up the TC6393XB support. * Add provision for the MMC subdevice * Disable / enable clocks on suspend / resume * Remove fragments of badly merged code (eg. linux/fb include etc.) 
* Use a device specific clock name to break dependancy on ARM/PXA2XX * Drop unnecessary resource names * Switch to tmio_io* accessors Signed-off-by: Ian Molton Signed-off-by: Samuel Ortiz --- drivers/mfd/tc6393xb.c | 156 ++++++++++++++++++++++++++----------------- include/linux/mfd/tc6393xb.h | 9 +-- 2 files changed, 96 insertions(+), 69 deletions(-) (limited to 'include') diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index 81e2605ea10..e4c1c788b5f 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c @@ -19,8 +19,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -112,6 +112,7 @@ struct tc6393xb { enum { TC6393XB_CELL_NAND, + TC6393XB_CELL_MMC, }; /*--------------------------------------------------------------------------*/ @@ -126,7 +127,7 @@ static int tc6393xb_nand_enable(struct platform_device *nand) /* SMD buffer on */ dev_dbg(&dev->dev, "SMD buffer on\n"); - iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1)); + tmio_iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1)); spin_unlock_irqrestore(&tc6393xb->lock, flags); @@ -135,13 +136,13 @@ static int tc6393xb_nand_enable(struct platform_device *nand) static struct resource __devinitdata tc6393xb_nand_resources[] = { { - .start = 0x0100, - .end = 0x01ff, + .start = 0x1000, + .end = 0x1007, .flags = IORESOURCE_MEM, }, { - .start = 0x1000, - .end = 0x1007, + .start = 0x0100, + .end = 0x01ff, .flags = IORESOURCE_MEM, }, { @@ -151,6 +152,24 @@ static struct resource __devinitdata tc6393xb_nand_resources[] = { }, }; +static struct resource __devinitdata tc6393xb_mmc_resources[] = { + { + .start = 0x800, + .end = 0x9ff, + .flags = IORESOURCE_MEM, + }, + { + .start = 0x200, + .end = 0x2ff, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_TC6393_MMC, + .end = IRQ_TC6393_MMC, + .flags = IORESOURCE_IRQ, + }, +}; + static struct mfd_cell __devinitdata tc6393xb_cells[] = { [TC6393XB_CELL_NAND] = { .name = "tmio-nand", @@ -158,6 +177,11 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = { .num_resources = ARRAY_SIZE(tc6393xb_nand_resources), .resources = tc6393xb_nand_resources, }, + [TC6393XB_CELL_MMC] = { + .name = "tmio-mmc", + .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources), + .resources = tc6393xb_mmc_resources, + }, }; /*--------------------------------------------------------------------------*/ @@ -168,7 +192,7 @@ static int tc6393xb_gpio_get(struct gpio_chip *chip, struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); /* XXX: does dsr also represent inputs? 
*/ - return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)) + return tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)) & TC_GPIO_BIT(offset); } @@ -178,13 +202,13 @@ static void __tc6393xb_gpio_set(struct gpio_chip *chip, struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); u8 dsr; - dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)); + dsr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)); if (value) dsr |= TC_GPIO_BIT(offset); else dsr &= ~TC_GPIO_BIT(offset); - iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8)); + tmio_iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8)); } static void tc6393xb_gpio_set(struct gpio_chip *chip, @@ -209,9 +233,9 @@ static int tc6393xb_gpio_direction_input(struct gpio_chip *chip, spin_lock_irqsave(&tc6393xb->lock, flags); - doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); doecr &= ~TC_GPIO_BIT(offset); - iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); spin_unlock_irqrestore(&tc6393xb->lock, flags); @@ -229,9 +253,9 @@ static int tc6393xb_gpio_direction_output(struct gpio_chip *chip, __tc6393xb_gpio_set(chip, offset, value); - doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); doecr |= TC_GPIO_BIT(offset); - iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); + tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); spin_unlock_irqrestore(&tc6393xb->lock, flags); @@ -262,8 +286,8 @@ tc6393xb_irq(unsigned int irq, struct irq_desc *desc) irq_base = tc6393xb->irq_base; - while ((isr = ioread8(tc6393xb->scr + SCR_ISR) & - ~ioread8(tc6393xb->scr + SCR_IMR))) + while ((isr = tmio_ioread8(tc6393xb->scr + SCR_ISR) & + ~tmio_ioread8(tc6393xb->scr + SCR_IMR))) for (i = 0; i < TC6393XB_NR_IRQS; i++) { if (isr & (1 << i)) generic_handle_irq(irq_base + i); @@ -281,9 +305,9 @@ static void tc6393xb_irq_mask(unsigned int irq) u8 imr; spin_lock_irqsave(&tc6393xb->lock, flags); - imr = ioread8(tc6393xb->scr + SCR_IMR); + imr = tmio_ioread8(tc6393xb->scr + SCR_IMR); imr |= 1 << (irq - tc6393xb->irq_base); - iowrite8(imr, tc6393xb->scr + SCR_IMR); + tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR); spin_unlock_irqrestore(&tc6393xb->lock, flags); } @@ -294,9 +318,9 @@ static void tc6393xb_irq_unmask(unsigned int irq) u8 imr; spin_lock_irqsave(&tc6393xb->lock, flags); - imr = ioread8(tc6393xb->scr + SCR_IMR); + imr = tmio_ioread8(tc6393xb->scr + SCR_IMR); imr &= ~(1 << (irq - tc6393xb->irq_base)); - iowrite8(imr, tc6393xb->scr + SCR_IMR); + tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR); spin_unlock_irqrestore(&tc6393xb->lock, flags); } @@ -377,9 +401,8 @@ static int __devinit tc6393xb_probe(struct platform_device *dev) { struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; struct tc6393xb *tc6393xb; - struct resource *iomem; - struct resource *rscr; - int retval, temp; + struct resource *iomem, *rscr; + int ret, temp; int i; iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); @@ -388,20 +411,26 @@ static int __devinit tc6393xb_probe(struct platform_device *dev) tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL); if (!tc6393xb) { - retval = -ENOMEM; + ret = -ENOMEM; goto err_kzalloc; } spin_lock_init(&tc6393xb->lock); platform_set_drvdata(dev, tc6393xb); + + ret = platform_get_irq(dev, 0); + if (ret >= 0) + tc6393xb->irq = ret; + else + goto err_noirq; + tc6393xb->iomem = iomem; - 
tc6393xb->irq = platform_get_irq(dev, 0); tc6393xb->irq_base = tcpd->irq_base; - tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */); + tc6393xb->clk = clk_get(&dev->dev, "CLK_CK3P6MI"); if (IS_ERR(tc6393xb->clk)) { - retval = PTR_ERR(tc6393xb->clk); + ret = PTR_ERR(tc6393xb->clk); goto err_clk_get; } @@ -411,71 +440,73 @@ static int __devinit tc6393xb_probe(struct platform_device *dev) rscr->end = iomem->start + 0xff; rscr->flags = IORESOURCE_MEM; - retval = request_resource(iomem, rscr); - if (retval) + ret = request_resource(iomem, rscr); + if (ret) goto err_request_scr; tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1); if (!tc6393xb->scr) { - retval = -ENOMEM; + ret = -ENOMEM; goto err_ioremap; } - retval = clk_enable(tc6393xb->clk); - if (retval) + ret = clk_enable(tc6393xb->clk); + if (ret) goto err_clk_enable; - retval = tcpd->enable(dev); - if (retval) + ret = tcpd->enable(dev); + if (ret) goto err_enable; tc6393xb->suspend_state.fer = 0; + for (i = 0; i < 3; i++) { tc6393xb->suspend_state.gpo_dsr[i] = (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff; tc6393xb->suspend_state.gpo_doecr[i] = (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff; } - /* - * It may be necessary to change this back to - * platform-dependant code - */ + tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 | SCR_CCR_HCLK_48; - retval = tc6393xb_hw_init(dev); - if (retval) + ret = tc6393xb_hw_init(dev); + if (ret) goto err_hw_init; printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n", - ioread8(tc6393xb->scr + SCR_REVID), + tmio_ioread8(tc6393xb->scr + SCR_REVID), (unsigned long) iomem->start, tc6393xb->irq); tc6393xb->gpio.base = -1; if (tcpd->gpio_base >= 0) { - retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base); - if (retval) + ret = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base); + if (ret) goto err_gpio_add; } - if (tc6393xb->irq) - tc6393xb_attach_irq(dev); + tc6393xb_attach_irq(dev); tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data; tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = &tc6393xb_cells[TC6393XB_CELL_NAND]; tc6393xb_cells[TC6393XB_CELL_NAND].data_size = sizeof(tc6393xb_cells[TC6393XB_CELL_NAND]); + tc6393xb_cells[TC6393XB_CELL_MMC].platform_data = + &tc6393xb_cells[TC6393XB_CELL_MMC]; + tc6393xb_cells[TC6393XB_CELL_MMC].data_size = + sizeof(tc6393xb_cells[TC6393XB_CELL_MMC]); + - retval = mfd_add_devices(&dev->dev, dev->id, + ret = mfd_add_devices(&dev->dev, dev->id, tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), iomem, tcpd->irq_base); - return 0; + if (!ret) + return 0; - if (tc6393xb->irq) - tc6393xb_detach_irq(dev); + tc6393xb_detach_irq(dev); err_gpio_add: if (tc6393xb->gpio.base != -1) @@ -490,10 +521,11 @@ err_ioremap: release_resource(&tc6393xb->rscr); err_request_scr: clk_put(tc6393xb->clk); +err_noirq: err_clk_get: kfree(tc6393xb); err_kzalloc: - return retval; + return ret; } static int __devexit tc6393xb_remove(struct platform_device *dev) @@ -503,9 +535,7 @@ static int __devexit tc6393xb_remove(struct platform_device *dev) int ret; mfd_remove_devices(&dev->dev); - - if (tc6393xb->irq) - tc6393xb_detach_irq(dev); + tc6393xb_detach_irq(dev); if (tc6393xb->gpio.base != -1) { ret = gpiochip_remove(&tc6393xb->gpio); @@ -516,17 +546,11 @@ static int __devexit tc6393xb_remove(struct platform_device *dev) } ret = tcpd->disable(dev); - clk_disable(tc6393xb->clk); - iounmap(tc6393xb->scr); - release_resource(&tc6393xb->rscr); - platform_set_drvdata(dev, NULL); - clk_put(tc6393xb->clk); - kfree(tc6393xb); return ret; @@ -537,8 +561,7 @@ static int 
tc6393xb_suspend(struct platform_device *dev, pm_message_t state) { struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; struct tc6393xb *tc6393xb = platform_get_drvdata(dev); - int i; - + int i, ret; tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR); tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER); @@ -551,14 +574,21 @@ static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state) tc6393xb->suspend_state.gpi_bcr[i] = ioread8(tc6393xb->scr + SCR_GPI_BCR(i)); } + ret = tcpd->suspend(dev); + clk_disable(tc6393xb->clk); - return tcpd->suspend(dev); + return ret; } static int tc6393xb_resume(struct platform_device *dev) { struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; - int ret = tcpd->resume(dev); + struct tc6393xb *tc6393xb = platform_get_drvdata(dev); + int ret; + + clk_enable(tc6393xb->clk); + + ret = tcpd->resume(dev); if (ret) return ret; @@ -595,7 +625,7 @@ static void __exit tc6393xb_exit(void) subsys_initcall(tc6393xb_init); module_exit(tc6393xb_exit); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer"); MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller"); MODULE_ALIAS("platform:tc6393xb"); diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h index 7cc824a58f7..fec7b3f7a81 100644 --- a/include/linux/mfd/tc6393xb.h +++ b/include/linux/mfd/tc6393xb.h @@ -14,8 +14,8 @@ * published by the Free Software Foundation. */ -#ifndef TC6393XB_H -#define TC6393XB_H +#ifndef MFD_TC6393XB_H +#define MFD_TC6393XB_H /* Also one should provide the CK3P6MI clock */ struct tc6393xb_platform_data { @@ -29,7 +29,7 @@ struct tc6393xb_platform_data { int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); - int irq_base; /* a base for cascaded irq */ + int irq_base; /* base for subdevice irqs */ int gpio_base; struct tmio_nand_data *nand_data; @@ -40,9 +40,6 @@ struct tc6393xb_platform_data { */ #define IRQ_TC6393_NAND 0 #define IRQ_TC6393_MMC 1 -#define IRQ_TC6393_OHCI 2 -#define IRQ_TC6393_SERIAL 3 -#define IRQ_TC6393_FB 4 #define TC6393XB_NR_IRQS 8 -- cgit v1.2.3-70-g09d2 From c1955a3d4762e7a9bf84035eb3c4886a900f0d15 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 11 Aug 2008 08:59:03 +0200 Subject: sched_clock: delay using sched_clock() Some arch's can't handle sched_clock() being called too early - delay this until sched_clock_init() has been called. 
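
For illustration, the fallback path added below simply gates sched_clock_cpu() on an init flag, so a caller that asks for a timestamp before sched_clock_init() has run gets 0 instead of a call into a not-yet-usable architecture clock. A minimal sketch of that gating pattern, condensed from the hunk below (the early caller is hypothetical):

static __read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	/* Too early: the arch sched_clock() may not be usable yet. */
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

/* Hypothetical early user, e.g. a boot-time timestamp: safe before init. */
static u64 early_boot_timestamp(void)
{
	return sched_clock_cpu(0);	/* 0 until sched_clock_init() runs */
}
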
Reported-by: Bill Gatliff Signed-off-by: Peter Zijlstra Tested-by: Nishanth Aravamudan CC: Russell King - ARM Linux Signed-off-by: Ingo Molnar --- include/linux/sched.h | 14 +++----------- kernel/sched_clock.c | 19 +++++++++++++++++-- 2 files changed, 20 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index ea436bc1a0e..5850bfb968a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init(void) -{ -} - -static inline u64 sched_clock_cpu(int cpu) -{ - return sched_clock(); -} +extern void sched_clock_init(void); +extern u64 sched_clock_cpu(int cpu); +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { } @@ -1573,8 +1567,6 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } #else -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 074edc98937..204991a0bfa 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -42,6 +42,8 @@ unsigned long long __attribute__((weak)) sched_clock(void) return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); } +static __read_mostly int sched_clock_running; + #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK struct sched_clock_data { @@ -70,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) return &per_cpu(sched_clock_data, cpu); } -static __read_mostly int sched_clock_running; - void sched_clock_init(void) { u64 ktime_now = ktime_to_ns(ktime_get()); @@ -248,6 +248,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) } EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); +#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ + +void sched_clock_init(void) +{ + sched_clock_running = 1; +} + +u64 sched_clock_cpu(int cpu) +{ + if (unlikely(!sched_clock_running)) + return 0; + + return sched_clock(); +} + #endif unsigned long long cpu_clock(int cpu) -- cgit v1.2.3-70-g09d2 From 64aa348edc617dea17bbd01ddee4e47886d5ec8c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 11 Aug 2008 09:30:21 +0200 Subject: lockdep: lock_set_subclass - reset a held lock's subclass this can be used to reset a held lock's subclass, for arbitrary-depth iterated data structures such as trees or lists which have per-node locks. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 4 +++ kernel/lockdep.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 1bfdc30bb0a..f270ce1582f 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -300,6 +300,9 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, + unsigned long ip); + # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? 
(tsk)->lockdep_depth : 0) @@ -316,6 +319,7 @@ static inline void lockdep_on(void) # define lock_acquire(l, s, t, r, c, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 6999e64fc24..e14d383dcb0 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2660,6 +2660,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, return 1; } +static int +__lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + struct task_struct *curr = current; + struct held_lock *hlock, *prev_hlock; + struct lock_class *class; + unsigned int depth; + int i; + + depth = curr->lockdep_depth; + if (DEBUG_LOCKS_WARN_ON(!depth)) + return 0; + + prev_hlock = NULL; + for (i = depth-1; i >= 0; i--) { + hlock = curr->held_locks + i; + /* + * We must not cross into another context: + */ + if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) + break; + if (hlock->instance == lock) + goto found_it; + prev_hlock = hlock; + } + return print_unlock_inbalance_bug(curr, lock, ip); + +found_it: + class = register_lock_class(lock, subclass, 0); + hlock->class = class; + + curr->lockdep_depth = i; + curr->curr_chain_key = hlock->prev_chain_key; + + for (; i < depth; i++) { + hlock = curr->held_locks + i; + if (!__lock_acquire(hlock->instance, + hlock->class->subclass, hlock->trylock, + hlock->read, hlock->check, hlock->hardirqs_off, + hlock->acquire_ip)) + return 0; + } + + if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) + return 0; + return 1; +} + /* * Remove the lock to the list of currently held locks in a * potentially non-nested (out of order) manner. 
This is a @@ -2824,6 +2873,26 @@ static void check_flags(unsigned long flags) #endif } +void +lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + unsigned long flags; + + if (unlikely(current->lockdep_recursion)) + return; + + raw_local_irq_save(flags); + current->lockdep_recursion = 1; + check_flags(flags); + if (__lock_set_subclass(lock, subclass, ip)) + check_chain_key(current); + current->lockdep_recursion = 0; + raw_local_irq_restore(flags); +} + +EXPORT_SYMBOL_GPL(lock_set_subclass); + /* * We are not always called with irqs disabled - do that here, * and also avoid lockdep recursion: -- cgit v1.2.3-70-g09d2 From f82b217e3513fe3af342c0f3ee1494e86250c21c Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Mon, 11 Aug 2008 09:30:23 +0200 Subject: lockdep: shrink held_lock structure struct held_lock { u64 prev_chain_key; /* 0 8 */ struct lock_class * class; /* 8 8 */ long unsigned int acquire_ip; /* 16 8 */ struct lockdep_map * instance; /* 24 8 */ int irq_context; /* 32 4 */ int trylock; /* 36 4 */ int read; /* 40 4 */ int check; /* 44 4 */ int hardirqs_off; /* 48 4 */ /* size: 56, cachelines: 1 */ /* padding: 4 */ /* last cacheline: 56 bytes */ }; struct held_lock { u64 prev_chain_key; /* 0 8 */ long unsigned int acquire_ip; /* 8 8 */ struct lockdep_map * instance; /* 16 8 */ unsigned int class_idx:11; /* 24:21 4 */ unsigned int irq_context:2; /* 24:19 4 */ unsigned int trylock:1; /* 24:18 4 */ unsigned int read:2; /* 24:16 4 */ unsigned int check:2; /* 24:14 4 */ unsigned int hardirqs_off:1; /* 24:13 4 */ /* size: 32, cachelines: 1 */ /* padding: 4 */ /* bit_padding: 13 bits */ /* last cacheline: 32 bytes */ }; [mingo@elte.hu: shrunk hlock->class too] [peterz@infradead.org: fixup bit sizes] Signed-off-by: Dave Jones Signed-off-by: Ingo Molnar Signed-off-by: Peter Zijlstra --- include/linux/lockdep.h | 16 ++++--- kernel/lockdep.c | 113 ++++++++++++++++++++++++++------------------- kernel/lockdep_internals.h | 3 -- 3 files changed, 74 insertions(+), 58 deletions(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index f270ce1582f..b49bfa8e4a5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -190,6 +190,9 @@ struct lock_chain { u64 chain_key; }; +#define MAX_LOCKDEP_KEYS_BITS 11 +#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) + struct held_lock { /* * One-way hash of the dependency chain up to this point. 
We @@ -206,14 +209,13 @@ struct held_lock { * with zero), here we store the previous hash value: */ u64 prev_chain_key; - struct lock_class *class; unsigned long acquire_ip; struct lockdep_map *instance; - #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' @@ -227,11 +229,11 @@ struct held_lock { * The following field is used to detect when we cross into an * interrupt context: */ - int irq_context; - int trylock; - int read; - int check; - int hardirqs_off; + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:2; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; }; /* diff --git a/kernel/lockdep.c b/kernel/lockdep.c index e14d383dcb0..d3c72ad8d09 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; unsigned long nr_lock_classes; static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; +static inline struct lock_class *hlock_class(struct held_lock *hlock) +{ + if (!hlock->class_idx) { + DEBUG_LOCKS_WARN_ON(1); + return NULL; + } + return lock_classes + hlock->class_idx - 1; +} + #ifdef CONFIG_LOCK_STAT static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); @@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock) holdtime = sched_clock() - hlock->holdtime_stamp; - stats = get_lock_stats(hlock->class); + stats = get_lock_stats(hlock_class(hlock)); if (hlock->read) lock_time_inc(&stats->read_holdtime, holdtime); else @@ -518,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock) static void print_lock(struct held_lock *hlock) { - print_lock_name(hlock->class); + print_lock_name(hlock_class(hlock)); printk(", at: "); print_ip_sym(hlock->acquire_ip); } @@ -948,7 +957,7 @@ static noinline int print_circular_bug_tail(void) if (debug_locks_silent) return 0; - this.class = check_source->class; + this.class = hlock_class(check_source); if (!save_trace(&this.trace)) return 0; @@ -1057,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth) * Check this lock's dependency list: */ list_for_each_entry(entry, &source->locks_after, entry) { - if (entry->class == check_target->class) + if (entry->class == hlock_class(check_target)) return print_circular_bug_header(entry, depth+1); debug_atomic_inc(&nr_cyclic_checks); if (!check_noncircular(entry->class, depth+1)) @@ -1150,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) return 2; } + if (!source && debug_locks_off_graph_unlock()) { + WARN_ON(1); + return 0; + } + /* * Check this lock's dependency list: */ @@ -1189,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr, printk("\nand this task is already holding:\n"); print_lock(prev); printk("which would create a new lock dependency:\n"); - print_lock_name(prev->class); + print_lock_name(hlock_class(prev)); printk(" ->"); - print_lock_name(next->class); + print_lock_name(hlock_class(next)); printk("\n"); printk("\nbut this new dependency connects a %s-irq-safe lock:\n", @@ -1232,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev, find_usage_bit = bit_backwards; /* fills in */ - ret = find_usage_backwards(prev->class, 0); + ret = 
find_usage_backwards(hlock_class(prev), 0); if (!ret || ret == 1) return ret; find_usage_bit = bit_forwards; - ret = find_usage_forwards(next->class, 0); + ret = find_usage_forwards(hlock_class(next), 0); if (!ret || ret == 1) return ret; /* ret == 2 */ @@ -1362,7 +1376,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, for (i = 0; i < curr->lockdep_depth; i++) { prev = curr->held_locks + i; - if (prev->class != next->class) + if (hlock_class(prev) != hlock_class(next)) continue; /* * Allow read-after-read recursion of the same @@ -1415,7 +1429,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, */ check_source = next; check_target = prev; - if (!(check_noncircular(next->class, 0))) + if (!(check_noncircular(hlock_class(next), 0))) return print_circular_bug_tail(); if (!check_prev_add_irq(curr, prev, next)) @@ -1439,8 +1453,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, * chains - the second one will be new, but L1 already has * L2 added to its dependency list, due to the first chain.) */ - list_for_each_entry(entry, &prev->class->locks_after, entry) { - if (entry->class == next->class) { + list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { + if (entry->class == hlock_class(next)) { if (distance == 1) entry->distance = 1; return 2; @@ -1451,26 +1465,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, * Ok, all validations passed, add the new lock * to the previous lock's dependency list: */ - ret = add_lock_to_list(prev->class, next->class, - &prev->class->locks_after, next->acquire_ip, distance); + ret = add_lock_to_list(hlock_class(prev), hlock_class(next), + &hlock_class(prev)->locks_after, + next->acquire_ip, distance); if (!ret) return 0; - ret = add_lock_to_list(next->class, prev->class, - &next->class->locks_before, next->acquire_ip, distance); + ret = add_lock_to_list(hlock_class(next), hlock_class(prev), + &hlock_class(next)->locks_before, + next->acquire_ip, distance); if (!ret) return 0; /* * Debugging printouts: */ - if (verbose(prev->class) || verbose(next->class)) { + if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { graph_unlock(); printk("\n new dependency: "); - print_lock_name(prev->class); + print_lock_name(hlock_class(prev)); printk(" => "); - print_lock_name(next->class); + print_lock_name(hlock_class(next)); printk("\n"); dump_stack(); return graph_lock(); @@ -1567,7 +1583,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, struct held_lock *hlock, u64 chain_key) { - struct lock_class *class = hlock->class; + struct lock_class *class = hlock_class(hlock); struct list_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; struct held_lock *hlock_curr, *hlock_next; @@ -1640,7 +1656,7 @@ cache_hit: if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { chain->base = cn; for (j = 0; j < chain->depth - 1; j++, i++) { - int lock_id = curr->held_locks[i].class - lock_classes; + int lock_id = curr->held_locks[i].class_idx - 1; chain_hlocks[chain->base + j] = lock_id; } chain_hlocks[chain->base + j] = class - lock_classes; @@ -1736,7 +1752,7 @@ static void check_chain_key(struct task_struct *curr) WARN_ON(1); return; } - id = hlock->class - lock_classes; + id = hlock->class_idx - 1; if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) return; @@ -1781,7 +1797,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, print_lock(this); printk("{%s} state was registered at:\n", usage_str[prev_bit]); - 
print_stack_trace(this->class->usage_traces + prev_bit, 1); + print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); print_irqtrace_events(curr); printk("\nother info that might help us debug this:\n"); @@ -1800,7 +1816,7 @@ static inline int valid_state(struct task_struct *curr, struct held_lock *this, enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) { - if (unlikely(this->class->usage_mask & (1 << bad_bit))) + if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) return print_usage_bug(curr, this, bad_bit, new_bit); return 1; } @@ -1839,7 +1855,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, lockdep_print_held_locks(curr); printk("\nthe first lock's dependencies:\n"); - print_lock_dependencies(this->class, 0); + print_lock_dependencies(hlock_class(this), 0); printk("\nthe second lock's dependencies:\n"); print_lock_dependencies(other, 0); @@ -1862,7 +1878,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, find_usage_bit = bit; /* fills in */ - ret = find_usage_forwards(this->class, 0); + ret = find_usage_forwards(hlock_class(this), 0); if (!ret || ret == 1) return ret; @@ -1881,7 +1897,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, find_usage_bit = bit; /* fills in */ - ret = find_usage_backwards(this->class, 0); + ret = find_usage_backwards(hlock_class(this), 0); if (!ret || ret == 1) return ret; @@ -1947,7 +1963,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_ENABLED_HARDIRQS_READ, "hard-read")) return 0; #endif - if (hardirq_verbose(this->class)) + if (hardirq_verbose(hlock_class(this))) ret = 2; break; case LOCK_USED_IN_SOFTIRQ: @@ -1972,7 +1988,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) return 0; #endif - if (softirq_verbose(this->class)) + if (softirq_verbose(hlock_class(this))) ret = 2; break; case LOCK_USED_IN_HARDIRQ_READ: @@ -1985,7 +2001,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (!check_usage_forwards(curr, this, LOCK_ENABLED_HARDIRQS, "hard")) return 0; - if (hardirq_verbose(this->class)) + if (hardirq_verbose(hlock_class(this))) ret = 2; break; case LOCK_USED_IN_SOFTIRQ_READ: @@ -1998,7 +2014,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, if (!check_usage_forwards(curr, this, LOCK_ENABLED_SOFTIRQS, "soft")) return 0; - if (softirq_verbose(this->class)) + if (softirq_verbose(hlock_class(this))) ret = 2; break; case LOCK_ENABLED_HARDIRQS: @@ -2024,7 +2040,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_HARDIRQ_READ, "hard-read")) return 0; #endif - if (hardirq_verbose(this->class)) + if (hardirq_verbose(hlock_class(this))) ret = 2; break; case LOCK_ENABLED_SOFTIRQS: @@ -2050,7 +2066,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) return 0; #endif - if (softirq_verbose(this->class)) + if (softirq_verbose(hlock_class(this))) ret = 2; break; case LOCK_ENABLED_HARDIRQS_READ: @@ -2065,7 +2081,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_HARDIRQ, "hard")) return 0; #endif - if (hardirq_verbose(this->class)) + if (hardirq_verbose(hlock_class(this))) ret = 2; break; case LOCK_ENABLED_SOFTIRQS_READ: @@ -2080,7 +2096,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, LOCK_USED_IN_SOFTIRQ, 
"soft")) return 0; #endif - if (softirq_verbose(this->class)) + if (softirq_verbose(hlock_class(this))) ret = 2; break; default: @@ -2396,7 +2412,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, * If already set then do not dirty the cacheline, * nor do any checks: */ - if (likely(this->class->usage_mask & new_mask)) + if (likely(hlock_class(this)->usage_mask & new_mask)) return 1; if (!graph_lock()) @@ -2404,14 +2420,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, /* * Make sure we didnt race: */ - if (unlikely(this->class->usage_mask & new_mask)) { + if (unlikely(hlock_class(this)->usage_mask & new_mask)) { graph_unlock(); return 1; } - this->class->usage_mask |= new_mask; + hlock_class(this)->usage_mask |= new_mask; - if (!save_trace(this->class->usage_traces + new_bit)) + if (!save_trace(hlock_class(this)->usage_traces + new_bit)) return 0; switch (new_bit) { @@ -2545,8 +2561,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, return 0; hlock = curr->held_locks + depth; - - hlock->class = class; + if (DEBUG_LOCKS_WARN_ON(!class)) + return 0; + hlock->class_idx = class - lock_classes + 1; hlock->acquire_ip = ip; hlock->instance = lock; hlock->trylock = trylock; @@ -2690,7 +2707,7 @@ __lock_set_subclass(struct lockdep_map *lock, found_it: class = register_lock_class(lock, subclass, 0); - hlock->class = class; + hlock->class_idx = class - lock_classes + 1; curr->lockdep_depth = i; curr->curr_chain_key = hlock->prev_chain_key; @@ -2698,7 +2715,7 @@ found_it: for (; i < depth; i++) { hlock = curr->held_locks + i; if (!__lock_acquire(hlock->instance, - hlock->class->subclass, hlock->trylock, + hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->acquire_ip)) return 0; @@ -2759,7 +2776,7 @@ found_it: for (i++; i < depth; i++) { hlock = curr->held_locks + i; if (!__lock_acquire(hlock->instance, - hlock->class->subclass, hlock->trylock, + hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->acquire_ip)) return 0; @@ -2804,7 +2821,7 @@ static int lock_release_nested(struct task_struct *curr, #ifdef CONFIG_DEBUG_LOCKDEP hlock->prev_chain_key = 0; - hlock->class = NULL; + hlock->class_idx = 0; hlock->acquire_ip = 0; hlock->irq_context = 0; #endif @@ -3000,9 +3017,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) found_it: hlock->waittime_stamp = sched_clock(); - point = lock_contention_point(hlock->class, ip); + point = lock_contention_point(hlock_class(hlock), ip); - stats = get_lock_stats(hlock->class); + stats = get_lock_stats(hlock_class(hlock)); if (point < ARRAY_SIZE(stats->contention_point)) stats->contention_point[i]++; if (lock->cpu != smp_processor_id()) @@ -3048,7 +3065,7 @@ found_it: hlock->holdtime_stamp = now; } - stats = get_lock_stats(hlock->class); + stats = get_lock_stats(hlock_class(hlock)); if (waittime) { if (hlock->read) lock_time_inc(&stats->read_waittime, waittime); diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index 68d44ec77ab..55db193d366 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h @@ -17,9 +17,6 @@ */ #define MAX_LOCKDEP_ENTRIES 8192UL -#define MAX_LOCKDEP_KEYS_BITS 11 -#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) - #define MAX_LOCKDEP_CHAINS_BITS 14 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) -- cgit v1.2.3-70-g09d2 From 4f3e7524b2e703d9f8b02ac338153a53dd7ede66 Mon Sep 17 00:00:00 2001 From: Peter 
Zijlstra Date: Mon, 11 Aug 2008 09:30:23 +0200 Subject: lockdep: map_acquire Most the free-standing lock_acquire() usages look remarkably similar, sweep them into a new helper. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- fs/jbd/transaction.c | 4 ++-- fs/jbd2/transaction.c | 4 ++-- include/linux/lockdep.h | 12 ++++++++++++ kernel/workqueue.c | 24 ++++++++++++------------ 4 files changed, 28 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 8dee3200750..31a4bd7f78d 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks) goto out; } - lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); + map_acquire(&handle->h_lockdep_map); out: return handle; @@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle) spin_unlock(&journal->j_state_lock); } - lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); + map_release(&handle->h_lockdep_map); jbd_free_handle(handle); return err; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 4f7cadbb19f..c074971215e 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) goto out; } - lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_); + map_acquire(&handle->h_lockdep_map); out: return handle; } @@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle) spin_unlock(&journal->j_state_lock); } - lock_release(&handle->h_lockdep_map, 1, _THIS_IP_); + map_release(&handle->h_lockdep_map); jbd2_free_handle(handle); return err; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index b49bfa8e4a5..e431d1d6eaf 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -459,4 +459,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define rwsem_release(l, n, i) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, _THIS_IP_) +# else +# define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, _THIS_IP_) +# endif +# define map_release(l) lock_release(l, 1, _THIS_IP_) +#else +# define map_acquire(l) do { } while (0) +# define map_release(l) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ec7e4f62aaf..53564ae894a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); - lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); - lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_); + map_acquire(&cwq->wq->lockdep_map); + map_acquire(&lockdep_map); f(work); - lock_release(&lockdep_map, 1, _THIS_IP_); - lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); + map_release(&lockdep_map); + map_release(&cwq->wq->lockdep_map); if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " @@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq) int cpu; might_sleep(); - lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); - lock_release(&wq->lockdep_map, 1, _THIS_IP_); + map_acquire(&wq->lockdep_map); + map_release(&wq->lockdep_map); for_each_cpu_mask_nr(cpu, *cpu_map) flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); } @@ -441,8 +441,8 @@ int flush_work(struct work_struct *work) if (!cwq) return 0; - 
lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); - lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); + map_acquire(&cwq->wq->lockdep_map); + map_release(&cwq->wq->lockdep_map); prev = NULL; spin_lock_irq(&cwq->lock); @@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work) might_sleep(); - lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_); - lock_release(&work->lockdep_map, 1, _THIS_IP_); + map_acquire(&work->lockdep_map); + map_release(&work->lockdep_map); cwq = get_wq_data(work); if (!cwq) @@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) if (cwq->thread == NULL) return; - lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); - lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); + map_acquire(&cwq->wq->lockdep_map); + map_release(&cwq->wq->lockdep_map); flush_cpu_workqueue(cwq); /* -- cgit v1.2.3-70-g09d2 From 7531e2f34d1d551b096143f19111139f0dd84c8b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 11 Aug 2008 09:30:24 +0200 Subject: lockdep: lock protection locks On Fri, 2008-08-01 at 16:26 -0700, Linus Torvalds wrote: > On Fri, 1 Aug 2008, David Miller wrote: > > > > Taking more than a few locks of the same class at once is bad > > news and it's better to find an alternative method. > > It's not always wrong. > > If you can guarantee that anybody that takes more than one lock of a > particular class will always take a single top-level lock _first_, then > that's all good. You can obviously screw up and take the same lock _twice_ > (which will deadlock), but at least you cannot get into ABBA situations. > > So maybe the right thing to do is to just teach lockdep about "lock > protection locks". That would have solved the multi-queue issues for > networking too - all the actual network drivers would still have taken > just their single queue lock, but the one case that needs to take all of > them would have taken a separate top-level lock first. > > Never mind that the multi-queue locks were always taken in the same order: > it's never wrong to just have some top-level serialization, and anybody > who needs to take locks might as well do , because they sure as > hell aren't going to be on _any_ fastpaths. > > So the simplest solution really sounds like just teaching lockdep about > that one special case. It's not "nesting" exactly, although it's obviously > related to it. Do as Linus suggested. The lock protection lock is called nest_lock. Note that we still have the MAX_LOCK_DEPTH (48) limit to consider, so anything that spills that it still up shit creek. 
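
To make the shape concrete, here is a hedged sketch of the pattern Linus describes (driver and field names are hypothetical): per-queue fast paths take only their own lock, while the rare all-queues path takes a single top-level lock first, so two all-queues callers can never interleave into ABBA. The nest_lock field and the check_deadlock() change below are what allow lockdep to accept the same-class acquisitions in the slow path; the user-facing annotation wrapper arrives in the next patch.

#include <linux/spinlock.h>

/* Hypothetical multi-queue device; all queue locks share one lock class. */
struct txq {
	spinlock_t lock;
};

struct nic {
	spinlock_t all_tx_lock;		/* the "lock protection lock" */
	struct txq txq[4];
};

/* Fast path: a single queue lock, no nesting at all. */
static void xmit_one(struct nic *nic, int q)
{
	spin_lock(&nic->txq[q].lock);
	/* ... queue a packet ... */
	spin_unlock(&nic->txq[q].lock);
}

/*
 * Slow path: top-level serialization first, then every queue lock.
 * Without a nest_lock annotation lockdep flags the second same-class
 * acquisition as possible recursive deadlock; with all_tx_lock named
 * as the nest lock, check_deadlock() finds it on the held-lock stack
 * and allows the nesting.
 */
static void stop_all_queues(struct nic *nic)
{
	int i;

	spin_lock(&nic->all_tx_lock);
	for (i = 0; i < 4; i++)
		spin_lock(&nic->txq[i].lock);	/* annotated variant: next patch */
	/* ... mark all queues stopped ... */
	for (i = 3; i >= 0; i--)
		spin_unlock(&nic->txq[i].lock);
	spin_unlock(&nic->all_tx_lock);
}
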
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 34 ++++++++++++++++++---------------- include/linux/rcuclassic.h | 2 +- kernel/lockdep.c | 26 +++++++++++++++++++++----- 3 files changed, 40 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index e431d1d6eaf..93a8cc02a03 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -211,6 +211,7 @@ struct held_lock { u64 prev_chain_key; unsigned long acquire_ip; struct lockdep_map *instance; + struct lockdep_map *nest_lock; #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; @@ -297,7 +298,8 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * 2: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip); + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); @@ -319,7 +321,7 @@ static inline void lockdep_on(void) { } -# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) @@ -407,9 +409,9 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) # else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else @@ -419,11 +421,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) # else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) # endif # define rwlock_release(l, n, i) lock_release(l, n, i) #else @@ -434,9 +436,9 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) # else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define mutex_release(l, n, i) lock_release(l, n, i) #else @@ -446,11 +448,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 
2, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else @@ -461,9 +463,9 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, _THIS_IP_) +# define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) # else -# define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, _THIS_IP_) +# define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) # endif # define map_release(l) lock_release(l, 1, _THIS_IP_) #else diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 8c774905dcf..4ab84362272 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern struct lockdep_map rcu_lock_map; # define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) #else # define rcu_read_acquire() do { } while (0) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index d3c72ad8d09..410c3365ad8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -1372,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, struct lockdep_map *next_instance, int read) { struct held_lock *prev; + struct held_lock *nest = NULL; int i; for (i = 0; i < curr->lockdep_depth; i++) { prev = curr->held_locks + i; + + if (prev->instance == next->nest_lock) + nest = prev; + if (hlock_class(prev) != hlock_class(next)) continue; + /* * Allow read-after-read recursion of the same * lock class (i.e. read_lock(lock)+read_lock(lock)): */ if ((read == 2) && prev->read) return 2; + + /* + * We're holding the nest_lock, which serializes this lock's + * nesting behaviour. 
+ */ + if (nest) + return 2; + return print_deadlock_bug(curr, prev, next); } return 1; @@ -2507,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map); */ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, - unsigned long ip) + struct lockdep_map *nest_lock, unsigned long ip) { struct task_struct *curr = current; struct lock_class *class = NULL; @@ -2566,6 +2580,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->class_idx = class - lock_classes + 1; hlock->acquire_ip = ip; hlock->instance = lock; + hlock->nest_lock = nest_lock; hlock->trylock = trylock; hlock->read = read; hlock->check = check; @@ -2717,7 +2732,7 @@ found_it: if (!__lock_acquire(hlock->instance, hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, - hlock->acquire_ip)) + hlock->nest_lock, hlock->acquire_ip)) return 0; } @@ -2778,7 +2793,7 @@ found_it: if (!__lock_acquire(hlock->instance, hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, - hlock->acquire_ip)) + hlock->nest_lock, hlock->acquire_ip)) return 0; } @@ -2915,7 +2930,8 @@ EXPORT_SYMBOL_GPL(lock_set_subclass); * and also avoid lockdep recursion: */ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip) + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip) { unsigned long flags; @@ -2930,7 +2946,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, current->lockdep_recursion = 1; __lock_acquire(lock, subclass, trylock, read, check, - irqs_disabled_flags(flags), ip); + irqs_disabled_flags(flags), nest_lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } -- cgit v1.2.3-70-g09d2 From b7d39aff91454f2534db2275f55908656ec0470c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 11 Aug 2008 09:30:24 +0200 Subject: lockdep: spin_lock_nest_lock() Expose the new lock protection lock. This can be used to annotate places where we take multiple locks of the same class and avoid deadlocks by always taking another (top-level) lock first. NOTE: we're still bound to the MAX_LOCK_DEPTH (48) limit. 
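
A hedged usage sketch of the new annotation (structure and field names are hypothetical): the top-level lock is passed as the nest_lock, so lockdep validates each same-class inner acquisition against it instead of reporting lock-class recursion. The total number of held locks must still stay below MAX_LOCK_DEPTH (48).

#include <linux/spinlock.h>
#include <linux/list.h>

struct node {
	spinlock_t lock;		/* every node lock is in the same class */
	struct list_head entry;
};

struct tree {
	spinlock_t top_lock;		/* serializes whole-tree operations */
	struct list_head nodes;
};

/* Whole-structure operation: hold top_lock, then lock every node. */
static void lock_all_nodes(struct tree *tree)
{
	struct node *n;

	spin_lock(&tree->top_lock);
	list_for_each_entry(n, &tree->nodes, entry)
		spin_lock_nest_lock(&n->lock, &tree->top_lock);
}

static void unlock_all_nodes(struct tree *tree)
{
	struct node *n;

	list_for_each_entry(n, &tree->nodes, entry)
		spin_unlock(&n->lock);
	spin_unlock(&tree->top_lock);
}

Unlike spin_lock_nested(), no distinct subclass per lock is needed, so the pattern scales to an arbitrary number of same-class locks, bounded only by the held-lock limit noted above.
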
Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 2 ++ include/linux/spinlock.h | 6 ++++++ include/linux/spinlock_api_smp.h | 2 ++ kernel/spinlock.c | 11 +++++++++++ 4 files changed, 21 insertions(+) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 93a8cc02a03..4452c04a7f6 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -410,8 +410,10 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # else # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 61e5610ad16..e0c0fccced4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -183,8 +183,14 @@ do { \ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) #else # define spin_lock_nested(lock, subclass) _spin_lock(lock) +# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #define write_lock(lock) _write_lock(lock) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8a2307ce729..d79845d034b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); diff --git a/kernel/spinlock.c b/kernel/spinlock.c index a1fb54c93cd..44baeea94ab 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) } EXPORT_SYMBOL(_spin_lock_nested); + unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) { unsigned long flags; @@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas EXPORT_SYMBOL(_spin_lock_irqsave_nested); +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, + struct lockdep_map *nest_lock) +{ + preempt_disable(); + spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); +} + +EXPORT_SYMBOL(_spin_lock_nest_lock); + #endif void __lockfunc _spin_unlock(spinlock_t *lock) -- cgit v1.2.3-70-g09d2 From 3295f0ef9ff048a4619ede597ad9ec9cab725654 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 11 Aug 2008 10:30:30 +0200 Subject: lockdep: rename map_[acquire|release]() => lock_map_[acquire|release]() the names were too generic: drivers/uio/uio.c:87: error: expected identifier or '(' before 'do' drivers/uio/uio.c:87: error: expected identifier or '(' before 'while' 
drivers/uio/uio.c:113: error: 'map_release' undeclared here (not in a function) Signed-off-by: Ingo Molnar --- fs/jbd/transaction.c | 4 ++-- fs/jbd2/transaction.c | 4 ++-- include/linux/lockdep.h | 10 +++++----- kernel/workqueue.c | 24 ++++++++++++------------ 4 files changed, 21 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 31a4bd7f78d..0540ca27a44 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks) goto out; } - map_acquire(&handle->h_lockdep_map); + lock_map_acquire(&handle->h_lockdep_map); out: return handle; @@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle) spin_unlock(&journal->j_state_lock); } - map_release(&handle->h_lockdep_map); + lock_map_release(&handle->h_lockdep_map); jbd_free_handle(handle); return err; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index c074971215e..e5d540588fa 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) goto out; } - map_acquire(&handle->h_lockdep_map); + lock_map_acquire(&handle->h_lockdep_map); out: return handle; } @@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle) spin_unlock(&journal->j_state_lock); } - map_release(&handle->h_lockdep_map); + lock_map_release(&handle->h_lockdep_map); jbd2_free_handle(handle); return err; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 4452c04a7f6..67f42b300c6 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -465,14 +465,14 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) # else -# define map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) # endif -# define map_release(l) lock_release(l, 1, _THIS_IP_) +# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) #else -# define map_acquire(l) do { } while (0) -# define map_release(l) do { } while (0) +# define lock_map_acquire(l) do { } while (0) +# define lock_map_release(l) do { } while (0) #endif #endif /* __LINUX_LOCKDEP_H */ diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 53564ae894a..8bb5b68fb3a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) BUG_ON(get_wq_data(work) != cwq); work_clear_pending(work); - map_acquire(&cwq->wq->lockdep_map); - map_acquire(&lockdep_map); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_acquire(&lockdep_map); f(work); - map_release(&lockdep_map); - map_release(&cwq->wq->lockdep_map); + lock_map_release(&lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " @@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq) int cpu; might_sleep(); - map_acquire(&wq->lockdep_map); - map_release(&wq->lockdep_map); + lock_map_acquire(&wq->lockdep_map); + lock_map_release(&wq->lockdep_map); for_each_cpu_mask_nr(cpu, *cpu_map) flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); } @@ -441,8 +441,8 @@ int flush_work(struct work_struct *work) if (!cwq) return 0; - 
map_acquire(&cwq->wq->lockdep_map); - map_release(&cwq->wq->lockdep_map); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); prev = NULL; spin_lock_irq(&cwq->lock); @@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work) might_sleep(); - map_acquire(&work->lockdep_map); - map_release(&work->lockdep_map); + lock_map_acquire(&work->lockdep_map); + lock_map_release(&work->lockdep_map); cwq = get_wq_data(work); if (!cwq) @@ -861,8 +861,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) if (cwq->thread == NULL) return; - map_acquire(&cwq->wq->lockdep_map); - map_release(&cwq->wq->lockdep_map); + lock_map_acquire(&cwq->wq->lockdep_map); + lock_map_release(&cwq->wq->lockdep_map); flush_cpu_workqueue(cwq); /* -- cgit v1.2.3-70-g09d2 From d388e5fdc461344d04307a3fa83862b9ed429647 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sat, 9 Aug 2008 15:09:02 -0700 Subject: x86: Restore proper vector locking during cpu hotplug Having cpu_online_map change during assign_irq_vector can result in some really nasty and weird things happening. The one that bit me last time was accessing non existent per cpu memory for non existent cpus. This locking was removed in a sloppy x86_64 and x86_32 merge patch. Guys can we please try and avoid subtly breaking x86 when we are merging files together? Signed-off-by: Eric W. Biederman Signed-off-by: H. Peter Anvin --- arch/x86/kernel/io_apic_32.c | 6 +----- arch/x86/kernel/io_apic_64.c | 25 +++++++++++++++---------- arch/x86/kernel/smpboot.c | 12 +++++++++--- include/asm-x86/hw_irq.h | 12 ++++++++++-- 4 files changed, 35 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index de9aa0e3a9c..09cddb57bec 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c @@ -57,7 +57,7 @@ atomic_t irq_mis_count; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); -static DEFINE_SPINLOCK(vector_lock); +DEFINE_SPINLOCK(vector_lock); int timer_through_8259 __initdata; @@ -1209,10 +1209,6 @@ static int assign_irq_vector(int irq) return vector; } -void setup_vector_irq(int cpu) -{ -} - static struct irq_chip ioapic_chip; #define IOAPIC_AUTO -1 diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index 8269434d170..61a83b70c18 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c @@ -101,7 +101,7 @@ int timer_through_8259 __initdata; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); -DEFINE_SPINLOCK(vector_lock); +static DEFINE_SPINLOCK(vector_lock); /* * # of IRQ routing registers @@ -697,6 +697,19 @@ static int pin_2_irq(int idx, int apic, int pin) return irq; } +void lock_vector_lock(void) +{ + /* Used to the online set of cpus does not change + * during assign_irq_vector. 
+ */ + spin_lock(&vector_lock); +} + +void unlock_vector_lock(void) +{ + spin_unlock(&vector_lock); +} + static int __assign_irq_vector(int irq, cpumask_t mask) { /* @@ -802,7 +815,7 @@ static void __clear_irq_vector(int irq) cpus_clear(cfg->domain); } -static void __setup_vector_irq(int cpu) +void __setup_vector_irq(int cpu) { /* Initialize vector_irq on a new cpu */ /* This function must be called with vector_lock held */ @@ -825,14 +838,6 @@ static void __setup_vector_irq(int cpu) } } -void setup_vector_irq(int cpu) -{ - spin_lock(&vector_lock); - __setup_vector_irq(smp_processor_id()); - spin_unlock(&vector_lock); -} - - static struct irq_chip ioapic_chip; static void ioapic_register_intr(int irq, unsigned long trigger) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 332512767f4..da10f07fc59 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -326,12 +326,16 @@ static void __cpuinit start_secondary(void *unused) * for which cpus receive the IPI. Holding this * lock helps us to not include this cpu in a currently in progress * smp_call_function(). + * + * We need to hold vector_lock so there the set of online cpus + * does not change while we are assigning vectors to cpus. Holding + * this lock ensures we don't half assign or remove an irq from a cpu. */ ipi_call_lock_irq(); -#ifdef CONFIG_X86_IO_APIC - setup_vector_irq(smp_processor_id()); -#endif + lock_vector_lock(); + __setup_vector_irq(smp_processor_id()); cpu_set(smp_processor_id(), cpu_online_map); + unlock_vector_lock(); ipi_call_unlock_irq(); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; @@ -1336,7 +1340,9 @@ int __cpu_disable(void) remove_siblinginfo(cpu); /* It's now safe to remove this processor from the online map */ + lock_vector_lock(); remove_cpu_from_maps(cpu); + unlock_vector_lock(); fixup_irqs(cpu_online_map); return 0; } diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h index 77ba51df566..edd0b95f14d 100644 --- a/include/asm-x86/hw_irq.h +++ b/include/asm-x86/hw_irq.h @@ -98,9 +98,17 @@ extern void (*const interrupt[NR_IRQS])(void); #else typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); -extern spinlock_t vector_lock; #endif -extern void setup_vector_irq(int cpu); + +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_X86_64) +extern void lock_vector_lock(void); +extern void unlock_vector_lock(void); +extern void __setup_vector_irq(int cpu); +#else +static inline void lock_vector_lock(void) {} +static inline void unlock_vector_lock(void) {} +static inline void __setup_vector_irq(int cpu) {} +#endif #endif /* !ASSEMBLY_ */ -- cgit v1.2.3-70-g09d2 From 3c7569b284e1be55d086b61a70d9f545326f6d74 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Sun, 10 Aug 2008 00:35:50 -0700 Subject: x86_64: restore the proper NR_IRQS define so larger systems work. As pointed out and tracked by Yinghai Lu : Dhaval Giani got: kernel BUG at arch/x86/kernel/io_apic_64.c:357! invalid opcode: 0000 [1] SMP CPU 24 ... his system (x3950) has 8 ioapic, irq > 256 This was caused by: commit 9b7dc567d03d74a1fbae84e88949b6a60d922d82 Author: Thomas Gleixner Date: Fri May 2 20:10:09 2008 +0200 x86: unify interrupt vector defines The interrupt vector defines are copied 4 times around with minimal differences. 
Move them all into asm-x86/irq_vectors.h It appears that Thomas did not notice that x86_64 does something completely different when he merged irq_vectors.h We can solve this for 2.6.27 by simply reintroducing the old heuristic for setting NR_IRQS on x86_64 to a usable value, which trivially removes the regression. Long term it would be nice to harmonize the handling of ioapic interrupts of x86_32 and x86_64 so we don't have this kind of confusion. Dhaval Giani tested an earlier version of this patch by YH, which confirms that simply increasing NR_IRQS fixes the problem. Signed-off-by: Eric W. Biederman Acked-by: Yinghai Lu Cc: Dhaval Giani Cc: Mike Travis Cc: Andrew Morton Signed-off-by: Ingo Molnar --- include/asm-x86/irq_vectors.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h index 90b1d1f12f0..b95d167b7fb 100644 --- a/include/asm-x86/irq_vectors.h +++ b/include/asm-x86/irq_vectors.h @@ -109,7 +109,15 @@ #define LAST_VM86_IRQ 15 #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) -#if !defined(CONFIG_X86_VOYAGER) +#ifdef CONFIG_X86_64 +# if NR_CPUS < MAX_IO_APICS +# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) +# else +# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) +# endif +# define NR_IRQ_VECTORS NR_IRQS + +#elif !defined(CONFIG_X86_VOYAGER) # if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS) -- cgit v1.2.3-70-g09d2 From afdd614071aef652f5a3e2a06965de049dd8339b Mon Sep 17 00:00:00 2001 From: Sven Wegener Date: Sun, 10 Aug 2008 09:18:01 +0000 Subject: ipvs: Use ARRAY_SIZE() Signed-off-by: Sven Wegener Acked-by: Simon Horman --- include/net/ip_vs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index cbb59ebed4a..e980416bff8 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -440,7 +440,7 @@ struct ip_vs_app */ extern const char *ip_vs_proto_name(unsigned proto); extern void ip_vs_init_hash_table(struct list_head *table, int rows); -#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table(t, sizeof(t)/sizeof(t[0])) +#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) #define IP_VS_APP_TYPE_FTP 1 -- cgit v1.2.3-70-g09d2 From 5587da55fbf332ab8d1b37637536f94bc373867f Mon Sep 17 00:00:00 2001 From: Sven Wegener Date: Sun, 10 Aug 2008 18:24:40 +0000 Subject: ipvs: Mark net_vs_ctl_path const Signed-off-by: Sven Wegener Acked-by: Simon Horman --- include/net/ip_vs.h | 2 +- net/ipv4/ipvs/ip_vs_ctl.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index e980416bff8..c8ee9b89b02 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -620,7 +620,7 @@ extern int sysctl_ip_vs_expire_quiescent_template; extern int sysctl_ip_vs_sync_threshold[2]; extern int sysctl_ip_vs_nat_icmp_send; extern struct ip_vs_stats ip_vs_stats; -extern struct ctl_path net_vs_ctl_path[]; +extern const struct ctl_path net_vs_ctl_path[]; extern struct ip_vs_service * ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport); diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index df13333813a..999d884e886 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -1589,7 +1589,7 @@ static struct ctl_table vs_vars[] = { { .ctl_name = 0 } }; -struct ctl_path net_vs_ctl_path[] = { +const struct ctl_path net_vs_ctl_path[] =
{ { .procname = "net", .ctl_name = CTL_NET, }, { .procname = "ipv4", .ctl_name = NET_IPV4, }, { .procname = "vs", }, -- cgit v1.2.3-70-g09d2 From b42e737e576339c795d9ac77a1fce6057f6bc0cf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 11 Aug 2008 12:34:42 +0200 Subject: lockdep: fix overflow in the hlock shrinkage code There is a overflow by 1 case in the new shrunken hlock code. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 67f42b300c6..c88aa3d8e87 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -191,7 +191,12 @@ struct lock_chain { }; #define MAX_LOCKDEP_KEYS_BITS 11 -#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) struct held_lock { /* -- cgit v1.2.3-70-g09d2 From 13fa00a8780885edcdf0bc53b81e5d0fec71119a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 11 Aug 2008 20:59:59 +1000 Subject: powerpc: Remove include/linux/harrier_defs.h It was only used by code in arch/ppc, and arch/ppc is gone, so remove the unused harrier_defs.h as well. Signed-off-by: Paul Mackerras --- include/linux/harrier_defs.h | 212 ------------------------------------------- 1 file changed, 212 deletions(-) delete mode 100644 include/linux/harrier_defs.h (limited to 'include') diff --git a/include/linux/harrier_defs.h b/include/linux/harrier_defs.h deleted file mode 100644 index efef11db790..00000000000 --- a/include/linux/harrier_defs.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * include/linux/harrier_defs.h - * - * Definitions for Motorola MCG Harrier North Bridge & Memory controller - * - * Author: Dale Farnsworth - * dale.farnsworth@mvista.com - * - * Extracted from asm-ppc/harrier.h by: - * Randy Vinson - * rvinson@mvista.com - * - * Copyright 2001-2002 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef __ASMPPC_HARRIER_DEFS_H -#define __ASMPPC_HARRIER_DEFS_H - -#define HARRIER_DEFAULT_XCSR_BASE 0xfeff0000 - -#define HARRIER_VEND_DEV_ID 0x1057480b - -#define HARRIER_VENI_OFF 0x00 - -#define HARRIER_REVI_OFF 0x05 -#define HARRIER_UCTL_OFF 0xd0 -#define HARRIER_XTAL64_MASK 0x02 - -#define HARRIER_MISC_CSR_OFF 0x1c -#define HARRIER_RSTOUT 0x01000000 -#define HARRIER_SYSCON 0x08000000 -#define HARRIER_EREADY 0x10000000 -#define HARRIER_ERDYS 0x20000000 - -/* Function exception registers */ -#define HARRIER_FEEN_OFF 0x40 /* enable */ -#define HARRIER_FEST_OFF 0x44 /* status */ -#define HARRIER_FEMA_OFF 0x48 /* mask */ -#define HARRIER_FECL_OFF 0x4c /* clear */ - -#define HARRIER_FE_DMA 0x80 -#define HARRIER_FE_MIDB 0x40 -#define HARRIER_FE_MIM0 0x20 -#define HARRIER_FE_MIM1 0x10 -#define HARRIER_FE_MIP 0x08 -#define HARRIER_FE_UA0 0x04 -#define HARRIER_FE_UA1 0x02 -#define HARRIER_FE_ABT 0x01 - -#define HARRIER_SERIAL_0_OFF 0xc0 - -#define HARRIER_MBAR_OFF 0xe0 -#define HARRIER_MBAR_MSK 0xfffc0000 -#define HARRIER_MPIC_CSR_OFF 0xe4 -#define HARRIER_MPIC_OPI_ENABLE 0x40 -#define HARRIER_MPIC_IFEVP_OFF 0x10200 -#define HARRIER_MPIC_IFEVP_VECT_MSK 0xff -#define HARRIER_MPIC_IFEDE_OFF 0x10210 - -/* - * Define the Memory Controller register offsets. - */ -#define HARRIER_SDBA_OFF 0x110 -#define HARRIER_SDBB_OFF 0x114 -#define HARRIER_SDBC_OFF 0x118 -#define HARRIER_SDBD_OFF 0x11c -#define HARRIER_SDBE_OFF 0x120 -#define HARRIER_SDBF_OFF 0x124 -#define HARRIER_SDBG_OFF 0x128 -#define HARRIER_SDBH_OFF 0x12c - -#define HARRIER_SDB_ENABLE 0x00000100 -#define HARRIER_SDB_SIZE_MASK 0xf -#define HARRIER_SDB_SIZE_SHIFT 16 -#define HARRIER_SDB_BASE_MASK 0xff -#define HARRIER_SDB_BASE_SHIFT 24 - -/* - * Define outbound register offsets. - */ -#define HARRIER_OTAD0_OFF 0x220 -#define HARRIER_OTOF0_OFF 0x224 -#define HARRIER_OTAD1_OFF 0x228 -#define HARRIER_OTOF1_OFF 0x22c -#define HARRIER_OTAD2_OFF 0x230 -#define HARRIER_OTOF2_OFF 0x234 -#define HARRIER_OTAD3_OFF 0x238 -#define HARRIER_OTOF3_OFF 0x23c - -#define HARRIER_OTADX_START_MSK 0xffff0000UL -#define HARRIER_OTADX_END_MSK 0x0000ffffUL - -#define HARRIER_OTOFX_OFF_MSK 0xffff0000UL -#define HARRIER_OTOFX_ENA 0x80UL -#define HARRIER_OTOFX_WPE 0x10UL -#define HARRIER_OTOFX_SGE 0x08UL -#define HARRIER_OTOFX_RAE 0x04UL -#define HARRIER_OTOFX_MEM 0x02UL -#define HARRIER_OTOFX_IOM 0x01UL - -/* - * Define generic message passing register offsets - */ -/* Mirrored registers (visible from both PowerPC and PCI space) */ -#define HARRIER_XCSR_MP_BASE_OFF 0x290 /* base offset in XCSR space */ -#define HARRIER_PMEP_MP_BASE_OFF 0x100 /* base offset in PMEM space */ -#define HARRIER_MGOM0_OFF 0x00 /* outbound msg 0 */ -#define HARRIER_MGOM1_OFF 0x04 /* outbound msg 1 */ -#define HARRIER_MGOD_OFF 0x08 /* outbound doorbells */ - -#define HARRIER_MGIM0_OFF 0x10 /* inbound msg 0 */ -#define HARRIER_MGIM1_OFF 0x14 /* inbound msg 1 */ -#define HARRIER_MGID_OFF 0x18 /* inbound doorbells */ - -/* PowerPC-only registers */ -#define HARRIER_MGIDM_OFF 0x20 /* inbound doorbell mask */ - -/* PCI-only registers */ -#define HARRIER_PMEP_MGST_OFF 0x20 /* (outbound) interrupt status */ -#define HARRIER_PMEP_MGMS_OFF 0x24 /* (outbound) interrupt mask */ -#define HARRIER_MG_OMI0 (1<<4) -#define HARRIER_MG_OMI1 (1<<5) - -#define HARRIER_PMEP_MGODM_OFF 0x28 /* outbound doorbell mask */ - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_XCSR_TO_PCFS_OFF 0x300 - -/* - * Define message passing attribute register offset - */ -#define 
HARRIER_MPAT_OFF 0x44 - -/* - * Define inbound attribute register offsets. - */ -#define HARRIER_ITSZ0_OFF 0x48 -#define HARRIER_ITAT0_OFF 0x4c - -#define HARRIER_ITSZ1_OFF 0x50 -#define HARRIER_ITAT1_OFF 0x54 - -#define HARRIER_ITSZ2_OFF 0x58 -#define HARRIER_ITAT2_OFF 0x5c - -#define HARRIER_ITSZ3_OFF 0x60 -#define HARRIER_ITAT3_OFF 0x64 - -/* inbound translation size constants */ -#define HARRIER_ITSZ_MSK 0xff -#define HARRIER_ITSZ_4KB 0x00 -#define HARRIER_ITSZ_8KB 0x01 -#define HARRIER_ITSZ_16KB 0x02 -#define HARRIER_ITSZ_32KB 0x03 -#define HARRIER_ITSZ_64KB 0x04 -#define HARRIER_ITSZ_128KB 0x05 -#define HARRIER_ITSZ_256KB 0x06 -#define HARRIER_ITSZ_512KB 0x07 -#define HARRIER_ITSZ_1MB 0x08 -#define HARRIER_ITSZ_2MB 0x09 -#define HARRIER_ITSZ_4MB 0x0A -#define HARRIER_ITSZ_8MB 0x0B -#define HARRIER_ITSZ_16MB 0x0C -#define HARRIER_ITSZ_32MB 0x0D -#define HARRIER_ITSZ_64MB 0x0E -#define HARRIER_ITSZ_128MB 0x0F -#define HARRIER_ITSZ_256MB 0x10 -#define HARRIER_ITSZ_512MB 0x11 -#define HARRIER_ITSZ_1GB 0x12 -#define HARRIER_ITSZ_2GB 0x13 - -/* inbound translation offset */ -#define HARRIER_ITOF_SHIFT 0x10 -#define HARRIER_ITOF_MSK 0xffff - -/* inbound translation atttributes */ -#define HARRIER_ITAT_PRE (1<<3) -#define HARRIER_ITAT_RAE (1<<4) -#define HARRIER_ITAT_WPE (1<<5) -#define HARRIER_ITAT_MEM (1<<6) -#define HARRIER_ITAT_ENA (1<<7) -#define HARRIER_ITAT_GBL (1<<16) - -#define HARRIER_LBA_OFF 0x80 -#define HARRIER_LBA_MSK (1<<31) - -#define HARRIER_XCSR_SIZE 1024 - -/* macros to calculate message passing register offsets */ -#define HARRIER_MP_XCSR(x) ((u32)HARRIER_XCSR_MP_BASE_OFF + (u32)x) - -#define HARRIER_MP_PMEP(x) ((u32)HARRIER_PMEP_MP_BASE_OFF + (u32)x) - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_MPBAR_OFF PCI_BASE_ADDRESS_0 -#define HARRIER_ITBAR0_OFF PCI_BASE_ADDRESS_1 -#define HARRIER_ITBAR1_OFF PCI_BASE_ADDRESS_2 -#define HARRIER_ITBAR2_OFF PCI_BASE_ADDRESS_3 -#define HARRIER_ITBAR3_OFF PCI_BASE_ADDRESS_4 - -#define HARRIER_XCSR_CONFIG(x) ((u32)HARRIER_XCSR_TO_PCFS_OFF + (u32)x) - -#endif /* __ASMPPC_HARRIER_DEFS_H */ -- cgit v1.2.3-70-g09d2 From 3a14a313f9b406c37ab7e3f855b060eb8587b8c7 Mon Sep 17 00:00:00 2001 From: Sven Wegener Date: Sun, 10 Aug 2008 18:24:41 +0000 Subject: ipvs: Embed estimator object into stats object There's no reason for dynamically allocating an estimator object for every stats object. Directly embed an estimator object into every stats object and switch to using the kernel-provided list implementation. This makes the code much simpler and faster, as we do not need to traverse the list of all estimators to find the one belonging to a stats object. There's no need to use an rwlock, as we only have one reader. Also reorder the members of the estimator structure slightly to avoid padding overhead. This can't be done with the stats object as the members are currently copied to our user space object via memcpy() and changing it would break ABI. 
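As a stand-alone sketch of the pattern (not the IPVS code itself; the structure and field names below are made up for illustration), embedding the helper object and recovering its parent with container_of() looks like this:

/*
 * Minimal sketch only: a parent object with an embedded helper, and a
 * function that gets back to the parent via container_of().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct estimator {
        struct estimator *next;         /* intrusive list link */
        unsigned int cps;               /* smoothed connections/s */
};

struct stats {
        unsigned int conns;             /* raw counter */
        struct estimator est;           /* embedded, no separate allocation */
};

/* The periodic timer walks only estimators, yet can reach the stats. */
static void sample(struct estimator *e)
{
        struct stats *s = container_of(e, struct stats, est);

        e->cps = s->conns;              /* stand-in for the real rate math */
}

int main(void)
{
        struct stats s = { .conns = 42 };

        sample(&s.est);
        printf("cps=%u\n", s.est.cps);
        return 0;
}

Because the estimator lives inside the stats object, freeing the stats object frees the estimator with it, which is why the kzalloc()/kfree() pair can go away in the patch below.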
Signed-off-by: Sven Wegener Acked-by: Simon Horman --- include/net/ip_vs.h | 28 ++++++++++- net/ipv4/ipvs/ip_vs_ctl.c | 2 +- net/ipv4/ipvs/ip_vs_est.c | 117 +++++++++++++++------------------------------- 3 files changed, 65 insertions(+), 82 deletions(-) (limited to 'include') diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index c8ee9b89b02..7312c3dd309 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -140,8 +140,24 @@ struct ip_vs_seq { /* - * IPVS statistics object + * IPVS statistics objects */ +struct ip_vs_estimator { + struct list_head list; + + u64 last_inbytes; + u64 last_outbytes; + u32 last_conns; + u32 last_inpkts; + u32 last_outpkts; + + u32 cps; + u32 inpps; + u32 outpps; + u32 inbps; + u32 outbps; +}; + struct ip_vs_stats { __u32 conns; /* connections scheduled */ @@ -156,7 +172,15 @@ struct ip_vs_stats __u32 inbps; /* current in byte rate */ __u32 outbps; /* current out byte rate */ + /* + * Don't add anything before the lock, because we use memcpy() to copy + * the members before the lock to struct ip_vs_stats_user in + * ip_vs_ctl.c. + */ + spinlock_t lock; /* spin lock */ + + struct ip_vs_estimator est; /* estimator */ }; struct dst_entry; @@ -659,7 +683,7 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp); /* * IPVS rate estimator prototypes (from ip_vs_est.c) */ -extern int ip_vs_new_estimator(struct ip_vs_stats *stats); +extern void ip_vs_new_estimator(struct ip_vs_stats *stats); extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 999d884e886..d651bce0549 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -684,8 +684,8 @@ ip_vs_zero_stats(struct ip_vs_stats *stats) { spin_lock_bh(&stats->lock); memset(stats, 0, (char *)&stats->lock - (char *)stats); - spin_unlock_bh(&stats->lock); ip_vs_zero_estimator(stats); + spin_unlock_bh(&stats->lock); } /* diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c index 1d6e58e502f..5a20f93bd7f 100644 --- a/net/ipv4/ipvs/ip_vs_est.c +++ b/net/ipv4/ipvs/ip_vs_est.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -44,28 +45,11 @@ */ -struct ip_vs_estimator -{ - struct ip_vs_estimator *next; - struct ip_vs_stats *stats; - - u32 last_conns; - u32 last_inpkts; - u32 last_outpkts; - u64 last_inbytes; - u64 last_outbytes; - - u32 cps; - u32 inpps; - u32 outpps; - u32 inbps; - u32 outbps; -}; +static void estimation_timer(unsigned long arg); - -static struct ip_vs_estimator *est_list = NULL; -static DEFINE_RWLOCK(est_lock); -static struct timer_list est_timer; +static LIST_HEAD(est_list); +static DEFINE_SPINLOCK(est_lock); +static DEFINE_TIMER(est_timer, estimation_timer, 0, 0); static void estimation_timer(unsigned long arg) { @@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg) u64 n_inbytes, n_outbytes; u32 rate; - read_lock(&est_lock); - for (e = est_list; e; e = e->next) { - s = e->stats; + spin_lock(&est_lock); + list_for_each_entry(e, &est_list, list) { + s = container_of(e, struct ip_vs_stats, est); spin_lock(&s->lock); n_conns = s->conns; @@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg) s->outbps = (e->outbps+0xF)>>5; spin_unlock(&s->lock); } - read_unlock(&est_lock); + spin_unlock(&est_lock); mod_timer(&est_timer, jiffies + 2*HZ); } -int ip_vs_new_estimator(struct ip_vs_stats *stats) +void ip_vs_new_estimator(struct ip_vs_stats *stats) { - struct ip_vs_estimator *est; + struct 
ip_vs_estimator *est = &stats->est; - est = kzalloc(sizeof(*est), GFP_KERNEL); - if (est == NULL) - return -ENOMEM; + INIT_LIST_HEAD(&est->list); - est->stats = stats; est->last_conns = stats->conns; est->cps = stats->cps<<10; @@ -142,62 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats) est->last_outbytes = stats->outbytes; est->outbps = stats->outbps<<5; - write_lock_bh(&est_lock); - est->next = est_list; - if (est->next == NULL) { - setup_timer(&est_timer, estimation_timer, 0); - est_timer.expires = jiffies + 2*HZ; - add_timer(&est_timer); - } - est_list = est; - write_unlock_bh(&est_lock); - return 0; + spin_lock_bh(&est_lock); + if (list_empty(&est_list)) + mod_timer(&est_timer, jiffies + 2 * HZ); + list_add(&est->list, &est_list); + spin_unlock_bh(&est_lock); } void ip_vs_kill_estimator(struct ip_vs_stats *stats) { - struct ip_vs_estimator *est, **pest; - int killed = 0; - - write_lock_bh(&est_lock); - pest = &est_list; - while ((est=*pest) != NULL) { - if (est->stats != stats) { - pest = &est->next; - continue; - } - *pest = est->next; - kfree(est); - killed++; - } - while (killed && !est_list && try_to_del_timer_sync(&est_timer) < 0) { - write_unlock_bh(&est_lock); + struct ip_vs_estimator *est = &stats->est; + + spin_lock_bh(&est_lock); + list_del(&est->list); + while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) { + spin_unlock_bh(&est_lock); cpu_relax(); - write_lock_bh(&est_lock); + spin_lock_bh(&est_lock); } - write_unlock_bh(&est_lock); + spin_unlock_bh(&est_lock); } void ip_vs_zero_estimator(struct ip_vs_stats *stats) { - struct ip_vs_estimator *e; - - write_lock_bh(&est_lock); - for (e = est_list; e; e = e->next) { - if (e->stats != stats) - continue; - - /* set counters zero */ - e->last_conns = 0; - e->last_inpkts = 0; - e->last_outpkts = 0; - e->last_inbytes = 0; - e->last_outbytes = 0; - e->cps = 0; - e->inpps = 0; - e->outpps = 0; - e->inbps = 0; - e->outbps = 0; - } - write_unlock_bh(&est_lock); + struct ip_vs_estimator *est = &stats->est; + + /* set counters zero, caller must hold the stats->lock lock */ + est->last_inbytes = 0; + est->last_outbytes = 0; + est->last_conns = 0; + est->last_inpkts = 0; + est->last_outpkts = 0; + est->cps = 0; + est->inpps = 0; + est->outpps = 0; + est->inbps = 0; + est->outbps = 0; } -- cgit v1.2.3-70-g09d2 From e5f363e358cf16e4ad13a6826e15088c5495efe9 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 11 Aug 2008 12:37:27 +0200 Subject: lockdep: increase MAX_LOCKDEP_KEYS certain configs produce: [ 70.076229] BUG: MAX_LOCKDEP_KEYS too low! [ 70.080230] turning off the locking correctness validator. tune them up. Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index c88aa3d8e87..331e5f1c2d8 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -190,7 +190,7 @@ struct lock_chain { u64 chain_key; }; -#define MAX_LOCKDEP_KEYS_BITS 11 +#define MAX_LOCKDEP_KEYS_BITS 13 /* * Subtract one because we offset hlock->class_idx by 1 in order * to make 0 mean no class. 
This avoids overflowing the class_idx -- cgit v1.2.3-70-g09d2 From b0fbaa6b5976962434349849673b9ff63631b6d4 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 7 Aug 2008 15:12:39 -0700 Subject: EFI, x86: fix function prototype Fix function prototype in header file to match source code: linux-next-20080807/arch/x86/kernel/efi_64.c:100:14: error: symbol 'efi_ioremap' redeclared with different type (originally declared at include2/asm/efi.h:89) - different address spaces Signed-off-by: Randy Dunlap Signed-off-by: Ingo Molnar --- include/asm-x86/efi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h index 7ed2bd7a7f5..d4f2b0abe92 100644 --- a/include/asm-x86/efi.h +++ b/include/asm-x86/efi.h @@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) -extern void *efi_ioremap(unsigned long addr, unsigned long size); +extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size); #endif /* CONFIG_X86_32 */ -- cgit v1.2.3-70-g09d2 From 0e7d5bb8480e10f98f89bd1d418a430393b1e995 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 11 Aug 2008 09:00:30 +0200 Subject: m68k{,nommu}: Wire up new system calls Wire up for m68k{,nommu} the system calls that were added in the last merge window: - 4006553b06306b34054529477b06b68a1c66249b ("flag parameters: inotify_init") - ed8cae8ba01348bfd83333f4648dd807b04d7f08 ("flag parameters: pipe") - 336dd1f70ff62d7dd8655228caed4c5bfc818c56 ("flag parameters: dup2") - a0998b50c3f0b8fdd265c63e0032f86ebe377dbf ("flag parameters: epoll_create") - 9fe5ad9c8cef9ad5873d8ee55d1cf00d9b607df0 ("flag parameters add-on: remove epoll_create size param") - b087498eb5605673b0f260a7620d91818cd72304 ("flag parameters: eventfd") - 9deb27baedb79759c3ab9435a7d8b841842d56e9 ("flag parameters: signalfd") Signed-off-by: Geert Uytterhoeven Acked-by: Greg Ungerer Signed-off-by: Linus Torvalds --- arch/m68k/kernel/entry.S | 6 ++++++ arch/m68knommu/include/asm/unistd.h | 8 +++++++- arch/m68knommu/kernel/syscalltable.S | 6 ++++++ include/asm-m68k/unistd.h | 8 +++++++- 4 files changed, 26 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S index 18a9c5f4b00..f28404d9a2b 100644 --- a/arch/m68k/kernel/entry.S +++ b/arch/m68k/kernel/entry.S @@ -747,4 +747,10 @@ sys_call_table: .long sys_fallocate /* 320 */ .long sys_timerfd_settime .long sys_timerfd_gettime + .long sys_signalfd4 + .long sys_eventfd2 + .long sys_epoll_create1 /* 325 */ + .long sys_dup3 + .long sys_pipe2 + .long sys_inotify_init1 diff --git a/arch/m68knommu/include/asm/unistd.h b/arch/m68knommu/include/asm/unistd.h index 4ba98b9c5d7..b034a2f7b44 100644 --- a/arch/m68knommu/include/asm/unistd.h +++ b/arch/m68knommu/include/asm/unistd.h @@ -326,10 +326,16 @@ #define __NR_fallocate 320 #define __NR_timerfd_settime 321 #define __NR_timerfd_gettime 322 +#define __NR_signalfd4 323 +#define __NR_eventfd2 324 +#define __NR_epoll_create1 325 +#define __NR_dup3 326 +#define __NR_pipe2 327 +#define __NR_inotify_init1 328 #ifdef __KERNEL__ -#define NR_syscalls 323 +#define NR_syscalls 329 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S index fca2e49917a..812f8d8b7a8 100644 --- a/arch/m68knommu/kernel/syscalltable.S +++ 
b/arch/m68knommu/kernel/syscalltable.S @@ -341,6 +341,12 @@ ENTRY(sys_call_table) .long sys_fallocate /* 320 */ .long sys_timerfd_settime .long sys_timerfd_gettime + .long sys_signalfd4 + .long sys_eventfd2 + .long sys_epoll_create1 /* 325 */ + .long sys_dup3 + .long sys_pipe2 + .long sys_inotify_init1 .rept NR_syscalls-(.-sys_call_table)/4 .long sys_ni_syscall diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h index e72ba563f10..965abb8bc7f 100644 --- a/include/asm-m68k/unistd.h +++ b/include/asm-m68k/unistd.h @@ -325,10 +325,16 @@ #define __NR_fallocate 320 #define __NR_timerfd_settime 321 #define __NR_timerfd_gettime 322 +#define __NR_signalfd4 323 +#define __NR_eventfd2 324 +#define __NR_epoll_create1 325 +#define __NR_dup3 326 +#define __NR_pipe2 327 +#define __NR_inotify_init1 328 #ifdef __KERNEL__ -#define NR_syscalls 323 +#define NR_syscalls 329 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR -- cgit v1.2.3-70-g09d2 From a8c84df9f71e4a7b14bdd41687a70d366c087eef Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Thu, 31 Jul 2008 15:48:07 +1000 Subject: intel/agp: rewrite GTT on resume On my Intel chipset (965GM), the GTT is entirely erased across suspend/resume. This patch simply re-plays the current mapping at resume time to restore the table. I noticed this once I started relying on persistent GTT mappings across VT switch in our GEM work -- the old X server and DRM code carefully unbind all memory from the GTT on VT switch, but GEM does not bother. I placed the list management and rewrite code in the generic layer on the assumption that it will be needed on other hardware, but I did not add the rewrite call to anything other than the Intel resume function. Keep a list of current GATT mappings. At resume time, rewrite them into the GATT. This is needed on Intel (at least) as the entire GATT is cleared across suspend/resume.
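Purely as an illustration of the idea (plain C with a hand-rolled list; the real code in drivers/char/agp/generic.c below uses struct list_head and a spinlock), the record-and-replay pattern looks roughly like this:

#include <stdio.h>

struct mapping {
        struct mapping *next;
        unsigned long pg_start;
};

static struct mapping *mapped_list;     /* every currently bound region */

static void bind(struct mapping *m, unsigned long pg_start)
{
        m->pg_start = pg_start;
        /* the hardware table would be programmed here */
        m->next = mapped_list;
        mapped_list = m;
}

/* Called from the resume path once the hardware table is known empty. */
static int rebind_all(void)
{
        struct mapping *m;

        for (m = mapped_list; m; m = m->next)
                printf("re-inserting mapping at page %lu\n", m->pg_start);
        return 0;
}

int main(void)
{
        struct mapping a, b;

        bind(&a, 0);
        bind(&b, 16);
        return rebind_all();
}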
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Keith Packard Cc: Dave Jones Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Dave Airlie --- drivers/char/agp/agp.h | 3 +++ drivers/char/agp/backend.c | 2 ++ drivers/char/agp/generic.c | 28 ++++++++++++++++++++++++++++ drivers/char/agp/intel-agp.c | 5 +++++ include/linux/agp_backend.h | 5 +++++ 5 files changed, 43 insertions(+) (limited to 'include') diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 81e14bea54b..4bada0e8b81 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -148,6 +148,9 @@ struct agp_bridge_data { char minor_version; struct list_head list; u32 apbase_config; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; + spinlock_t mapped_lock; }; #define KB(x) ((x) * 1024) diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 9626d3bda09..3a3cc03d401 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -185,6 +185,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) rc = -EINVAL; goto err_out; } + INIT_LIST_HEAD(&bridge->mapped_list); + spin_lock_init(&bridge->mapped_lock); return 0; diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 54c91000646..118dbde25dc 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -429,6 +429,10 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start) curr->is_bound = true; curr->pg_start = pg_start; + spin_lock(&agp_bridge->mapped_lock); + list_add(&curr->mapped_list, &agp_bridge->mapped_list); + spin_unlock(&agp_bridge->mapped_lock); + return 0; } EXPORT_SYMBOL(agp_bind_memory); @@ -461,10 +465,34 @@ int agp_unbind_memory(struct agp_memory *curr) curr->is_bound = false; curr->pg_start = 0; + spin_lock(&curr->bridge->mapped_lock); + list_del(&curr->mapped_list); + spin_unlock(&curr->bridge->mapped_lock); return 0; } EXPORT_SYMBOL(agp_unbind_memory); +/** + * agp_rebind_emmory - Rewrite the entire GATT, useful on resume + */ +int agp_rebind_memory(void) +{ + struct agp_memory *curr; + int ret_val = 0; + + spin_lock(&agp_bridge->mapped_lock); + list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) { + ret_val = curr->bridge->driver->insert_memory(curr, + curr->pg_start, + curr->type); + if (ret_val != 0) + break; + } + spin_unlock(&agp_bridge->mapped_lock); + return ret_val; +} +EXPORT_SYMBOL(agp_rebind_memory); + /* End - Routines for handling swapping of agp_memory into the GATT */ diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 57c552ee046..016fdf0623a 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -2244,6 +2244,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev) static int agp_intel_resume(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); + int ret_val; pci_restore_state(pdev); @@ -2271,6 +2272,10 @@ static int agp_intel_resume(struct pci_dev *pdev) else if (bridge->driver == &intel_i965_driver) intel_i915_configure(); + ret_val = agp_rebind_memory(); + if (ret_val != 0) + return ret_val; + return 0; } #endif diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 972b12bcfb3..2b8df8b420f 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -30,6 +30,8 @@ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 +#include + enum chipset_type { NOT_SUPPORTED, SUPPORTED, @@ -78,6 +80,8 @@ struct agp_memory { bool is_bound; bool is_flushed; bool 
vmalloc_flag; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; }; #define AGP_NORMAL_MEMORY 0 @@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); +extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); -- cgit v1.2.3-70-g09d2 From 987c402ac31988f7ecdb38b657bcfeea5831d479 Mon Sep 17 00:00:00 2001 From: Gerrit Renker Date: Mon, 11 Aug 2008 18:17:17 -0700 Subject: skbuff: Code readability NiT Inserting a space between the `-' improved the C readability (some languages allow hyphens within functions and variable names, which is confusing). Signed-off-by: Gerrit Renker Signed-off-by: David S. Miller --- include/linux/skbuff.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index cfcc45b3bef..358661c9990 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -901,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && - !__pskb_pull_tail(skb, len-skb_headlen(skb))) + !__pskb_pull_tail(skb, len - skb_headlen(skb))) return NULL; skb->len -= len; return skb->data += len; @@ -918,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return 1; if (unlikely(len > skb->len)) return 0; - return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } /** @@ -1321,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) unsigned int size = skb->len; if (likely(size >= len)) return 0; - return skb_pad(skb, len-size); + return skb_pad(skb, len - size); } static inline int skb_add_data(struct sk_buff *skb, -- cgit v1.2.3-70-g09d2 From 912985dce45ef18fcdd9f5439fef054e0e22302a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 12 Aug 2008 17:52:52 -0500 Subject: mm: Make generic weak get_user_pages_fast and EXPORT_GPL it Out of line get_user_pages_fast fallback implementation, make it a weak symbol, get rid of CONFIG_HAVE_GET_USER_PAGES_FAST. Export the symbol to modules so lguest can use it. 
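The weak-symbol mechanism this relies on is a GCC/clang extension that is easy to show in a small stand-alone sketch (names are illustrative): the generic definition is marked weak, and any ordinary definition of the same name elsewhere wins at link time.

#include <stdio.h>

/* Generic, always-correct fallback living in common code. */
int __attribute__((weak)) fast_op(int x)
{
        return x + 1;
}

/*
 * An architecture with a faster implementation provides a normal
 * (non-weak) definition in its own object file, for example:
 *
 *      int fast_op(int x) { return x << 1; }
 *
 * and the linker silently prefers it over the weak default.
 */

int main(void)
{
        printf("%d\n", fast_op(20));    /* prints 21 unless overridden */
        return 0;
}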
Signed-off-by: Nick Piggin Signed-off-by: Rusty Russell --- arch/powerpc/Kconfig | 3 --- arch/x86/Kconfig | 1 - arch/x86/mm/Makefile | 3 +-- include/linux/mm.h | 20 -------------------- mm/Kconfig | 3 --- mm/util.c | 15 +++++++++++++++ 6 files changed, 16 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 63c9cafda9c..587da5e0990 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -42,9 +42,6 @@ config GENERIC_HARDIRQS bool default y -config HAVE_GET_USER_PAGES_FAST - def_bool PPC64 - config HAVE_SETUP_PER_CPU_AREA def_bool PPC64 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3d0f2b6a5a1..ac2fb0641a0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -22,7 +22,6 @@ config X86 select HAVE_IDE select HAVE_OPROFILE select HAVE_IOREMAP_PROT - select HAVE_GET_USER_PAGES_FAST select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_KRETPROBES diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 2977ea37791..dfb932dcf13 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,7 +1,6 @@ obj-y := init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o + pat.o pgtable.o gup.o -obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o obj-$(CONFIG_X86_32) += pgtable_32.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/include/linux/mm.h b/include/linux/mm.h index 335288bff1b..fa651609b65 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -834,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); -#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST /* * get_user_pages_fast provides equivalent functionality to get_user_pages, * operating on current and current->mm (force=0 and doesn't return any vmas). @@ -848,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); -#else -/* - * Should probably be moved to asm-generic, and architectures can include it if - * they don't implement their own get_user_pages_fast. - */ -#define get_user_pages_fast(start, nr_pages, write, pages) \ -({ \ - struct mm_struct *mm = current->mm; \ - int ret; \ - \ - down_read(&mm->mmap_sem); \ - ret = get_user_pages(current, mm, start, nr_pages, \ - write, 0, pages, NULL); \ - up_read(&mm->mmap_sem); \ - \ - ret; \ -}) -#endif - /* * A callback you can register to apply pressure to ageable caches. * diff --git a/mm/Kconfig b/mm/Kconfig index 446c6588c75..0bd9c2dbb2a 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -77,9 +77,6 @@ config FLAT_NODE_MEM_MAP def_bool y depends on !SPARSEMEM -config HAVE_GET_USER_PAGES_FAST - bool - # # Both the NUMA code and DISCONTIGMEM use arrays of pg_data_t's # to represent different areas of memory. 
This variable allows diff --git a/mm/util.c b/mm/util.c index 9341ca77bd8..cb00b748ce4 100644 --- a/mm/util.c +++ b/mm/util.c @@ -171,3 +171,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->unmap_area = arch_unmap_area; } #endif + +int __attribute__((weak)) get_user_pages_fast(unsigned long start, + int nr_pages, int write, struct page **pages) +{ + struct mm_struct *mm = current->mm; + int ret; + + down_read(&mm->mmap_sem); + ret = get_user_pages(current, mm, start, nr_pages, + write, 0, pages, NULL); + up_read(&mm->mmap_sem); + + return ret; +} +EXPORT_SYMBOL_GPL(get_user_pages_fast); -- cgit v1.2.3-70-g09d2 From 4bceba417a795b78a5146e3f85291cb7bb2402ef Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Fri, 8 Aug 2008 11:15:07 +0200 Subject: export virtio_rng.h Hello Rusty, The entropy device was added after we exported all virtio headers. This patch adds virtio_rng.h to the exportable userspace headers. Signed-off-by: Christian Borntraeger Signed-off-by: Rusty Russell --- include/linux/Kbuild | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index a26f565e818..327f60658d9 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -356,6 +356,7 @@ unifdef-y += virtio_balloon.h unifdef-y += virtio_console.h unifdef-y += virtio_pci.h unifdef-y += virtio_ring.h +unifdef-y += virtio_rng.h unifdef-y += vt.h unifdef-y += wait.h unifdef-y += wanrouter.h -- cgit v1.2.3-70-g09d2 From 59f9415ffb9759e950d775f4c400f747b332cc02 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Wed, 30 Jul 2008 12:49:02 -0700 Subject: modules: extend initcall_debug functionality to the module loader The kernel has this really nice facility where if you put "initcall_debug" on the kernel commandline, it'll print which function it's going to execute just before calling an initcall, and then after the call completes it will 1) print if it had an error code 2) checks for a few simple bugs (like leaving irqs off) and 3) print how long the init call took in milliseconds. While trying to optimize the boot speed of my laptop, I have been loving number 3 to figure out what to optimize... ... and then I wished that the same thing was done for module loading. This patch makes the module loader use this exact same functionality; it's a logical extension in my view (since modules are just sort of late binding initcalls anyway) and so far I've found it quite useful in finding where things are too slow in my boot. 
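A rough userspace sketch of what such a shared helper does (announce the call, run it, then report the return value and the elapsed time) could look like the following; clock_gettime() stands in for the kernel's ktime_get(), and the names and output format are illustrative, not the kernel's:

#include <stdio.h>
#include <time.h>

typedef int (*initcall_t)(void);

static int do_one_call(initcall_t fn, const char *name)
{
        struct timespec t0, t1;
        long msecs;
        int ret;

        printf("calling %s\n", name);
        clock_gettime(CLOCK_MONOTONIC, &t0);
        ret = fn();
        clock_gettime(CLOCK_MONOTONIC, &t1);
        msecs = (t1.tv_sec - t0.tv_sec) * 1000 +
                (t1.tv_nsec - t0.tv_nsec) / 1000000;
        printf("%s returned %d after %ld msecs\n", name, ret, msecs);
        return ret;
}

static int demo_init(void)
{
        return 0;
}

int main(void)
{
        return do_one_call(demo_init, "demo_init");
}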
Signed-off-by: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Rusty Russell --- include/linux/init.h | 1 + init/main.c | 6 ++++-- kernel/module.c | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/init.h b/include/linux/init.h index 11b84e10605..93538b696e3 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; /* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; extern char *saved_command_line; extern unsigned int reset_devices; diff --git a/init/main.c b/init/main.c index 0bc7e167bf4..f6f7042331d 100644 --- a/init/main.c +++ b/init/main.c @@ -691,7 +691,7 @@ asmlinkage void __init start_kernel(void) rest_init(); } -static int __initdata initcall_debug; +static int initcall_debug; static int __init initcall_debug_setup(char *str) { @@ -700,7 +700,7 @@ static int __init initcall_debug_setup(char *str) } __setup("initcall_debug", initcall_debug_setup); -static void __init do_one_initcall(initcall_t fn) +int do_one_initcall(initcall_t fn) { int count = preempt_count(); ktime_t t0, t1, delta; @@ -740,6 +740,8 @@ static void __init do_one_initcall(initcall_t fn) print_fn_descriptor_symbol(KERN_WARNING "initcall %s", fn); printk(" returned with %s\n", msgbuf); } + + return result; } diff --git a/kernel/module.c b/kernel/module.c index 61d212120df..08864d257eb 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2288,7 +2288,7 @@ sys_init_module(void __user *umod, /* Start the module */ if (mod->init != NULL) - ret = mod->init(); + ret = do_one_initcall(mod->init); if (ret < 0) { /* Init routine failed: abort. Try to protect us from buggy refcounters. */ -- cgit v1.2.3-70-g09d2 From 74768ed833344bb0f82b97cee46320a3d7f09ecd Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 12 Aug 2008 15:08:39 -0700 Subject: page allocator: use no-panic variant of alloc_bootmem() in alloc_large_system_hash() .. since a failed allocation is being (initially) handled gracefully, and panic()-ed upon failure explicitly in the function if retries with smaller sizes failed. 
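The retry logic referred to above boils down to the pattern in this sketch, with malloc() standing in for alloc_bootmem_nopanic() and the explicit panic left as a NULL return (sizes and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_table(size_t *size, size_t min_size)
{
        void *table = NULL;

        /* Shrink the request instead of giving up on the first failure. */
        while (*size >= min_size && (table = malloc(*size)) == NULL)
                *size >>= 1;

        return table;   /* NULL here corresponds to the explicit panic */
}

int main(void)
{
        size_t size = 1UL << 20;
        void *table = alloc_table(&size, 4096);

        printf("allocated %zu bytes at %p\n", table ? size : 0, table);
        free(table);
        return 0;
}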
Signed-off-by: Jan Beulich Signed-off-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/bootmem.h | 4 ++++ mm/page_alloc.c | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 652470b687c..95837bfb525 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -97,10 +97,14 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 401d104d2bb..af982f7cdb2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4437,7 +4437,7 @@ void *__init alloc_large_system_hash(const char *tablename, do { size = bucketsize << log2qty; if (flags & HASH_EARLY) - table = alloc_bootmem(size); + table = alloc_bootmem_nopanic(size); else if (hashdist) table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); else { -- cgit v1.2.3-70-g09d2 From 969830b2fedf8336c41d6195f49d250b1e166ff8 Mon Sep 17 00:00:00 2001 From: David Miller Date: Tue, 12 Aug 2008 15:08:51 -0700 Subject: radeonfb: fix accel engine hangs Some chips appear to have the 2D engine hang during screen redraw, typically in a sequence of copyarea operations. This appears to be solved by adding a flush of the engine destination pixel cache and waiting for the engine to be idle before issuing the accel operation. The performance impact seems to be fairly small. Here is a trace on an RV370 (PCI device ID 0x5b64); it records the RBBM_STATUS register, then the source x/y, destination x/y, and width/height used for the copy: ---------------------------------------- radeonfb_prim_copyarea: STATUS[00000140] src[210:70] dst[210:60] wh[a0:10] radeonfb_prim_copyarea: STATUS[00000140] src[2b8:70] dst[2b8:60] wh[88:10] radeonfb_prim_copyarea: STATUS[00000140] src[348:70] dst[348:60] wh[40:10] radeonfb_prim_copyarea: STATUS[80020140] src[390:70] dst[390:60] wh[88:10] radeonfb_prim_copyarea: STATUS[8002613f] src[40:80] dst[40:70] wh[28:10] radeonfb_prim_copyarea: STATUS[80026139] src[a8:80] dst[a8:70] wh[38:10] radeonfb_prim_copyarea: STATUS[80026133] src[e8:80] dst[e8:70] wh[80:10] radeonfb_prim_copyarea: STATUS[8002612d] src[170:80] dst[170:70] wh[30:10] radeonfb_prim_copyarea: STATUS[80026127] src[1a8:80] dst[1a8:70] wh[8:10] radeonfb_prim_copyarea: STATUS[80026121] src[1b8:80] dst[1b8:70] wh[88:10] radeonfb_prim_copyarea: STATUS[8002611b] src[248:80] dst[248:70] wh[68:10] ---------------------------------------- When things are going fine, the copies complete before the next ROP is even issued, but all of a sudden the 2D unit becomes active (bit 17 in RBBM_STATUS) and the FIFO retry (bit 13) and FIFO pipeline busy (bit 14) are set as well. The FIFO begins to back up until it becomes full. What happens next is the radeon_fifo_wait() times out, and we access the chip illegally leading to a bus error which usually wedges the box.
None of this makes it to the console screen, of course :-) radeon_fifo_wait() should be modified to reset the accelerator when this timeout happens instead of programming the chip anyways. ---------------------------------------- radeonfb: FIFO Timeout ! ERROR(0): Cheetah error trap taken afsr[0010080005000000] afar[000007f900800e40] TL1(0) ERROR(0): TPC[595114] TNPC[595118] O7[459788] TSTATE[11009601] ERROR(0): TPC ERROR(0): M_SYND(0), E_SYND(0), Privileged ERROR(0): Highest priority error (0000080000000000) "Bus error response from system bus" ERROR(0): D-cache idx[0] tag[0000000000000000] utag[0000000000000000] stag[0000000000000000] ERROR(0): D-cache data0[0000000000000000] data1[0000000000000000] data2[0000000000000000] data3[0000000000000000] ERROR(0): I-cache idx[0] tag[0000000000000000] utag[0000000000000000] stag[0000000000000000] u[0000000000000000] l[00\ ERROR(0): I-cache INSN0[0000000000000000] INSN1[0000000000000000] INSN2[0000000000000000] INSN3[0000000000000000] ERROR(0): I-cache INSN4[0000000000000000] INSN5[0000000000000000] INSN6[0000000000000000] INSN7[0000000000000000] ERROR(0): E-cache idx[800e40] tag[000000000e049f4c] ERROR(0): E-cache data0[fffff8127d300180] data1[00000000004b5384] data2[0000000000000000] data3[0000000000000000] Ker:xnel panic - not syncing: Irrecoverable deferred error trap. ---------------------------------------- Another quirk is that these copyarea calls will not happen until the first drivers/char/vt.c:redraw_screen() occurs. This will only happen if you 1) VC switch or 2) run "consolechars" or 3) unblank the screen. This seems to happen because until a redraw_screen() the screen scrolling method used by fbcon is not finalized yet. I've seen this with other fb drivers too. So if all you do is boot straight into X you will never see this bug on the relevant chips. Signed-off-by: David S. Miller Signed-off-by: Benjamin Herrenschmidt Cc: [2.6.25.x, 2.6.26.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/aty/radeon_accel.c | 8 ++++++++ include/video/radeon.h | 4 ++++ 2 files changed, 12 insertions(+) (limited to 'include') diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c index 4d13f68436e..aa95f835024 100644 --- a/drivers/video/aty/radeon_accel.c +++ b/drivers/video/aty/radeon_accel.c @@ -55,6 +55,10 @@ static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo, OUTREG(DP_WRITE_MSK, 0xffffffff); OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM)); + radeon_fifo_wait(2); + OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL); + OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE)); + radeon_fifo_wait(2); OUTREG(DST_Y_X, (region->dy << 16) | region->dx); OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height); @@ -116,6 +120,10 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo, OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) | (ydir>=0 ? 
DST_Y_TOP_TO_BOTTOM : 0)); + radeon_fifo_wait(2); + OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL); + OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE)); + radeon_fifo_wait(3); OUTREG(SRC_Y_X, (sy << 16) | sx); OUTREG(DST_Y_X, (dy << 16) | dx); diff --git a/include/video/radeon.h b/include/video/radeon.h index 95a1f2038b1..099ffa5e5be 100644 --- a/include/video/radeon.h +++ b/include/video/radeon.h @@ -742,6 +742,10 @@ #define SOFT_RESET_RB (1 << 6) #define SOFT_RESET_HDP (1 << 7) +/* WAIT_UNTIL bit constants */ +#define WAIT_DMA_GUI_IDLE (1 << 9) +#define WAIT_2D_IDLECLEAN (1 << 16) + /* SURFACE_CNTL bit consants */ #define SURF_TRANSLATION_DIS (1 << 8) #define NONSURF_AP0_SWP_16BPP (1 << 20) -- cgit v1.2.3-70-g09d2 From 10546355323e4826d13e62f85ac6198385a817a9 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 12 Aug 2008 15:08:55 -0700 Subject: matrox maven: convert to a new-style i2c driver The legacy i2c model is going away soon, so switch to the new model. Signed-off-by: Jean Delvare Acked-by: Krzysztof Helt Cc: Petr Vandrovec Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/matrox/i2c-matroxfb.c | 12 ++++- drivers/video/matrox/matroxfb_maven.c | 95 ++++++++++++++--------------------- include/linux/i2c-id.h | 2 - 3 files changed, 50 insertions(+), 59 deletions(-) (limited to 'include') diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c index 9478439b73d..c14e3e2212b 100644 --- a/drivers/video/matrox/i2c-matroxfb.c +++ b/drivers/video/matrox/i2c-matroxfb.c @@ -107,7 +107,6 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo, b->mask.data = data; b->mask.clock = clock; b->adapter.owner = THIS_MODULE; - b->adapter.id = I2C_HW_B_G400; snprintf(b->adapter.name, sizeof(b->adapter.name), name, minfo->fbcon.node); i2c_set_adapdata(&b->adapter, b); @@ -182,6 +181,17 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) { MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0); if (err) printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. 
Continuing anyway.\n"); + else { + struct i2c_board_info maven_info = { + I2C_BOARD_INFO("maven", 0x1b), + }; + unsigned short const addr_list[2] = { + 0x1b, I2C_CLIENT_END + }; + + i2c_new_probed_device(&m2info->maven.adapter, + &maven_info, addr_list); + } } return m2info; fail_ddc1:; diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c index 2ad06b0125c..042408a8c63 100644 --- a/drivers/video/matrox/matroxfb_maven.c +++ b/drivers/video/matrox/matroxfb_maven.c @@ -19,8 +19,6 @@ #include #include -#define MAVEN_I2CID (0x1B) - #define MGATVO_B 1 #define MGATVO_C 2 @@ -128,7 +126,7 @@ static int get_ctrl_id(__u32 v4l2_id) { struct maven_data { struct matrox_fb_info* primary_head; - struct i2c_client client; + struct i2c_client *client; int version; }; @@ -974,7 +972,7 @@ static inline int maven_compute_timming(struct maven_data* md, static int maven_program_timming(struct maven_data* md, const struct mavenregs* m) { - struct i2c_client* c = &md->client; + struct i2c_client *c = md->client; if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) { LR(0x80); @@ -1011,7 +1009,7 @@ static int maven_program_timming(struct maven_data* md, } static inline int maven_resync(struct maven_data* md) { - struct i2c_client* c = &md->client; + struct i2c_client *c = md->client; maven_set_reg(c, 0x95, 0x20); /* start whole thing */ return 0; } @@ -1069,48 +1067,48 @@ static int maven_set_control (struct maven_data* md, maven_compute_bwlevel(md, &blacklevel, &whitelevel); blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8); whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8); - maven_set_reg_pair(&md->client, 0x0e, blacklevel); - maven_set_reg_pair(&md->client, 0x1e, whitelevel); + maven_set_reg_pair(md->client, 0x0e, blacklevel); + maven_set_reg_pair(md->client, 0x1e, whitelevel); } break; case V4L2_CID_SATURATION: { - maven_set_reg(&md->client, 0x20, p->value); - maven_set_reg(&md->client, 0x22, p->value); + maven_set_reg(md->client, 0x20, p->value); + maven_set_reg(md->client, 0x22, p->value); } break; case V4L2_CID_HUE: { - maven_set_reg(&md->client, 0x25, p->value); + maven_set_reg(md->client, 0x25, p->value); } break; case V4L2_CID_GAMMA: { const struct maven_gamma* g; g = maven_compute_gamma(md); - maven_set_reg(&md->client, 0x83, g->reg83); - maven_set_reg(&md->client, 0x84, g->reg84); - maven_set_reg(&md->client, 0x85, g->reg85); - maven_set_reg(&md->client, 0x86, g->reg86); - maven_set_reg(&md->client, 0x87, g->reg87); - maven_set_reg(&md->client, 0x88, g->reg88); - maven_set_reg(&md->client, 0x89, g->reg89); - maven_set_reg(&md->client, 0x8a, g->reg8a); - maven_set_reg(&md->client, 0x8b, g->reg8b); + maven_set_reg(md->client, 0x83, g->reg83); + maven_set_reg(md->client, 0x84, g->reg84); + maven_set_reg(md->client, 0x85, g->reg85); + maven_set_reg(md->client, 0x86, g->reg86); + maven_set_reg(md->client, 0x87, g->reg87); + maven_set_reg(md->client, 0x88, g->reg88); + maven_set_reg(md->client, 0x89, g->reg89); + maven_set_reg(md->client, 0x8a, g->reg8a); + maven_set_reg(md->client, 0x8b, g->reg8b); } break; case MATROXFB_CID_TESTOUT: { unsigned char val - = maven_get_reg(&md->client,0x8d); + = maven_get_reg(md->client, 0x8d); if (p->value) val |= 0x10; else val &= ~0x10; - maven_set_reg(&md->client, 0x8d, val); + maven_set_reg(md->client, 0x8d, val); } break; case MATROXFB_CID_DEFLICKER: { - maven_set_reg(&md->client, 0x93, maven_compute_deflicker(md)); + maven_set_reg(md->client, 0x93, maven_compute_deflicker(md)); } break; } @@ -1189,6 +1187,7 @@ static int 
maven_init_client(struct i2c_client* clnt) { MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo); md->primary_head = MINFO; + md->client = clnt; down_write(&ACCESS_FBINFO(altout.lock)); ACCESS_FBINFO(outputs[1]).output = &maven_altout; ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src; @@ -1232,14 +1231,11 @@ static int maven_shutdown_client(struct i2c_client* clnt) { return 0; } -static const unsigned short normal_i2c[] = { MAVEN_I2CID, I2C_CLIENT_END }; -I2C_CLIENT_INSMOD; - -static struct i2c_driver maven_driver; - -static int maven_detect_client(struct i2c_adapter* adapter, int address, int kind) { - int err = 0; - struct i2c_client* new_client; +static int maven_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + int err = -ENODEV; struct maven_data* data; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA | @@ -1250,50 +1246,37 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin err = -ENOMEM; goto ERROR0; } - new_client = &data->client; - i2c_set_clientdata(new_client, data); - new_client->addr = address; - new_client->adapter = adapter; - new_client->driver = &maven_driver; - new_client->flags = 0; - strlcpy(new_client->name, "maven", I2C_NAME_SIZE); - if ((err = i2c_attach_client(new_client))) - goto ERROR3; - err = maven_init_client(new_client); + i2c_set_clientdata(client, data); + err = maven_init_client(client); if (err) goto ERROR4; return 0; ERROR4:; - i2c_detach_client(new_client); -ERROR3:; kfree(data); ERROR0:; return err; } -static int maven_attach_adapter(struct i2c_adapter* adapter) { - if (adapter->id == I2C_HW_B_G400) - return i2c_probe(adapter, &addr_data, &maven_detect_client); - return 0; -} - -static int maven_detach_client(struct i2c_client* client) { - int err; - - if ((err = i2c_detach_client(client))) - return err; +static int maven_remove(struct i2c_client *client) +{ maven_shutdown_client(client); kfree(i2c_get_clientdata(client)); return 0; } +static const struct i2c_device_id maven_id[] = { + { "maven", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, maven_id); + static struct i2c_driver maven_driver={ .driver = { .name = "maven", }, - .id = I2C_DRIVERID_MGATVO, - .attach_adapter = maven_attach_adapter, - .detach_client = maven_detach_client, + .probe = maven_probe, + .remove = maven_remove, + .id_table = maven_id, }; static int __init matroxfb_maven_init(void) diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 4862398e05b..bf34c5f4c05 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -39,7 +39,6 @@ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */ #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_PCF8583 25 /* real time clock */ #define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */ @@ -95,7 +94,6 @@ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ #define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */ #define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ -#define I2C_HW_B_G400 0x010009 /* Matrox G400 */ #define I2C_HW_B_I810 0x01000a /* Intel I810 */ #define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ #define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ -- cgit v1.2.3-70-g09d2 From ea757acad5a5183c65a3e1b28b49a5978fe6a052 Mon Sep 17 00:00:00 2001 
From: Haavard Skinnemoen Date: Tue, 12 Aug 2008 15:08:57 -0700 Subject: atmel_lcdfb: add board parameter to specify framebuffer memory size Specify how much physically contiguous, DMA-capable memory will be allocated at driver initialization time. This allows creating a framebuffer device with a larger virtual resolution. Combined with y-panning, this can be used to implement a double-buffering acceleration method. Signed-off-by: Stanislaw Gruszka Acked-by: Haavard Skinnemoen Acked-by: Krzysztof Helt Cc: Nicolas Ferre Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/atmel_lcdfb.c | 7 +++++-- include/video/atmel_lcdc.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 16e47eb2ff5..9c5925927ec 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -242,9 +242,11 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo) { struct fb_info *info = sinfo->info; struct fb_var_screeninfo *var = &info->var; + unsigned int smem_len; - info->fix.smem_len = (var->xres_virtual * var->yres_virtual - * ((var->bits_per_pixel + 7) / 8)); + smem_len = (var->xres_virtual * var->yres_virtual + * ((var->bits_per_pixel + 7) / 8)); + info->fix.smem_len = max(smem_len, sinfo->smem_len); info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); @@ -796,6 +798,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) sinfo->default_monspecs = pdata_sinfo->default_monspecs; sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control; sinfo->guard_time = pdata_sinfo->guard_time; + sinfo->smem_len = pdata_sinfo->smem_len; sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight; sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode; } else { diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h index 613173b5db6..920c4e9cb93 100644 --- a/include/video/atmel_lcdc.h +++ b/include/video/atmel_lcdc.h @@ -41,6 +41,7 @@ struct atmel_lcdfb_info { struct work_struct task; unsigned int guard_time; + unsigned int smem_len; struct platform_device *pdev; struct clk *bus_clk; struct clk *lcdc_clk; -- cgit v1.2.3-70-g09d2 From 070cb06593006e7d565d4763380f3edd8dbdc134 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Tue, 12 Aug 2008 15:08:59 -0700 Subject: move kernel-doc comment for might_sleep directly before its defining block Signed-off-by: Uwe Kleine-König Cc: Ingo Molnar Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index aaa998f65c7..2651f805ba6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -108,6 +108,13 @@ struct completion; struct pt_regs; struct user; +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + /** * might_sleep - annotation for functions that can sleep * @@ -118,13 +125,6 @@ struct user; * be bitten later when the calling function happens to sleep when it is not * supposed to.
*/ -#ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() -#else -# define might_resched() do { } while (0) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); # define might_sleep() \ -- cgit v1.2.3-70-g09d2 From 50ac2d694f2dd1658341cf97bcf2ffb836d772cb Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 12 Aug 2008 15:09:02 -0700 Subject: seq_file: add seq_cpumask(), seq_nodemask() Short enough reads from /proc/irq/*/smp_affinity return -EINVAL for no good reason. This became noticed with NR_CPUS=4096 patches, when length of printed representation of cpumask becase 1152, but cat(1) continued to read with 1024-byte chunks. bitmap_scnprintf() in good faith fills buffer, returns 1023, check returns -EINVAL. Fix it by switching to seq_file, so handler will just fill buffer and doesn't care about offsets, length, filling EOF and all this crap. For that add seq_bitmap(), and wrappers around it -- seq_cpumask() and seq_nodemask(). Signed-off-by: Alexey Dobriyan Reviewed-by: Paul Jackson Cc: Mike Travis Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/seq_file.c | 14 ++++++++++++++ include/linux/bitmap.h | 1 + include/linux/seq_file.h | 12 ++++++++++++ lib/bitmap.c | 11 +++++++++++ 4 files changed, 38 insertions(+) (limited to 'include') diff --git a/fs/seq_file.c b/fs/seq_file.c index 3f54dbd6c49..5d54205e486 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -443,6 +443,20 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc) return -1; } +int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits) +{ + size_t len = bitmap_scnprintf_len(nr_bits); + + if (m->count + len < m->size) { + bitmap_scnprintf(m->buf + m->count, m->size - m->count, + bits, nr_bits); + m->count += len; + return 0; + } + m->count = m->size; + return -1; +} + static void *single_start(struct seq_file *p, loff_t *pos) { return NULL + (*pos == 0); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1abfe664c44..89781fd4885 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); +extern int bitmap_scnprintf_len(unsigned int nr_bits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a66304a0995..a1783b229ef 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,6 +4,8 @@ #include #include #include +#include +#include struct seq_operations; struct file; @@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); +int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +{ + return seq_bitmap(m, mask->bits, NR_CPUS); +} + +static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap(m, mask->bits, MAX_NUMNODES); +} int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); diff --git a/lib/bitmap.c 
b/lib/bitmap.c index 482df94ea21..06fb57c86de 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -315,6 +315,17 @@ int bitmap_scnprintf(char *buf, unsigned int buflen, } EXPORT_SYMBOL(bitmap_scnprintf); +/** + * bitmap_scnprintf_len - return buffer length needed to convert + * bitmap to an ASCII hex string + * @nr_bits: number of bits to be converted + */ +int bitmap_scnprintf_len(unsigned int nr_bits) +{ + unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4; + return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1; +} + /** * __bitmap_parse - convert an ASCII hex string into a bitmap. * @buf: pointer to buffer containing string. -- cgit v1.2.3-70-g09d2 From 40c9f22210f2d22f45d4fb430c94f472d19407d6 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Tue, 12 Aug 2008 15:09:04 -0700 Subject: byteorder: add a new include/linux/swab.h to define byteswapping functions Collect the implementations from include/linux/byteorder/swab.h, swabb.h in swab.h The functionality provided covers: u16 swab16(u16 val) - return a byteswapped 16 bit value u32 swab32(u32 val) - return a byteswapped 32 bit value u64 swab64(u64 val) - return a byteswapped 64 bit value u32 swahw32(u32 val) - return a wordswapped 32 bit value u32 swahb32(u32 val) - return a high/low byteswapped 32 bit value Similar to above, but return swapped value from a naturally-aligned pointer u16 swab16p(u16 *p) u32 swab32p(u32 *p) u64 swab64p(u64 *p) u32 swahw32p(u32 *p) u32 swahb32p(u32 *p) Similar to above, but swap the value in-place (in-situ) void swab16s(u16 *p) void swab32s(u32 *p) void swab64s(u64 *p) void swahw32s(u32 *p) void swahb32s(u32 *p) Arches can override any of these with an optimized version by defining an inline in their asm/byteorder.h (example given for swab16()): u16 __arch_swab16() {} #define __arch_swab16 __arch_swab16 Signed-off-by: Harvey Harrison Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swab.h | 309 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 309 insertions(+) create mode 100644 include/linux/swab.h (limited to 'include') diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 00000000000..270d5c208a8 --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,309 @@ +#ifndef _LINUX_SWAB_H +#define _LINUX_SWAB_H + +#include +#include +#include + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
+ */ +#define __const_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define __const_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define __const_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define __const_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define __const_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 ___swab16(__u16 val) +{ +#ifdef __arch_swab16 + return __arch_swab16(val); +#elif defined(__arch_swab16p) + return __arch_swab16p(&val); +#else + return __const_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 ___swab32(__u32 val) +{ +#ifdef __arch_swab32 + return __arch_swab32(val); +#elif defined(__arch_swab32p) + return __arch_swab32p(&val); +#else + return __const_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 ___swab64(__u64 val) +{ +#ifdef __arch_swab64 + return __arch_swab64(val); +#elif defined(__arch_swab64p) + return __arch_swab64p(&val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); +#else + return __const_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahw32(__u32 val) +{ +#ifdef __arch_swahw32 + return __arch_swahw32(val); +#elif defined(__arch_swahw32p) + return __arch_swahw32p(&val); +#else + return __const_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#elif defined(__arch_swahb32p) + return __arch_swahb32p(&val); +#else + return __const_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#define __swab16(x) \ + (__builtin_constant_p((__u16)(x)) ? \ + __const_swab16((x)) : \ + ___swab16((x))) + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#define __swab32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swab32((x)) : \ + ___swab32((x))) + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#define __swab64(x) \ + (__builtin_constant_p((__u64)(x)) ? \ + __const_swab64((x)) : \ + ___swab64((x))) + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? 
\ + __const_swahw32((x)) : \ + ___swahw32((x))) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swahb32((x)) : \ + ___swahb32((x))) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. + */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + +#ifdef __KERNEL__ +# define swab16 __swab16 +# define swab32 __swab32 +# define swab64 __swab64 +# define swahw32 __swahw32 +# define swahb32 __swahb32 +# define swab16p __swab16p +# define swab32p __swab32p +# define swab64p __swab64p +# define swahw32p __swahw32p +# define swahb32p __swahb32p +# define swab16s __swab16s +# define swab32s __swab32s +# define swab64s __swab64s +# define swahw32s __swahw32s +# define 
swahb32s __swahb32s +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SWAB_H */ -- cgit v1.2.3-70-g09d2 From bc2aa80e18a1b43ea2b8066500006b729c4ba4a7 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Tue, 12 Aug 2008 15:09:05 -0700 Subject: byteorder: add include/linux/byteorder.h to define endian helpers Signed-off-by: Harvey Harrison Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/byteorder.h | 372 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 372 insertions(+) create mode 100644 include/linux/byteorder.h (limited to 'include') diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h new file mode 100644 index 00000000000..29f002d73d9 --- /dev/null +++ b/include/linux/byteorder.h @@ -0,0 +1,372 @@ +#ifndef _LINUX_BYTEORDER_H +#define _LINUX_BYTEORDER_H + +#include +#include + +#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define one endianness +#endif + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define arch endianness +#endif + +#ifdef __LITTLE_ENDIAN +# undef __LITTLE_ENDIAN +# define __LITTLE_ENDIAN 1234 +#endif + +#ifdef __BIG_ENDIAN +# undef __BIG_ENDIAN +# define __BIG_ENDIAN 4321 +#endif + +#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD) +# define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD) +# define __BIG_ENDIAN_BITFIELD +#endif + +#ifdef __LITTLE_ENDIAN +# define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +# define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +# define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) + +# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)__swab16(x)) +# define __cpu_to_be32(x) ((__force __be32)__swab32(x)) +# define __cpu_to_be64(x) ((__force __be64)__swab64(x)) +#endif + +#ifdef __BIG_ENDIAN +# define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) +# define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +# define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) + +# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)__swab16(x)) +# define __cpu_to_le32(x) ((__force __le32)__swab32(x)) +# define __cpu_to_le64(x) ((__force __le64)__swab64(x)) +#endif + +/* + * These helpers could be phased out over time as the base version + * handles constant folding. 
+ */ +#define __constant_htonl(x) __cpu_to_be32(x) +#define __constant_ntohl(x) __be32_to_cpu(x) +#define __constant_htons(x) __cpu_to_be16(x) +#define __constant_ntohs(x) __be16_to_cpu(x) + +#define __constant_le16_to_cpu(x) __le16_to_cpu(x) +#define __constant_le32_to_cpu(x) __le32_to_cpu(x) +#define __constant_le64_to_cpu(x) __le64_to_cpu(x) +#define __constant_be16_to_cpu(x) __be16_to_cpu(x) +#define __constant_be32_to_cpu(x) __be32_to_cpu(x) +#define __constant_be64_to_cpu(x) __be64_to_cpu(x) + +#define __constant_cpu_to_le16(x) __cpu_to_le16(x) +#define __constant_cpu_to_le32(x) __cpu_to_le32(x) +#define __constant_cpu_to_le64(x) __cpu_to_le64(x) +#define __constant_cpu_to_be16(x) __cpu_to_be16(x) +#define __constant_cpu_to_be32(x) __cpu_to_be32(x) +#define __constant_cpu_to_be64(x) __cpu_to_be64(x) + +static inline void __le16_to_cpus(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_le16s(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __le32_to_cpus(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_le32s(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __le64_to_cpus(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_le64s(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __be16_to_cpus(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_be16s(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __be32_to_cpus(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_be32s(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __be64_to_cpus(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_be64s(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline __u16 __le16_to_cpup(const __le16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __le32_to_cpup(const __le32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __le64_to_cpup(const __le64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le16)*p; +#else + return (__force __le16)__swab16p(p); +#endif +} + +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le32)*p; +#else + return (__force __le32)__swab32p(p); +#endif +} + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le64)*p; +#else + return (__force __le64)__swab64p(p); +#endif +} + +static inline __u16 __be16_to_cpup(const __be16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __be32_to_cpup(const __be32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __be64_to_cpup(const __be64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __be16 
__cpu_to_be16p(const __u16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be16)*p; +#else + return (__force __be16)__swab16p(p); +#endif +} + +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)*p; +#else + return (__force __be32)__swab32p(p); +#endif +} + +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)*p; +#else + return (__force __be64)__swab64p(p); +#endif +} + +#ifdef __KERNEL__ + +# define le16_to_cpu __le16_to_cpu +# define le32_to_cpu __le32_to_cpu +# define le64_to_cpu __le64_to_cpu +# define be16_to_cpu __be16_to_cpu +# define be32_to_cpu __be32_to_cpu +# define be64_to_cpu __be64_to_cpu +# define cpu_to_le16 __cpu_to_le16 +# define cpu_to_le32 __cpu_to_le32 +# define cpu_to_le64 __cpu_to_le64 +# define cpu_to_be16 __cpu_to_be16 +# define cpu_to_be32 __cpu_to_be32 +# define cpu_to_be64 __cpu_to_be64 + +# define le16_to_cpup __le16_to_cpup +# define le32_to_cpup __le32_to_cpup +# define le64_to_cpup __le64_to_cpup +# define be16_to_cpup __be16_to_cpup +# define be32_to_cpup __be32_to_cpup +# define be64_to_cpup __be64_to_cpup +# define cpu_to_le16p __cpu_to_le16p +# define cpu_to_le32p __cpu_to_le32p +# define cpu_to_le64p __cpu_to_le64p +# define cpu_to_be16p __cpu_to_be16p +# define cpu_to_be32p __cpu_to_be32p +# define cpu_to_be64p __cpu_to_be64p + +# define le16_to_cpus __le16_to_cpus +# define le32_to_cpus __le32_to_cpus +# define le64_to_cpus __le64_to_cpus +# define be16_to_cpus __be16_to_cpus +# define be32_to_cpus __be32_to_cpus +# define be64_to_cpus __be64_to_cpus +# define cpu_to_le16s __cpu_to_le16s +# define cpu_to_le32s __cpu_to_le32s +# define cpu_to_le64s __cpu_to_le64s +# define cpu_to_be16s __cpu_to_be16s +# define cpu_to_be32s __cpu_to_be32s +# define cpu_to_be64s __cpu_to_be64s + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. + */ +# undef ntohl +# undef ntohs +# undef htonl +# undef htons + +# define ___htonl(x) __cpu_to_be32(x) +# define ___htons(x) __cpu_to_be16(x) +# define ___ntohl(x) __be32_to_cpu(x) +# define ___ntohs(x) __be16_to_cpu(x) + +# define htonl(x) ___htonl(x) +# define ntohl(x) ___ntohl(x) +# define htons(x) ___htons(x) +# define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpup(var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpup(var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpup(var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpup(var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpup(var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpup(var) + val); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_BYTEORDER_H */ -- cgit v1.2.3-70-g09d2 From 5f8c3c8edff426fd87098f057688463107fcd9ce Mon Sep 17 00:00:00 2001 From: Michael Abbott Date: Tue, 12 Aug 2008 15:09:11 -0700 Subject: Make ioctl.h compatible with userland The attached patch seems to already exist in a number of branches -- it keeps popping up on Google for me, and is certainly already in Debian -- but is strangely absent from mainstream. 
The problem appears to be that the patched file ends up as part of the target toolchain, but unfortunately the gcc constant folding doesn't appear to eliminate the __invalid_size_argument_for_IOC value early enough. Certainly compiling C++ programs which use _IO... macros as constants fails without this patch. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/ioctl.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h index 86418138557..15828b2d663 100644 --- a/include/asm-generic/ioctl.h +++ b/include/asm-generic/ioctl.h @@ -68,12 +68,16 @@ ((nr) << _IOC_NRSHIFT) | \ ((size) << _IOC_SIZESHIFT)) +#ifdef __KERNEL__ /* provoke compile error for invalid uses of size argument */ extern unsigned int __invalid_size_argument_for_IOC; #define _IOC_TYPECHECK(t) \ ((sizeof(t) == sizeof(t[1]) && \ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ sizeof(t) : __invalid_size_argument_for_IOC) +#else +#define _IOC_TYPECHECK(t) (sizeof(t)) +#endif /* used to create numbers */ #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) -- cgit v1.2.3-70-g09d2 From 31bad9246b5e17d547430697791acca5e9712333 Mon Sep 17 00:00:00 2001 From: Bernhard Walle Date: Tue, 12 Aug 2008 15:09:14 -0700 Subject: firmware/memmap: cleanup Various cleanup the drivers/firmware/memmap (after review by AKPM): - fix kdoc to conform to the standard - move kdoc from header to implementation files - remove superfluous WARN_ON() after kmalloc() - WARN_ON(x); if (!x) -> if(!WARN_ON(x)) - improve some comments Signed-off-by: Bernhard Walle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/firmware/memmap.c | 61 +++++++++++++++++++++++++++++++------------- include/linux/firmware-map.h | 26 ------------------- 2 files changed, 43 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 001622eb86f..3bf8ee120d4 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -84,20 +84,23 @@ static struct kobj_type memmap_ktype = { */ /* - * Firmware memory map entries + * Firmware memory map entries. No locking is needed because the + * firmware_map_add() and firmware_map_add_early() functions are called + * in firmware initialisation code in one single thread of execution. */ static LIST_HEAD(map_entries); /** - * Common implementation of firmware_map_add() and firmware_map_add_early() - * which expects a pre-allocated struct firmware_map_entry. - * + * firmware_map_add_entry() - Does the real work to add a firmware memmap entry. * @start: Start of the memory range. * @end: End of the memory range (inclusive). * @type: Type of the memory range. * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised * entry. - */ + * + * Common implementation of firmware_map_add() and firmware_map_add_early() + * which expects a pre-allocated struct firmware_map_entry. + **/ static int firmware_map_add_entry(resource_size_t start, resource_size_t end, const char *type, struct firmware_map_entry *entry) @@ -115,33 +118,52 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end, return 0; } -/* - * See for documentation. - */ +/** + * firmware_map_add() - Adds a firmware mapping entry. + * @start: Start of the memory range. + * @end: End of the memory range (inclusive). + * @type: Type of the memory range. + * + * This function uses kmalloc() for memory + * allocation. 
Use firmware_map_add_early() if you want to use the bootmem + * allocator. + * + * That function must be called before late_initcall. + * + * Returns 0 on success, or -ENOMEM if no memory could be allocated. + **/ int firmware_map_add(resource_size_t start, resource_size_t end, const char *type) { struct firmware_map_entry *entry; entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC); - WARN_ON(!entry); if (!entry) return -ENOMEM; return firmware_map_add_entry(start, end, type, entry); } -/* - * See for documentation. - */ +/** + * firmware_map_add_early() - Adds a firmware mapping entry. + * @start: Start of the memory range. + * @end: End of the memory range (inclusive). + * @type: Type of the memory range. + * + * Adds a firmware mapping entry. This function uses the bootmem allocator + * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). + * + * That function must be called before late_initcall. + * + * Returns 0 on success, or -ENOMEM if no memory could be allocated. + **/ int __init firmware_map_add_early(resource_size_t start, resource_size_t end, const char *type) { struct firmware_map_entry *entry; entry = alloc_bootmem_low(sizeof(struct firmware_map_entry)); - WARN_ON(!entry); - if (!entry) + if (WARN_ON(!entry)) return -ENOMEM; return firmware_map_add_entry(start, end, type, entry); @@ -183,7 +205,10 @@ static ssize_t memmap_attr_show(struct kobject *kobj, /* * Initialises stuff and adds the entries in the map_entries list to * sysfs. Important is that firmware_map_add() and firmware_map_add_early() - * must be called before late_initcall. + * must be called before late_initcall. That's just because that function + * is called as late_initcall() function, which means that if you call + * firmware_map_add() or firmware_map_add_early() afterwards, the entries + * are not added to sysfs. */ static int __init memmap_init(void) { @@ -192,13 +217,13 @@ static int __init memmap_init(void) struct kset *memmap_kset; memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj); - WARN_ON(!memmap_kset); - if (!memmap_kset) + if (WARN_ON(!memmap_kset)) return -ENOMEM; list_for_each_entry(entry, &map_entries, list) { entry->kobj.kset = memmap_kset; - kobject_add(&entry->kobj, NULL, "%d", i++); + if (kobject_add(&entry->kobj, NULL, "%d", i++)) + kobject_put(&entry->kobj); } return 0; diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index acbdbcc1605..6e199c8dfac 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -24,34 +24,8 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -/** - * Adds a firmware mapping entry. This function uses kmalloc() for memory - * allocation. Use firmware_map_add_early() if you want to use the bootmem - * allocator. - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add(resource_size_t start, resource_size_t end, const char *type); - -/** - * Adds a firmware mapping entry. This function uses the bootmem allocator - * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. 
- */ int firmware_map_add_early(resource_size_t start, resource_size_t end, const char *type); -- cgit v1.2.3-70-g09d2 From 39d2f1ab2a36ac527a6c41cfe689f50c239eaca3 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Wed, 13 Aug 2008 16:40:43 +1000 Subject: [XFS] extend completions to provide XFS object flush requirements XFS object flushing doesn't quite match existing completion semantics. It mixed exclusive access with completion. That is, we need to mark an object as being flushed before flushing it to disk, and then block any other attempt to flush it until the completion occurs. We do this but adding an extra count to the completion before we start using them. However, we still need to determine if there is a completion in progress, and allow no-blocking attempts fo completions to decrement the count. To do this we introduce: int try_wait_for_completion(struct completion *x) returns a failure status if done == 0, otherwise decrements done to zero and returns a "started" status. This is provided to allow counted completions to begin safely while holding object locks in inverted order. int completion_done(struct completion *x) returns 1 if there is no waiter, 0 if there is a waiter (i.e. a completion in progress). This replaces the use of semaphores for providing this exclusion and completion mechanism. SGI-PV: 981498 SGI-Modid: xfs-linux-melb:xfs-kern:31816a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- include/linux/completion.h | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'include') diff --git a/include/linux/completion.h b/include/linux/completion.h index d2961b66d53..57faa60de9b 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -55,4 +55,49 @@ extern void complete_all(struct completion *); #define INIT_COMPLETION(x) ((x).done = 0) + +/** + * try_wait_for_completion - try to decrement a completion without blocking + * @x: completion structure + * + * Returns: 0 if a decrement cannot be done without blocking + * 1 if a decrement succeeded. + * + * If a completion is being used as a counting completion, + * attempt to decrement the counter without blocking. This + * enables us to avoid waiting if the resource the completion + * is protecting is not available. + */ +static inline bool try_wait_for_completion(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + else + x->done--; + spin_unlock_irq(&x->wait.lock); + return ret; +} + +/** + * completion_done - Test to see if a completion has any waiters + * @x: completion structure + * + * Returns: 0 if there are waiters (wait_for_completion() in progress) + * 1 if there are no waiters. + * + */ +static inline bool completion_done(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + spin_unlock_irq(&x->wait.lock); + return ret; +} + #endif -- cgit v1.2.3-70-g09d2 From 83f36f3f35f4f83fa346bfff58a5deabc78370e5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 13 Aug 2008 02:13:34 -0700 Subject: pkt_sched: Add queue stopped test back to qdisc_run(). Based upon a bug report by Andrew Gallatin on netdev with subject "CPU utilization increased in 2.6.27rc" In commit 37437bb2e1ae8af470dfcd5b4ff454110894ccaf ("pkt_sched: Schedule qdiscs instead of netdev_queue.") the test of the queue being stopped was erroneously removed from qdisc_run(). 
When the TX queue of the device fills up, this omission causes lots of extraneous useless work to be queued up to softirq context, where we'll just return immediately because the device is still stuffed up. Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 6affcfaa123..853fe83d9f3 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -89,7 +89,10 @@ extern void __qdisc_run(struct Qdisc *q); static inline void qdisc_run(struct Qdisc *q) { - if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) + struct netdev_queue *txq = q->dev_queue; + + if (!netif_tx_queue_stopped(txq) && + !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) __qdisc_run(q); } -- cgit v1.2.3-70-g09d2 From 83ac794f15d2311d8e9e641c73618011f2f7f0a1 Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Wed, 13 Aug 2008 02:34:39 -0700 Subject: ipv6: ip6_route.h cleanup. This patch removes rt6_lock declaration from include/net/ip6_route.h as it is unused. Signed-off-by: Rami Rosen Signed-off-by: David S. Miller --- include/net/ip6_route.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 2f8b3c06a10..49d085649cc 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -118,7 +118,6 @@ extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); extern void rt6_ifdown(struct net *net, struct net_device *dev); extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); -extern rwlock_t rt6_lock; /* * Store a destination cache entry in a socket -- cgit v1.2.3-70-g09d2 From 6bf90b2bf4084a64bbcf96a0b93dc64c77288028 Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Wed, 13 Aug 2008 02:35:39 -0700 Subject: ipv6: Kill unused ip6_prohibit_entry and ip6_blk_hole_entry declarations. This patch removes ip6_prohibit_entry and ip6_blk_hole_entry declarations from include/net/ip6_route.h as they are unused. Signed-off-by: Rami Rosen Signed-off-by: David S. Miller --- include/net/ip6_route.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include') diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 49d085649cc..bc391ba101e 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -38,11 +38,6 @@ struct route_info { #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020 -#ifdef CONFIG_IPV6_MULTIPLE_TABLES -extern struct rt6_info *ip6_prohibit_entry; -extern struct rt6_info *ip6_blk_hole_entry; -#endif - extern void ip6_route_input(struct sk_buff *skb); extern struct dst_entry * ip6_route_output(struct net *net, -- cgit v1.2.3-70-g09d2 From 0ed89b06e49c326bff81d81f24b9ba955eb912d5 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 13 Aug 2008 10:17:24 +0200 Subject: x86: propagate new nonpanic bootmem macros to CONFIG_HAVE_ARCH_BOOTMEM_NODE Commit 74768ed833344b "page allocator: use no-panic variant of alloc_bootmem() in alloc_large_system_hash()" introduced two new _nopanic macros which are undefined for CONFIG_HAVE_ARCH_BOOTMEM_NODE. 
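[Editor's note] The _nopanic macros this patch wires up for the CONFIG_HAVE_ARCH_BOOTMEM_NODE case matter because the caller, not the allocator, decides how to handle failure: plain alloc_bootmem()/alloc_bootmem_pages() panic when the request cannot be met, while the _nopanic variants return NULL. A minimal caller-side sketch, loosely modelled on what alloc_large_system_hash() does; the function name and the halve-and-retry policy below are invented purely for illustration and are not code from this patch:

	/*
	 * Illustrative sketch only -- not code from this patch.  Shows the
	 * contract the _nopanic bootmem variants provide: on failure they
	 * return NULL instead of panicking, so the caller can shrink the
	 * request and retry.
	 */
	#include <linux/bootmem.h>
	#include <linux/mm.h>
	#include <linux/kernel.h>

	static void * __init try_alloc_boot_hash(unsigned long *size)
	{
		void *table = NULL;

		while (*size >= PAGE_SIZE && !table) {
			/* Does not panic on failure, unlike alloc_bootmem_pages(). */
			table = alloc_bootmem_pages_nopanic(*size);
			if (!table) {
				printk(KERN_WARNING
				       "bootmem: %lu byte hash failed, retrying smaller\n",
				       *size);
				*size >>= 1;	/* halve the request and try again */
			}
		}
		return table;
	}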
Signed-off-by: Johannes Weiner Acked-by: "Jan Beulich" Signed-off-by: Ingo Molnar --- include/asm-x86/mmzone_32.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h index b2298a22756..5862e646065 100644 --- a/include/asm-x86/mmzone_32.h +++ b/include/asm-x86/mmzone_32.h @@ -97,10 +97,16 @@ static inline int pfn_valid(int pfn) reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags)) #define alloc_bootmem(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \ + __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \ + __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ -- cgit v1.2.3-70-g09d2 From 318e5313923197e71a94f7b18835151649384b7f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 5 Aug 2008 13:34:30 +0800 Subject: crypto: hash - Add missing top-level functions The top-level functions init/update/final were missing for ahash. Signed-off-by: Herbert Xu --- include/crypto/hash.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'include') diff --git a/include/crypto/hash.h b/include/crypto/hash.h index d12498ec8a4..ee48ef8fb2e 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -101,6 +101,24 @@ static inline int crypto_ahash_digest(struct ahash_request *req) return crt->digest(req); } +static inline int crypto_ahash_init(struct ahash_request *req) +{ + struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); + return crt->init(req); +} + +static inline int crypto_ahash_update(struct ahash_request *req) +{ + struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); + return crt->update(req); +} + +static inline int crypto_ahash_final(struct ahash_request *req) +{ + struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); + return crt->final(req); +} + static inline void ahash_request_set_tfm(struct ahash_request *req, struct crypto_ahash *tfm) { -- cgit v1.2.3-70-g09d2 From e49140120c88eb99db1a9172d9ac224c0f2bbdd2 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Wed, 13 Aug 2008 22:02:26 +1000 Subject: crypto: padlock - fix VIA PadLock instruction usage with irq_ts_save/restore() Wolfgang Walter reported this oops on his via C3 using padlock for AES-encryption: ################################################################## BUG: unable to handle kernel NULL pointer dereference at 000001f0 IP: [] __switch_to+0x30/0x117 *pde = 00000000 Oops: 0002 [#1] PREEMPT Modules linked in: Pid: 2071, comm: sleep Not tainted (2.6.26 #11) EIP: 0060:[] EFLAGS: 00010002 CPU: 0 EIP is at __switch_to+0x30/0x117 EAX: 00000000 EBX: c0493300 ECX: dc48dd00 EDX: c0493300 ESI: dc48dd00 EDI: c0493530 EBP: c04cff8c ESP: c04cff7c DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 0068 Process sleep (pid: 2071, ti=c04ce000 task=dc48dd00 task.ti=d2fe6000) Stack: dc48df30 c0493300 00000000 00000000 d2fe7f44 c03b5b43 c04cffc8 00000046 c0131856 0000005a dc472d3c c0493300 c0493470 d983ae00 00002696 00000000 c0239f54 00000000 c04c4000 c04cffd8 c01025fe c04f3740 00049800 c04cffe0 
Call Trace: [] ? schedule+0x285/0x2ff [] ? pm_qos_requirement+0x3c/0x53 [] ? acpi_processor_idle+0x0/0x434 [] ? cpu_idle+0x73/0x7f [] ? rest_init+0x61/0x63 ======================= Wolfgang also found out that adding kernel_fpu_begin() and kernel_fpu_end() around the padlock instructions fix the oops. Suresh wrote: These padlock instructions though don't use/touch SSE registers, but it behaves similar to other SSE instructions. For example, it might cause DNA faults when cr0.ts is set. While this is a spurious DNA trap, it might cause oops with the recent fpu code changes. This is the code sequence that is probably causing this problem: a) new app is getting exec'd and it is somewhere in between start_thread() and flush_old_exec() in the load_xyz_binary() b) At pont "a", task's fpu state (like TS_USEDFPU, used_math() etc) is cleared. c) Now we get an interrupt/softirq which starts using these encrypt/decrypt routines in the network stack. This generates a math fault (as cr0.ts is '1') which sets TS_USEDFPU and restores the math that is in the task's xstate. d) Return to exec code path, which does start_thread() which does free_thread_xstate() and sets xstate pointer to NULL while the TS_USEDFPU is still set. e) At the next context switch from the new exec'd task to another task, we have a scenarios where TS_USEDFPU is set but xstate pointer is null. This can cause an oops during unlazy_fpu() in __switch_to() Now: 1) This should happen with or with out pre-emption. Viro also encountered similar problem with out CONFIG_PREEMPT. 2) kernel_fpu_begin() and kernel_fpu_end() will fix this problem, because kernel_fpu_begin() will manually do a clts() and won't run in to the situation of setting TS_USEDFPU in step "c" above. 3) This was working before the fpu changes, because its a spurious math fault which doesn't corrupt any fpu/sse registers and the task's math state was always in an allocated state. With out the recent lazy fpu allocation changes, while we don't see oops, there is a possible race still present in older kernels(for example, while kernel is using kernel_fpu_begin() in some optimized clear/copy page and an interrupt/softirq happens which uses these padlock instructions generating DNA fault). This is the failing scenario that existed even before the lazy fpu allocation changes: 0. CPU's TS flag is set 1. kernel using FPU in some optimized copy routine and while doing kernel_fpu_begin() takes an interrupt just before doing clts() 2. Takes an interrupt and ipsec uses padlock instruction. And we take a DNA fault as TS flag is still set. 3. We handle the DNA fault and set TS_USEDFPU and clear cr0.ts 4. We complete the padlock routine 5. Go back to step-1, which resumes clts() in kernel_fpu_begin(), finishes the optimized copy routine and does kernel_fpu_end(). At this point, we have cr0.ts again set to '1' but the task's TS_USEFPU is stilll set and not cleared. 6. Now kernel resumes its user operation. And at the next context switch, kernel sees it has do a FP save as TS_USEDFPU is still set and then will do a unlazy_fpu() in __switch_to(). unlazy_fpu() will take a DNA fault, as cr0.ts is '1' and now, because we are in __switch_to(), math_state_restore() will get confused and will restore the next task's FP state and will save it in prev tasks's FP state. Remember, in __switch_to() we are already on the stack of the next task but take a DNA fault for the prev task. This causes the fpu leakage. 
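[Editor's note] A condensed sketch of the save/restore pattern the fix described next applies throughout padlock-aes.c, padlock-sha.c and via-rng.c may help here. The wrapper function name below is hypothetical; the instruction encoding and register constraints are copied from the padlock-sha.c hunk later in this patch, and irq_ts_save()/irq_ts_restore() are the new helpers this patch adds to include/asm-x86/i387.h:

	/*
	 * Sketch of the usage pattern introduced by this patch (wrapper name
	 * is illustrative).  irq_ts_save() clears CR0.TS only when called
	 * from interrupt context, so the PadLock instruction cannot raise a
	 * spurious DNA fault behind the back of the interrupted task;
	 * irq_ts_restore() sets TS again if and only if it was cleared.
	 */
	#include <asm/i387.h>

	static void padlock_sha1_block(const char *in, char *out, int count)
	{
		int ts_state;

		ts_state = irq_ts_save();
		/* REP XSHA1 -- same encoding and constraints as in padlock-sha.c */
		asm volatile(".byte 0xf3,0x0f,0xa6,0xc8"
			     : "+S" (in), "+D" (out)
			     : "c" (count), "a" (0));
		irq_ts_restore(ts_state);
	}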
Fix the padlock instruction usage by calling them inside the context of new routines irq_ts_save/restore(), which clear/restore cr0.ts manually in the interrupt context. This will not generate spurious DNA in the context of the interrupt which will fix the oops encountered and the possible FPU leakage issue. Reported-and-bisected-by: Wolfgang Walter Signed-off-by: Suresh Siddha Signed-off-by: Herbert Xu --- drivers/char/hw_random/via-rng.c | 8 ++++++++ drivers/crypto/padlock-aes.c | 28 +++++++++++++++++++++++++++- drivers/crypto/padlock-sha.c | 9 +++++++++ include/asm-x86/i387.h | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index f7feae4ebb5..128202e18fc 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -31,6 +31,7 @@ #include #include #include +#include #define PFX KBUILD_MODNAME ": " @@ -67,16 +68,23 @@ enum { * Another possible performance boost may come from simply buffering * until we have 4 bytes, thus returning a u32 at a time, * instead of the current u8-at-a-time. + * + * Padlock instructions can generate a spurious DNA fault, so + * we have to call them in the context of irq_ts_save/restore() */ static inline u32 xstore(u32 *addr, u32 edx_in) { u32 eax_out; + int ts_state; + + ts_state = irq_ts_save(); asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" :"=m"(*addr), "=a"(eax_out) :"D"(addr), "d"(edx_in)); + irq_ts_restore(ts_state); return eax_out; } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 54a2a166e56..bf2917d197a 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "padlock.h" /* Control word. */ @@ -141,6 +142,12 @@ static inline void padlock_reset_key(void) asm volatile ("pushfl; popfl"); } +/* + * While the padlock instructions don't use FP/SSE registers, they + * generate a spurious DNA fault when cr0.ts is '1'. 
These instructions + * should be used only inside the irq_ts_save/restore() context + */ + static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, void *control_word) { @@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); + int ts_state; padlock_reset_key(); + + ts_state = irq_ts_save(); aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); + irq_ts_restore(ts_state); } static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct aes_ctx *ctx = aes_ctx(tfm); + int ts_state; padlock_reset_key(); + + ts_state = irq_ts_save(); aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); + irq_ts_restore(ts_state); } static struct crypto_alg aes_alg = { @@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; + int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, ctx->E, &ctx->cword.encrypt, @@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc, nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } + irq_ts_restore(ts_state); return err; } @@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; + int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, ctx->D, &ctx->cword.decrypt, @@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc, nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - + irq_ts_restore(ts_state); return err; } @@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; + int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, ctx->E, @@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc, nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } + irq_ts_restore(ts_state); return err; } @@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); struct blkcipher_walk walk; int err; + int ts_state; padlock_reset_key(); blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + ts_state = irq_ts_save(); while ((nbytes = walk.nbytes)) { padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, ctx->D, walk.iv, &ctx->cword.decrypt, @@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_done(desc, &walk, nbytes); } + irq_ts_restore(ts_state); return err; } diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 40d5680fa01..a7fbadebf62 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -22,6 +22,7 @@ #include #include #include 
+#include #include "padlock.h" #define SHA1_DEFAULT_FALLBACK "sha1-generic" @@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count) * PadLock microcode needs it that big. */ char buf[128+16]; char *result = NEAREST_ALIGNED(buf); + int ts_state; ((uint32_t *)result)[0] = SHA1_H0; ((uint32_t *)result)[1] = SHA1_H1; @@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count) ((uint32_t *)result)[3] = SHA1_H3; ((uint32_t *)result)[4] = SHA1_H4; + /* prevent taking the spurious DNA fault with padlock. */ + ts_state = irq_ts_save(); asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0)); + irq_ts_restore(ts_state); padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); } @@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count) * PadLock microcode needs it that big. */ char buf[128+16]; char *result = NEAREST_ALIGNED(buf); + int ts_state; ((uint32_t *)result)[0] = SHA256_H0; ((uint32_t *)result)[1] = SHA256_H1; @@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count) ((uint32_t *)result)[6] = SHA256_H6; ((uint32_t *)result)[7] = SHA256_H7; + /* prevent taking the spurious DNA fault with padlock. */ + ts_state = irq_ts_save(); asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ : "+S"(in), "+D"(result) : "c"(count), "a"(0)); + irq_ts_restore(ts_state); padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); } diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h index 96fa8449ff1..6d3b2106341 100644 --- a/include/asm-x86/i387.h +++ b/include/asm-x86/i387.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -236,6 +237,37 @@ static inline void kernel_fpu_end(void) preempt_enable(); } +/* + * Some instructions like VIA's padlock instructions generate a spurious + * DNA fault but don't modify SSE registers. And these instructions + * get used from interrupt context aswell. To prevent these kernel instructions + * in interrupt context interact wrongly with other user/kernel fpu usage, we + * should use them only in the context of irq_ts_save/restore() + */ +static inline int irq_ts_save(void) +{ + /* + * If we are in process context, we are ok to take a spurious DNA fault. + * Otherwise, doing clts() in process context require pre-emption to + * be disabled or some heavy lifting like kernel_fpu_begin() + */ + if (!in_interrupt()) + return 0; + + if (read_cr0() & X86_CR0_TS) { + clts(); + return 1; + } + + return 0; +} + +static inline void irq_ts_restore(int TS_state) +{ + if (TS_state) + stts(); +} + #ifdef CONFIG_X86_64 static inline void save_init_fpu(struct task_struct *tsk) -- cgit v1.2.3-70-g09d2 From 758db3f2118703a1e36374dae5d58bed963e7e0d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 13 Aug 2008 14:26:22 -0700 Subject: [h8300] move include/asm-h8300 to arch/h8300/include/asm Done as a script (well, a single "git mv" actually) on request from Yoshinori Sato as a way to avoid a huge diff. 
Requested-by: Yoshinori Sato Cc: Sam Ravnborg Cc: Andrew Morton Signed-off-by: Linus Torvalds --- arch/h8300/include/asm/Kbuild | 1 + arch/h8300/include/asm/a.out.h | 20 ++ arch/h8300/include/asm/atomic.h | 144 ++++++++++++ arch/h8300/include/asm/auxvec.h | 4 + arch/h8300/include/asm/bitops.h | 212 +++++++++++++++++ arch/h8300/include/asm/bootinfo.h | 2 + arch/h8300/include/asm/bug.h | 4 + arch/h8300/include/asm/bugs.h | 16 ++ arch/h8300/include/asm/byteorder.h | 13 ++ arch/h8300/include/asm/cache.h | 12 + arch/h8300/include/asm/cachectl.h | 14 ++ arch/h8300/include/asm/cacheflush.h | 39 ++++ arch/h8300/include/asm/checksum.h | 102 ++++++++ arch/h8300/include/asm/cputime.h | 6 + arch/h8300/include/asm/current.h | 25 ++ arch/h8300/include/asm/dbg.h | 2 + arch/h8300/include/asm/delay.h | 38 +++ arch/h8300/include/asm/device.h | 7 + arch/h8300/include/asm/div64.h | 1 + arch/h8300/include/asm/dma.h | 15 ++ arch/h8300/include/asm/elf.h | 104 +++++++++ arch/h8300/include/asm/emergency-restart.h | 6 + arch/h8300/include/asm/errno.h | 6 + arch/h8300/include/asm/fb.h | 12 + arch/h8300/include/asm/fcntl.h | 11 + arch/h8300/include/asm/flat.h | 27 +++ arch/h8300/include/asm/fpu.h | 1 + arch/h8300/include/asm/futex.h | 6 + arch/h8300/include/asm/gpio.h | 52 +++++ arch/h8300/include/asm/hardirq.h | 28 +++ arch/h8300/include/asm/hw_irq.h | 1 + arch/h8300/include/asm/io.h | 324 +++++++++++++++++++++++++ arch/h8300/include/asm/ioctl.h | 1 + arch/h8300/include/asm/ioctls.h | 85 +++++++ arch/h8300/include/asm/ipcbuf.h | 29 +++ arch/h8300/include/asm/irq.h | 49 ++++ arch/h8300/include/asm/irq_regs.h | 1 + arch/h8300/include/asm/kdebug.h | 1 + arch/h8300/include/asm/kmap_types.h | 21 ++ arch/h8300/include/asm/linkage.h | 8 + arch/h8300/include/asm/local.h | 6 + arch/h8300/include/asm/mc146818rtc.h | 9 + arch/h8300/include/asm/md.h | 13 ++ arch/h8300/include/asm/mman.h | 17 ++ arch/h8300/include/asm/mmu.h | 11 + arch/h8300/include/asm/mmu_context.h | 32 +++ arch/h8300/include/asm/module.h | 13 ++ arch/h8300/include/asm/msgbuf.h | 31 +++ arch/h8300/include/asm/mutex.h | 9 + arch/h8300/include/asm/page.h | 78 +++++++ arch/h8300/include/asm/page_offset.h | 3 + arch/h8300/include/asm/param.h | 20 ++ arch/h8300/include/asm/pci.h | 25 ++ arch/h8300/include/asm/percpu.h | 6 + arch/h8300/include/asm/pgalloc.h | 8 + arch/h8300/include/asm/pgtable.h | 73 ++++++ arch/h8300/include/asm/poll.h | 11 + arch/h8300/include/asm/posix_types.h | 60 +++++ arch/h8300/include/asm/processor.h | 140 +++++++++++ arch/h8300/include/asm/ptrace.h | 64 +++++ arch/h8300/include/asm/regs267x.h | 336 ++++++++++++++++++++++++++ arch/h8300/include/asm/regs306x.h | 212 +++++++++++++++++ arch/h8300/include/asm/resource.h | 6 + arch/h8300/include/asm/scatterlist.h | 18 ++ arch/h8300/include/asm/sections.h | 6 + arch/h8300/include/asm/segment.h | 49 ++++ arch/h8300/include/asm/sembuf.h | 25 ++ arch/h8300/include/asm/setup.h | 6 + arch/h8300/include/asm/sh_bios.h | 29 +++ arch/h8300/include/asm/shm.h | 31 +++ arch/h8300/include/asm/shmbuf.h | 42 ++++ arch/h8300/include/asm/shmparam.h | 6 + arch/h8300/include/asm/sigcontext.h | 18 ++ arch/h8300/include/asm/siginfo.h | 6 + arch/h8300/include/asm/signal.h | 161 +++++++++++++ arch/h8300/include/asm/smp.h | 1 + arch/h8300/include/asm/socket.h | 57 +++++ arch/h8300/include/asm/sockios.h | 13 ++ arch/h8300/include/asm/spinlock.h | 6 + arch/h8300/include/asm/stat.h | 78 +++++++ arch/h8300/include/asm/statfs.h | 6 + arch/h8300/include/asm/string.h | 44 ++++ arch/h8300/include/asm/system.h | 158 +++++++++++++ 
arch/h8300/include/asm/target_time.h | 4 + arch/h8300/include/asm/termbits.h | 200 ++++++++++++++++ arch/h8300/include/asm/termios.h | 92 ++++++++ arch/h8300/include/asm/thread_info.h | 104 +++++++++ arch/h8300/include/asm/timex.h | 19 ++ arch/h8300/include/asm/tlb.h | 23 ++ arch/h8300/include/asm/tlbflush.h | 55 +++++ arch/h8300/include/asm/topology.h | 6 + arch/h8300/include/asm/traps.h | 37 +++ arch/h8300/include/asm/types.h | 33 +++ arch/h8300/include/asm/uaccess.h | 162 +++++++++++++ arch/h8300/include/asm/ucontext.h | 12 + arch/h8300/include/asm/unaligned.h | 11 + arch/h8300/include/asm/unistd.h | 364 +++++++++++++++++++++++++++++ arch/h8300/include/asm/user.h | 75 ++++++ arch/h8300/include/asm/virtconvert.h | 20 ++ include/asm-h8300/Kbuild | 1 - include/asm-h8300/a.out.h | 20 -- include/asm-h8300/atomic.h | 144 ------------ include/asm-h8300/auxvec.h | 4 - include/asm-h8300/bitops.h | 212 ----------------- include/asm-h8300/bootinfo.h | 2 - include/asm-h8300/bug.h | 4 - include/asm-h8300/bugs.h | 16 -- include/asm-h8300/byteorder.h | 13 -- include/asm-h8300/cache.h | 12 - include/asm-h8300/cachectl.h | 14 -- include/asm-h8300/cacheflush.h | 39 ---- include/asm-h8300/checksum.h | 102 -------- include/asm-h8300/cputime.h | 6 - include/asm-h8300/current.h | 25 -- include/asm-h8300/dbg.h | 2 - include/asm-h8300/delay.h | 38 --- include/asm-h8300/device.h | 7 - include/asm-h8300/div64.h | 1 - include/asm-h8300/dma.h | 15 -- include/asm-h8300/elf.h | 104 --------- include/asm-h8300/emergency-restart.h | 6 - include/asm-h8300/errno.h | 6 - include/asm-h8300/fb.h | 12 - include/asm-h8300/fcntl.h | 11 - include/asm-h8300/flat.h | 27 --- include/asm-h8300/fpu.h | 1 - include/asm-h8300/futex.h | 6 - include/asm-h8300/gpio.h | 52 ----- include/asm-h8300/hardirq.h | 28 --- include/asm-h8300/hw_irq.h | 1 - include/asm-h8300/io.h | 324 ------------------------- include/asm-h8300/ioctl.h | 1 - include/asm-h8300/ioctls.h | 85 ------- include/asm-h8300/ipcbuf.h | 29 --- include/asm-h8300/irq.h | 49 ---- include/asm-h8300/irq_regs.h | 1 - include/asm-h8300/kdebug.h | 1 - include/asm-h8300/kmap_types.h | 21 -- include/asm-h8300/linkage.h | 8 - include/asm-h8300/local.h | 6 - include/asm-h8300/mc146818rtc.h | 9 - include/asm-h8300/md.h | 13 -- include/asm-h8300/mman.h | 17 -- include/asm-h8300/mmu.h | 11 - include/asm-h8300/mmu_context.h | 32 --- include/asm-h8300/module.h | 13 -- include/asm-h8300/msgbuf.h | 31 --- include/asm-h8300/mutex.h | 9 - include/asm-h8300/page.h | 78 ------- include/asm-h8300/page_offset.h | 3 - include/asm-h8300/param.h | 20 -- include/asm-h8300/pci.h | 25 -- include/asm-h8300/percpu.h | 6 - include/asm-h8300/pgalloc.h | 8 - include/asm-h8300/pgtable.h | 73 ------ include/asm-h8300/poll.h | 11 - include/asm-h8300/posix_types.h | 60 ----- include/asm-h8300/processor.h | 140 ----------- include/asm-h8300/ptrace.h | 64 ----- include/asm-h8300/regs267x.h | 336 -------------------------- include/asm-h8300/regs306x.h | 212 ----------------- include/asm-h8300/resource.h | 6 - include/asm-h8300/scatterlist.h | 18 -- include/asm-h8300/sections.h | 6 - include/asm-h8300/segment.h | 49 ---- include/asm-h8300/sembuf.h | 25 -- include/asm-h8300/setup.h | 6 - include/asm-h8300/sh_bios.h | 29 --- include/asm-h8300/shm.h | 31 --- include/asm-h8300/shmbuf.h | 42 ---- include/asm-h8300/shmparam.h | 6 - include/asm-h8300/sigcontext.h | 18 -- include/asm-h8300/siginfo.h | 6 - include/asm-h8300/signal.h | 161 ------------- include/asm-h8300/smp.h | 1 - include/asm-h8300/socket.h | 57 ----- 
include/asm-h8300/sockios.h | 13 -- include/asm-h8300/spinlock.h | 6 - include/asm-h8300/stat.h | 78 ------- include/asm-h8300/statfs.h | 6 - include/asm-h8300/string.h | 44 ---- include/asm-h8300/system.h | 158 ------------- include/asm-h8300/target_time.h | 4 - include/asm-h8300/termbits.h | 200 ---------------- include/asm-h8300/termios.h | 92 -------- include/asm-h8300/thread_info.h | 104 --------- include/asm-h8300/timex.h | 19 -- include/asm-h8300/tlb.h | 23 -- include/asm-h8300/tlbflush.h | 55 ----- include/asm-h8300/topology.h | 6 - include/asm-h8300/traps.h | 37 --- include/asm-h8300/types.h | 33 --- include/asm-h8300/uaccess.h | 162 ------------- include/asm-h8300/ucontext.h | 12 - include/asm-h8300/unaligned.h | 11 - include/asm-h8300/unistd.h | 364 ----------------------------- include/asm-h8300/user.h | 75 ------ include/asm-h8300/virtconvert.h | 20 -- 198 files changed, 4610 insertions(+), 4610 deletions(-) create mode 100644 arch/h8300/include/asm/Kbuild create mode 100644 arch/h8300/include/asm/a.out.h create mode 100644 arch/h8300/include/asm/atomic.h create mode 100644 arch/h8300/include/asm/auxvec.h create mode 100644 arch/h8300/include/asm/bitops.h create mode 100644 arch/h8300/include/asm/bootinfo.h create mode 100644 arch/h8300/include/asm/bug.h create mode 100644 arch/h8300/include/asm/bugs.h create mode 100644 arch/h8300/include/asm/byteorder.h create mode 100644 arch/h8300/include/asm/cache.h create mode 100644 arch/h8300/include/asm/cachectl.h create mode 100644 arch/h8300/include/asm/cacheflush.h create mode 100644 arch/h8300/include/asm/checksum.h create mode 100644 arch/h8300/include/asm/cputime.h create mode 100644 arch/h8300/include/asm/current.h create mode 100644 arch/h8300/include/asm/dbg.h create mode 100644 arch/h8300/include/asm/delay.h create mode 100644 arch/h8300/include/asm/device.h create mode 100644 arch/h8300/include/asm/div64.h create mode 100644 arch/h8300/include/asm/dma.h create mode 100644 arch/h8300/include/asm/elf.h create mode 100644 arch/h8300/include/asm/emergency-restart.h create mode 100644 arch/h8300/include/asm/errno.h create mode 100644 arch/h8300/include/asm/fb.h create mode 100644 arch/h8300/include/asm/fcntl.h create mode 100644 arch/h8300/include/asm/flat.h create mode 100644 arch/h8300/include/asm/fpu.h create mode 100644 arch/h8300/include/asm/futex.h create mode 100644 arch/h8300/include/asm/gpio.h create mode 100644 arch/h8300/include/asm/hardirq.h create mode 100644 arch/h8300/include/asm/hw_irq.h create mode 100644 arch/h8300/include/asm/io.h create mode 100644 arch/h8300/include/asm/ioctl.h create mode 100644 arch/h8300/include/asm/ioctls.h create mode 100644 arch/h8300/include/asm/ipcbuf.h create mode 100644 arch/h8300/include/asm/irq.h create mode 100644 arch/h8300/include/asm/irq_regs.h create mode 100644 arch/h8300/include/asm/kdebug.h create mode 100644 arch/h8300/include/asm/kmap_types.h create mode 100644 arch/h8300/include/asm/linkage.h create mode 100644 arch/h8300/include/asm/local.h create mode 100644 arch/h8300/include/asm/mc146818rtc.h create mode 100644 arch/h8300/include/asm/md.h create mode 100644 arch/h8300/include/asm/mman.h create mode 100644 arch/h8300/include/asm/mmu.h create mode 100644 arch/h8300/include/asm/mmu_context.h create mode 100644 arch/h8300/include/asm/module.h create mode 100644 arch/h8300/include/asm/msgbuf.h create mode 100644 arch/h8300/include/asm/mutex.h create mode 100644 arch/h8300/include/asm/page.h create mode 100644 arch/h8300/include/asm/page_offset.h create mode 100644 
arch/h8300/include/asm/param.h create mode 100644 arch/h8300/include/asm/pci.h create mode 100644 arch/h8300/include/asm/percpu.h create mode 100644 arch/h8300/include/asm/pgalloc.h create mode 100644 arch/h8300/include/asm/pgtable.h create mode 100644 arch/h8300/include/asm/poll.h create mode 100644 arch/h8300/include/asm/posix_types.h create mode 100644 arch/h8300/include/asm/processor.h create mode 100644 arch/h8300/include/asm/ptrace.h create mode 100644 arch/h8300/include/asm/regs267x.h create mode 100644 arch/h8300/include/asm/regs306x.h create mode 100644 arch/h8300/include/asm/resource.h create mode 100644 arch/h8300/include/asm/scatterlist.h create mode 100644 arch/h8300/include/asm/sections.h create mode 100644 arch/h8300/include/asm/segment.h create mode 100644 arch/h8300/include/asm/sembuf.h create mode 100644 arch/h8300/include/asm/setup.h create mode 100644 arch/h8300/include/asm/sh_bios.h create mode 100644 arch/h8300/include/asm/shm.h create mode 100644 arch/h8300/include/asm/shmbuf.h create mode 100644 arch/h8300/include/asm/shmparam.h create mode 100644 arch/h8300/include/asm/sigcontext.h create mode 100644 arch/h8300/include/asm/siginfo.h create mode 100644 arch/h8300/include/asm/signal.h create mode 100644 arch/h8300/include/asm/smp.h create mode 100644 arch/h8300/include/asm/socket.h create mode 100644 arch/h8300/include/asm/sockios.h create mode 100644 arch/h8300/include/asm/spinlock.h create mode 100644 arch/h8300/include/asm/stat.h create mode 100644 arch/h8300/include/asm/statfs.h create mode 100644 arch/h8300/include/asm/string.h create mode 100644 arch/h8300/include/asm/system.h create mode 100644 arch/h8300/include/asm/target_time.h create mode 100644 arch/h8300/include/asm/termbits.h create mode 100644 arch/h8300/include/asm/termios.h create mode 100644 arch/h8300/include/asm/thread_info.h create mode 100644 arch/h8300/include/asm/timex.h create mode 100644 arch/h8300/include/asm/tlb.h create mode 100644 arch/h8300/include/asm/tlbflush.h create mode 100644 arch/h8300/include/asm/topology.h create mode 100644 arch/h8300/include/asm/traps.h create mode 100644 arch/h8300/include/asm/types.h create mode 100644 arch/h8300/include/asm/uaccess.h create mode 100644 arch/h8300/include/asm/ucontext.h create mode 100644 arch/h8300/include/asm/unaligned.h create mode 100644 arch/h8300/include/asm/unistd.h create mode 100644 arch/h8300/include/asm/user.h create mode 100644 arch/h8300/include/asm/virtconvert.h delete mode 100644 include/asm-h8300/Kbuild delete mode 100644 include/asm-h8300/a.out.h delete mode 100644 include/asm-h8300/atomic.h delete mode 100644 include/asm-h8300/auxvec.h delete mode 100644 include/asm-h8300/bitops.h delete mode 100644 include/asm-h8300/bootinfo.h delete mode 100644 include/asm-h8300/bug.h delete mode 100644 include/asm-h8300/bugs.h delete mode 100644 include/asm-h8300/byteorder.h delete mode 100644 include/asm-h8300/cache.h delete mode 100644 include/asm-h8300/cachectl.h delete mode 100644 include/asm-h8300/cacheflush.h delete mode 100644 include/asm-h8300/checksum.h delete mode 100644 include/asm-h8300/cputime.h delete mode 100644 include/asm-h8300/current.h delete mode 100644 include/asm-h8300/dbg.h delete mode 100644 include/asm-h8300/delay.h delete mode 100644 include/asm-h8300/device.h delete mode 100644 include/asm-h8300/div64.h delete mode 100644 include/asm-h8300/dma.h delete mode 100644 include/asm-h8300/elf.h delete mode 100644 include/asm-h8300/emergency-restart.h delete mode 100644 include/asm-h8300/errno.h delete mode 100644 
include/asm-h8300/fb.h delete mode 100644 include/asm-h8300/fcntl.h delete mode 100644 include/asm-h8300/flat.h delete mode 100644 include/asm-h8300/fpu.h delete mode 100644 include/asm-h8300/futex.h delete mode 100644 include/asm-h8300/gpio.h delete mode 100644 include/asm-h8300/hardirq.h delete mode 100644 include/asm-h8300/hw_irq.h delete mode 100644 include/asm-h8300/io.h delete mode 100644 include/asm-h8300/ioctl.h delete mode 100644 include/asm-h8300/ioctls.h delete mode 100644 include/asm-h8300/ipcbuf.h delete mode 100644 include/asm-h8300/irq.h delete mode 100644 include/asm-h8300/irq_regs.h delete mode 100644 include/asm-h8300/kdebug.h delete mode 100644 include/asm-h8300/kmap_types.h delete mode 100644 include/asm-h8300/linkage.h delete mode 100644 include/asm-h8300/local.h delete mode 100644 include/asm-h8300/mc146818rtc.h delete mode 100644 include/asm-h8300/md.h delete mode 100644 include/asm-h8300/mman.h delete mode 100644 include/asm-h8300/mmu.h delete mode 100644 include/asm-h8300/mmu_context.h delete mode 100644 include/asm-h8300/module.h delete mode 100644 include/asm-h8300/msgbuf.h delete mode 100644 include/asm-h8300/mutex.h delete mode 100644 include/asm-h8300/page.h delete mode 100644 include/asm-h8300/page_offset.h delete mode 100644 include/asm-h8300/param.h delete mode 100644 include/asm-h8300/pci.h delete mode 100644 include/asm-h8300/percpu.h delete mode 100644 include/asm-h8300/pgalloc.h delete mode 100644 include/asm-h8300/pgtable.h delete mode 100644 include/asm-h8300/poll.h delete mode 100644 include/asm-h8300/posix_types.h delete mode 100644 include/asm-h8300/processor.h delete mode 100644 include/asm-h8300/ptrace.h delete mode 100644 include/asm-h8300/regs267x.h delete mode 100644 include/asm-h8300/regs306x.h delete mode 100644 include/asm-h8300/resource.h delete mode 100644 include/asm-h8300/scatterlist.h delete mode 100644 include/asm-h8300/sections.h delete mode 100644 include/asm-h8300/segment.h delete mode 100644 include/asm-h8300/sembuf.h delete mode 100644 include/asm-h8300/setup.h delete mode 100644 include/asm-h8300/sh_bios.h delete mode 100644 include/asm-h8300/shm.h delete mode 100644 include/asm-h8300/shmbuf.h delete mode 100644 include/asm-h8300/shmparam.h delete mode 100644 include/asm-h8300/sigcontext.h delete mode 100644 include/asm-h8300/siginfo.h delete mode 100644 include/asm-h8300/signal.h delete mode 100644 include/asm-h8300/smp.h delete mode 100644 include/asm-h8300/socket.h delete mode 100644 include/asm-h8300/sockios.h delete mode 100644 include/asm-h8300/spinlock.h delete mode 100644 include/asm-h8300/stat.h delete mode 100644 include/asm-h8300/statfs.h delete mode 100644 include/asm-h8300/string.h delete mode 100644 include/asm-h8300/system.h delete mode 100644 include/asm-h8300/target_time.h delete mode 100644 include/asm-h8300/termbits.h delete mode 100644 include/asm-h8300/termios.h delete mode 100644 include/asm-h8300/thread_info.h delete mode 100644 include/asm-h8300/timex.h delete mode 100644 include/asm-h8300/tlb.h delete mode 100644 include/asm-h8300/tlbflush.h delete mode 100644 include/asm-h8300/topology.h delete mode 100644 include/asm-h8300/traps.h delete mode 100644 include/asm-h8300/types.h delete mode 100644 include/asm-h8300/uaccess.h delete mode 100644 include/asm-h8300/ucontext.h delete mode 100644 include/asm-h8300/unaligned.h delete mode 100644 include/asm-h8300/unistd.h delete mode 100644 include/asm-h8300/user.h delete mode 100644 include/asm-h8300/virtconvert.h (limited to 'include') diff --git 
a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild new file mode 100644 index 00000000000..c68e1680da0 --- /dev/null +++ b/arch/h8300/include/asm/Kbuild @@ -0,0 +1 @@ +include include/asm-generic/Kbuild.asm diff --git a/arch/h8300/include/asm/a.out.h b/arch/h8300/include/asm/a.out.h new file mode 100644 index 00000000000..ded780f0a49 --- /dev/null +++ b/arch/h8300/include/asm/a.out.h @@ -0,0 +1,20 @@ +#ifndef __H8300_A_OUT_H__ +#define __H8300_A_OUT_H__ + +struct exec +{ + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#endif /* __H8300_A_OUT_H__ */ diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h new file mode 100644 index 00000000000..b4cf0ea97ed --- /dev/null +++ b/arch/h8300/include/asm/atomic.h @@ -0,0 +1,144 @@ +#ifndef __ARCH_H8300_ATOMIC__ +#define __ARCH_H8300_ATOMIC__ + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. + */ + +typedef struct { int counter; } atomic_t; +#define ATOMIC_INIT(i) { (i) } + +#define atomic_read(v) ((v)->counter) +#define atomic_set(v, i) (((v)->counter) = i) + +#include +#include + +static __inline__ int atomic_add_return(int i, atomic_t *v) +{ + int ret,flags; + local_irq_save(flags); + ret = v->counter += i; + local_irq_restore(flags); + return ret; +} + +#define atomic_add(i, v) atomic_add_return(i, v) +#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) + +static __inline__ int atomic_sub_return(int i, atomic_t *v) +{ + int ret,flags; + local_irq_save(flags); + ret = v->counter -= i; + local_irq_restore(flags); + return ret; +} + +#define atomic_sub(i, v) atomic_sub_return(i, v) +#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0) + +static __inline__ int atomic_inc_return(atomic_t *v) +{ + int ret,flags; + local_irq_save(flags); + v->counter++; + ret = v->counter; + local_irq_restore(flags); + return ret; +} + +#define atomic_inc(v) atomic_inc_return(v) + +/* + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. 
+ */ +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + +static __inline__ int atomic_dec_return(atomic_t *v) +{ + int ret,flags; + local_irq_save(flags); + --v->counter; + ret = v->counter; + local_irq_restore(flags); + return ret; +} + +#define atomic_dec(v) atomic_dec_return(v) + +static __inline__ int atomic_dec_and_test(atomic_t *v) +{ + int ret,flags; + local_irq_save(flags); + --v->counter; + ret = v->counter; + local_irq_restore(flags); + return ret == 0; +} + +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (likely(ret == old)) + v->counter = new; + local_irq_restore(flags); + return ret; +} + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +static inline int atomic_add_unless(atomic_t *v, int a, int u) +{ + int ret; + unsigned long flags; + + local_irq_save(flags); + ret = v->counter; + if (ret != u) + v->counter += a; + local_irq_restore(flags); + return ret != u; +} +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) +{ + __asm__ __volatile__("stc ccr,r1l\n\t" + "orc #0x80,ccr\n\t" + "mov.l %0,er0\n\t" + "and.l %1,er0\n\t" + "mov.l er0,%0\n\t" + "ldc r1l,ccr" + : "=m" (*v) : "g" (~(mask)) :"er0","er1"); +} + +static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) +{ + __asm__ __volatile__("stc ccr,r1l\n\t" + "orc #0x80,ccr\n\t" + "mov.l %0,er0\n\t" + "or.l %1,er0\n\t" + "mov.l er0,%0\n\t" + "ldc r1l,ccr" + : "=m" (*v) : "g" (mask) :"er0","er1"); +} + +/* Atomic operations are already serializing */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#include +#endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/arch/h8300/include/asm/auxvec.h b/arch/h8300/include/asm/auxvec.h new file mode 100644 index 00000000000..1d36fe38b08 --- /dev/null +++ b/arch/h8300/include/asm/auxvec.h @@ -0,0 +1,4 @@ +#ifndef __ASMH8300_AUXVEC_H +#define __ASMH8300_AUXVEC_H + +#endif diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h new file mode 100644 index 00000000000..cb18e3b0aa9 --- /dev/null +++ b/arch/h8300/include/asm/bitops.h @@ -0,0 +1,212 @@ +#ifndef _H8300_BITOPS_H +#define _H8300_BITOPS_H + +/* + * Copyright 1992, Linus Torvalds. + * Copyright 2002, Yoshinori Sato + */ + +#include +#include + +#ifdef __KERNEL__ + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +/* + * Function prototypes to keep gcc -Wall happy + */ + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. 
+ */ +static __inline__ unsigned long ffz(unsigned long word) +{ + unsigned long result; + + result = -1; + __asm__("1:\n\t" + "shlr.l %2\n\t" + "adds #1,%0\n\t" + "bcs 1b" + : "=r" (result) + : "0" (result),"r" (word)); + return result; +} + +#define H8300_GEN_BITOP_CONST(OP,BIT) \ + case BIT: \ + __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \ + break; + +#define H8300_GEN_BITOP(FNAME,OP) \ +static __inline__ void FNAME(int nr, volatile unsigned long* addr) \ +{ \ + volatile unsigned char *b_addr; \ + b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ + if (__builtin_constant_p(nr)) { \ + switch(nr & 7) { \ + H8300_GEN_BITOP_CONST(OP,0) \ + H8300_GEN_BITOP_CONST(OP,1) \ + H8300_GEN_BITOP_CONST(OP,2) \ + H8300_GEN_BITOP_CONST(OP,3) \ + H8300_GEN_BITOP_CONST(OP,4) \ + H8300_GEN_BITOP_CONST(OP,5) \ + H8300_GEN_BITOP_CONST(OP,6) \ + H8300_GEN_BITOP_CONST(OP,7) \ + } \ + } else { \ + __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \ + } \ +} + +/* + * clear_bit() doesn't provide any barrier for the compiler. + */ +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +H8300_GEN_BITOP(set_bit ,"bset") +H8300_GEN_BITOP(clear_bit ,"bclr") +H8300_GEN_BITOP(change_bit,"bnot") +#define __set_bit(nr,addr) set_bit((nr),(addr)) +#define __clear_bit(nr,addr) clear_bit((nr),(addr)) +#define __change_bit(nr,addr) change_bit((nr),(addr)) + +#undef H8300_GEN_BITOP +#undef H8300_GEN_BITOP_CONST + +static __inline__ int test_bit(int nr, const unsigned long* addr) +{ + return (*((volatile unsigned char *)addr + + ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0; +} + +#define __test_bit(nr, addr) test_bit(nr, addr) + +#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \ + case BIT: \ + __asm__("stc ccr,%w1\n\t" \ + "orc #0x80,ccr\n\t" \ + "bld #" #BIT ",@%4\n\t" \ + OP " #" #BIT ",@%4\n\t" \ + "rotxl.l %0\n\t" \ + "ldc %w1,ccr" \ + : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ + : "0" (retval),"r" (b_addr) \ + : "memory"); \ + break; + +#define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \ + case BIT: \ + __asm__("bld #" #BIT ",@%3\n\t" \ + OP " #" #BIT ",@%3\n\t" \ + "rotxl.l %0\n\t" \ + : "=r"(retval),"=m"(*b_addr) \ + : "0" (retval),"r" (b_addr) \ + : "memory"); \ + break; + +#define H8300_GEN_TEST_BITOP(FNNAME,OP) \ +static __inline__ int FNNAME(int nr, volatile void * addr) \ +{ \ + int retval = 0; \ + char ccrsave; \ + volatile unsigned char *b_addr; \ + b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ + if (__builtin_constant_p(nr)) { \ + switch(nr & 7) { \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \ + H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \ + } \ + } else { \ + __asm__("stc ccr,%w1\n\t" \ + "orc #0x80,ccr\n\t" \ + "btst %w5,@%4\n\t" \ + OP " %w5,@%4\n\t" \ + "beq 1f\n\t" \ + "inc.l #1,%0\n" \ + "1:\n\t" \ + "ldc %w1,ccr" \ + : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ + : "0" (retval),"r" (b_addr),"r"(nr) \ + : "memory"); \ + } \ + return retval; \ +} \ + \ +static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \ +{ \ + int retval = 0; \ + volatile unsigned char *b_addr; \ + b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ + if (__builtin_constant_p(nr)) { \ + switch(nr & 7) { \ + H8300_GEN_TEST_BITOP_CONST(OP,0) \ + H8300_GEN_TEST_BITOP_CONST(OP,1) \ + H8300_GEN_TEST_BITOP_CONST(OP,2) \ 
+ H8300_GEN_TEST_BITOP_CONST(OP,3) \ + H8300_GEN_TEST_BITOP_CONST(OP,4) \ + H8300_GEN_TEST_BITOP_CONST(OP,5) \ + H8300_GEN_TEST_BITOP_CONST(OP,6) \ + H8300_GEN_TEST_BITOP_CONST(OP,7) \ + } \ + } else { \ + __asm__("btst %w4,@%3\n\t" \ + OP " %w4,@%3\n\t" \ + "beq 1f\n\t" \ + "inc.l #1,%0\n" \ + "1:" \ + : "=r"(retval),"=m"(*b_addr) \ + : "0" (retval),"r" (b_addr),"r"(nr) \ + : "memory"); \ + } \ + return retval; \ +} + +H8300_GEN_TEST_BITOP(test_and_set_bit, "bset") +H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr") +H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") +#undef H8300_GEN_TEST_BITOP_CONST +#undef H8300_GEN_TEST_BITOP_CONST_INT +#undef H8300_GEN_TEST_BITOP + +#include + +static __inline__ unsigned long __ffs(unsigned long word) +{ + unsigned long result; + + result = -1; + __asm__("1:\n\t" + "shlr.l %2\n\t" + "adds #1,%0\n\t" + "bcc 1b" + : "=r" (result) + : "0"(result),"r"(word)); + return result; +} + +#include +#include +#include +#include +#include +#include +#include + +#endif /* __KERNEL__ */ + +#include +#include + +#endif /* _H8300_BITOPS_H */ diff --git a/arch/h8300/include/asm/bootinfo.h b/arch/h8300/include/asm/bootinfo.h new file mode 100644 index 00000000000..5bed7e7aac0 --- /dev/null +++ b/arch/h8300/include/asm/bootinfo.h @@ -0,0 +1,2 @@ + +/* Nothing for h8300 */ diff --git a/arch/h8300/include/asm/bug.h b/arch/h8300/include/asm/bug.h new file mode 100644 index 00000000000..edddf5b086e --- /dev/null +++ b/arch/h8300/include/asm/bug.h @@ -0,0 +1,4 @@ +#ifndef _H8300_BUG_H +#define _H8300_BUG_H +#include +#endif diff --git a/arch/h8300/include/asm/bugs.h b/arch/h8300/include/asm/bugs.h new file mode 100644 index 00000000000..1cb4afba6eb --- /dev/null +++ b/arch/h8300/include/asm/bugs.h @@ -0,0 +1,16 @@ +/* + * include/asm-h8300/bugs.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +/* + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Needs: + * void check_bugs(void); + */ + +static void check_bugs(void) +{ +} diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/asm/byteorder.h new file mode 100644 index 00000000000..36e597d6161 --- /dev/null +++ b/arch/h8300/include/asm/byteorder.h @@ -0,0 +1,13 @@ +#ifndef _H8300_BYTEORDER_H +#define _H8300_BYTEORDER_H + +#include + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +# define __SWAB_64_THRU_32__ +#endif + +#include + +#endif /* _H8300_BYTEORDER_H */ diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h new file mode 100644 index 00000000000..c6350283649 --- /dev/null +++ b/arch/h8300/include/asm/cache.h @@ -0,0 +1,12 @@ +#ifndef __ARCH_H8300_CACHE_H +#define __ARCH_H8300_CACHE_H + +/* bytes per L1 cache line */ +#define L1_CACHE_BYTES 4 + +/* m68k-elf-gcc 2.95.2 doesn't like these */ + +#define __cacheline_aligned +#define ____cacheline_aligned + +#endif diff --git a/arch/h8300/include/asm/cachectl.h b/arch/h8300/include/asm/cachectl.h new file mode 100644 index 00000000000..c464022d8e2 --- /dev/null +++ b/arch/h8300/include/asm/cachectl.h @@ -0,0 +1,14 @@ +#ifndef _H8300_CACHECTL_H +#define _H8300_CACHECTL_H + +/* Definitions for the cacheflush system call. 
*/ + +#define FLUSH_SCOPE_LINE 0 /* Flush a cache line */ +#define FLUSH_SCOPE_PAGE 0 /* Flush a page */ +#define FLUSH_SCOPE_ALL 0 /* Flush the whole cache -- superuser only */ + +#define FLUSH_CACHE_DATA 0 /* Writeback and flush data cache */ +#define FLUSH_CACHE_INSN 0 /* Flush instruction cache */ +#define FLUSH_CACHE_BOTH 0 /* Flush both caches */ + +#endif /* _H8300_CACHECTL_H */ diff --git a/arch/h8300/include/asm/cacheflush.h b/arch/h8300/include/asm/cacheflush.h new file mode 100644 index 00000000000..5ffdca217b9 --- /dev/null +++ b/arch/h8300/include/asm/cacheflush.h @@ -0,0 +1,39 @@ +/* + * (C) Copyright 2002, Yoshinori Sato + */ + +#ifndef _ASM_H8300_CACHEFLUSH_H +#define _ASM_H8300_CACHEFLUSH_H + +/* + * Cache handling functions + * No Cache memory all dummy functions + */ + +#define flush_cache_all() +#define flush_cache_mm(mm) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma,a,b) +#define flush_cache_page(vma,p,pfn) +#define flush_dcache_page(page) +#define flush_dcache_mmap_lock(mapping) +#define flush_dcache_mmap_unlock(mapping) +#define flush_icache() +#define flush_icache_page(vma,page) +#define flush_icache_range(start,len) +#define flush_cache_vmap(start, end) +#define flush_cache_vunmap(start, end) +#define cache_push_v(vaddr,len) +#define cache_push(paddr,len) +#define cache_clear(paddr,len) + +#define flush_dcache_range(a,b) + +#define flush_icache_user_range(vma,page,addr,len) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* _ASM_H8300_CACHEFLUSH_H */ diff --git a/arch/h8300/include/asm/checksum.h b/arch/h8300/include/asm/checksum.h new file mode 100644 index 00000000000..98724e12508 --- /dev/null +++ b/arch/h8300/include/asm/checksum.h @@ -0,0 +1,102 @@ +#ifndef _H8300_CHECKSUM_H +#define _H8300_CHECKSUM_H + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); + + +/* + * the same as csum_partial_copy, but copies from user space. 
+ * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ + +extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, + int len, __wsum sum, int *csum_err); + +__sum16 ip_fast_csum(const void *iph, unsigned int ihl); + + +/* + * Fold a partial checksum + */ + +static inline __sum16 csum_fold(__wsum sum) +{ + __asm__("mov.l %0,er0\n\t" + "add.w e0,r0\n\t" + "xor.w e0,e0\n\t" + "rotxl.w e0\n\t" + "add.w e0,r0\n\t" + "sub.w e0,e0\n\t" + "mov.l er0,%0" + : "=r"(sum) + : "0"(sum) + : "er0"); + return (__force __sum16)~sum; +} + + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ + +static inline __wsum +csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + __asm__ ("sub.l er0,er0\n\t" + "add.l %2,%0\n\t" + "addx #0,r0l\n\t" + "add.l %3,%0\n\t" + "addx #0,r0l\n\t" + "add.l %4,%0\n\t" + "addx #0,r0l\n\t" + "add.l er0,%0\n\t" + "bcc 1f\n\t" + "inc.l #1,%0\n" + "1:" + : "=&r" (sum) + : "0" (sum), "r" (daddr), "r" (saddr), "r" (len + proto) + :"er0"); + return sum; +} + +static inline __sum16 +csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, + unsigned short proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); +} + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern __sum16 ip_compute_csum(const void *buff, int len); + +#endif /* _H8300_CHECKSUM_H */ diff --git a/arch/h8300/include/asm/cputime.h b/arch/h8300/include/asm/cputime.h new file mode 100644 index 00000000000..092e187c7b0 --- /dev/null +++ b/arch/h8300/include/asm/cputime.h @@ -0,0 +1,6 @@ +#ifndef __H8300_CPUTIME_H +#define __H8300_CPUTIME_H + +#include + +#endif /* __H8300_CPUTIME_H */ diff --git a/arch/h8300/include/asm/current.h b/arch/h8300/include/asm/current.h new file mode 100644 index 00000000000..57d74ee55a1 --- /dev/null +++ b/arch/h8300/include/asm/current.h @@ -0,0 +1,25 @@ +#ifndef _H8300_CURRENT_H +#define _H8300_CURRENT_H +/* + * current.h + * (C) Copyright 2000, Lineo, David McCullough + * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) + * + * rather than dedicate a register (as the m68k source does), we + * just keep a global, we should probably just change it all to be + * current and lose _current_task. + */ + +#include +#include + +struct task_struct; + +static inline struct task_struct *get_current(void) +{ + return(current_thread_info()->task); +} + +#define current get_current() + +#endif /* _H8300_CURRENT_H */ diff --git a/arch/h8300/include/asm/dbg.h b/arch/h8300/include/asm/dbg.h new file mode 100644 index 00000000000..2c6d1cbcf73 --- /dev/null +++ b/arch/h8300/include/asm/dbg.h @@ -0,0 +1,2 @@ +#define DEBUG 1 +#define BREAK asm volatile ("trap #3") diff --git a/arch/h8300/include/asm/delay.h b/arch/h8300/include/asm/delay.h new file mode 100644 index 00000000000..743beba70f8 --- /dev/null +++ b/arch/h8300/include/asm/delay.h @@ -0,0 +1,38 @@ +#ifndef _H8300_DELAY_H +#define _H8300_DELAY_H + +#include + +/* + * Copyright (C) 2002 Yoshinori Sato + * + * Delay routines, using a pre-computed "loops_per_second" value. + */ + +static inline void __delay(unsigned long loops) +{ + __asm__ __volatile__ ("1:\n\t" + "dec.l #1,%0\n\t" + "bne 1b" + :"=r" (loops):"0"(loops)); +} + +/* + * Use only for very small delays ( < 1 msec). 
Should probably use a + * lookup table, really, as the multiplications take much too long with + * short delays. This is a "reasonable" implementation, though (and the + * first constant multiplications gets optimized away if the delay is + * a constant) + */ + +extern unsigned long loops_per_jiffy; + +static inline void udelay(unsigned long usecs) +{ + usecs *= 4295; /* 2**32 / 1000000 */ + usecs /= (loops_per_jiffy*HZ); + if (usecs) + __delay(usecs); +} + +#endif /* _H8300_DELAY_H */ diff --git a/arch/h8300/include/asm/device.h b/arch/h8300/include/asm/device.h new file mode 100644 index 00000000000..d8f9872b0e2 --- /dev/null +++ b/arch/h8300/include/asm/device.h @@ -0,0 +1,7 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include + diff --git a/arch/h8300/include/asm/div64.h b/arch/h8300/include/asm/div64.h new file mode 100644 index 00000000000..6cd978cefb2 --- /dev/null +++ b/arch/h8300/include/asm/div64.h @@ -0,0 +1 @@ +#include diff --git a/arch/h8300/include/asm/dma.h b/arch/h8300/include/asm/dma.h new file mode 100644 index 00000000000..3edbaaaedf5 --- /dev/null +++ b/arch/h8300/include/asm/dma.h @@ -0,0 +1,15 @@ +#ifndef _H8300_DMA_H +#define _H8300_DMA_H + + +/* + * Set number of channels of DMA on ColdFire for different implementations. + */ +#define MAX_DMA_CHANNELS 0 +#define MAX_DMA_ADDRESS PAGE_OFFSET + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ + +#endif /* _H8300_DMA_H */ diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h new file mode 100644 index 00000000000..a8b57d1f412 --- /dev/null +++ b/arch/h8300/include/asm/elf.h @@ -0,0 +1,104 @@ +#ifndef __ASMH8300_ELF_H +#define __ASMH8300_ELF_H + +/* + * ELF register definitions.. + */ + +#include +#include + +typedef unsigned long elf_greg_t; + +#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; +typedef unsigned long elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_H8_300) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 +#define ELF_DATA ELFDATA2MSB +#define ELF_ARCH EM_H8_300 +#if defined(__H8300H__) +#define ELF_CORE_EFLAGS 0x810000 +#endif +#if defined(__H8300S__) +#define ELF_CORE_EFLAGS 0x820000 +#endif + +#define ELF_PLAT_INIT(_r) _r->er1 = 0 + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE 0xD0000000UL + +/* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. 
*/ + +#define ELF_PLATFORM (NULL) + +#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX) + +#define R_H8_NONE 0 +#define R_H8_DIR32 1 +#define R_H8_DIR32_28 2 +#define R_H8_DIR32_24 3 +#define R_H8_DIR32_16 4 +#define R_H8_DIR32U 6 +#define R_H8_DIR32U_28 7 +#define R_H8_DIR32U_24 8 +#define R_H8_DIR32U_20 9 +#define R_H8_DIR32U_16 10 +#define R_H8_DIR24 11 +#define R_H8_DIR24_20 12 +#define R_H8_DIR24_16 13 +#define R_H8_DIR24U 14 +#define R_H8_DIR24U_20 15 +#define R_H8_DIR24U_16 16 +#define R_H8_DIR16 17 +#define R_H8_DIR16U 18 +#define R_H8_DIR16S_32 19 +#define R_H8_DIR16S_28 20 +#define R_H8_DIR16S_24 21 +#define R_H8_DIR16S_20 22 +#define R_H8_DIR16S 23 +#define R_H8_DIR8 24 +#define R_H8_DIR8U 25 +#define R_H8_DIR8Z_32 26 +#define R_H8_DIR8Z_28 27 +#define R_H8_DIR8Z_24 28 +#define R_H8_DIR8Z_20 29 +#define R_H8_DIR8Z_16 30 +#define R_H8_PCREL16 31 +#define R_H8_PCREL8 32 +#define R_H8_BPOS 33 +#define R_H8_PCREL32 34 +#define R_H8_GOT32O 35 +#define R_H8_GOT16O 36 +#define R_H8_DIR16A8 59 +#define R_H8_DIR16R8 60 +#define R_H8_DIR24A8 61 +#define R_H8_DIR24R8 62 +#define R_H8_DIR32A16 63 +#define R_H8_ABS32 65 +#define R_H8_ABS32A16 127 + +#endif diff --git a/arch/h8300/include/asm/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h new file mode 100644 index 00000000000..108d8c48e42 --- /dev/null +++ b/arch/h8300/include/asm/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include + +#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/h8300/include/asm/errno.h b/arch/h8300/include/asm/errno.h new file mode 100644 index 00000000000..0c2f5641fdc --- /dev/null +++ b/arch/h8300/include/asm/errno.h @@ -0,0 +1,6 @@ +#ifndef _H8300_ERRNO_H +#define _H8300_ERRNO_H + +#include + +#endif /* _H8300_ERRNO_H */ diff --git a/arch/h8300/include/asm/fb.h b/arch/h8300/include/asm/fb.h new file mode 100644 index 00000000000..c7df3803099 --- /dev/null +++ b/arch/h8300/include/asm/fb.h @@ -0,0 +1,12 @@ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ +#include + +#define fb_pgprotect(...) do {} while (0) + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* _ASM_FB_H_ */ diff --git a/arch/h8300/include/asm/fcntl.h b/arch/h8300/include/asm/fcntl.h new file mode 100644 index 00000000000..1952cb2e3b0 --- /dev/null +++ b/arch/h8300/include/asm/fcntl.h @@ -0,0 +1,11 @@ +#ifndef _H8300_FCNTL_H +#define _H8300_FCNTL_H + +#define O_DIRECTORY 040000 /* must be a directory */ +#define O_NOFOLLOW 0100000 /* don't follow links */ +#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ +#define O_LARGEFILE 0400000 + +#include + +#endif /* _H8300_FCNTL_H */ diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h new file mode 100644 index 00000000000..2a873508a9a --- /dev/null +++ b/arch/h8300/include/asm/flat.h @@ -0,0 +1,27 @@ +/* + * include/asm-h8300/flat.h -- uClinux flat-format executables + */ + +#ifndef __H8300_FLAT_H__ +#define __H8300_FLAT_H__ + +#define flat_stack_align(sp) /* nothing needed */ +#define flat_argvp_envp_on_stack() 1 +#define flat_old_ram_flag(flags) 1 +#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) +#define flat_set_persistent(relval, p) 0 + +/* + * on the H8 a couple of the relocations have an instruction in the + * top byte. 
As there can only be 24bits of address space, we just + * always preserve that 8bits at the top, when it isn't an instruction + * is is 0 (davidm@snapgear.com) + */ + +#define flat_get_relocate_addr(rel) (rel) +#define flat_get_addr_from_rp(rp, relval, flags, persistent) \ + (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff)) +#define flat_put_addr_at_rp(rp, addr, rel) \ + put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp) + +#endif /* __H8300_FLAT_H__ */ diff --git a/arch/h8300/include/asm/fpu.h b/arch/h8300/include/asm/fpu.h new file mode 100644 index 00000000000..4fc416e80be --- /dev/null +++ b/arch/h8300/include/asm/fpu.h @@ -0,0 +1 @@ +/* Nothing do */ diff --git a/arch/h8300/include/asm/futex.h b/arch/h8300/include/asm/futex.h new file mode 100644 index 00000000000..6a332a9f099 --- /dev/null +++ b/arch/h8300/include/asm/futex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#include + +#endif diff --git a/arch/h8300/include/asm/gpio.h b/arch/h8300/include/asm/gpio.h new file mode 100644 index 00000000000..a714f0c0efb --- /dev/null +++ b/arch/h8300/include/asm/gpio.h @@ -0,0 +1,52 @@ +#ifndef _H8300_GPIO_H +#define _H8300_GPIO_H + +#define H8300_GPIO_P1 0 +#define H8300_GPIO_P2 1 +#define H8300_GPIO_P3 2 +#define H8300_GPIO_P4 3 +#define H8300_GPIO_P5 4 +#define H8300_GPIO_P6 5 +#define H8300_GPIO_P7 6 +#define H8300_GPIO_P8 7 +#define H8300_GPIO_P9 8 +#define H8300_GPIO_PA 9 +#define H8300_GPIO_PB 10 +#define H8300_GPIO_PC 11 +#define H8300_GPIO_PD 12 +#define H8300_GPIO_PE 13 +#define H8300_GPIO_PF 14 +#define H8300_GPIO_PG 15 +#define H8300_GPIO_PH 16 + +#define H8300_GPIO_B7 0x80 +#define H8300_GPIO_B6 0x40 +#define H8300_GPIO_B5 0x20 +#define H8300_GPIO_B4 0x10 +#define H8300_GPIO_B3 0x08 +#define H8300_GPIO_B2 0x04 +#define H8300_GPIO_B1 0x02 +#define H8300_GPIO_B0 0x01 + +#define H8300_GPIO_INPUT 0 +#define H8300_GPIO_OUTPUT 1 + +#define H8300_GPIO_RESERVE(port, bits) \ + h8300_reserved_gpio(port, bits) + +#define H8300_GPIO_FREE(port, bits) \ + h8300_free_gpio(port, bits) + +#define H8300_GPIO_DDR(port, bit, dir) \ + h8300_set_gpio_dir(((port) << 8) | (bit), dir) + +#define H8300_GPIO_GETDIR(port, bit) \ + h8300_get_gpio_dir(((port) << 8) | (bit)) + +extern int h8300_reserved_gpio(int port, int bits); +extern int h8300_free_gpio(int port, int bits); +extern int h8300_set_gpio_dir(int port_bit, int dir); +extern int h8300_get_gpio_dir(int port_bit); +extern int h8300_init_gpio(void); + +#endif diff --git a/arch/h8300/include/asm/hardirq.h b/arch/h8300/include/asm/hardirq.h new file mode 100644 index 00000000000..9d7f7a7462b --- /dev/null +++ b/arch/h8300/include/asm/hardirq.h @@ -0,0 +1,28 @@ +#ifndef __H8300_HARDIRQ_H +#define __H8300_HARDIRQ_H + +#include +#include +#include +#include + +typedef struct { + unsigned int __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +extern void ack_bad_irq(unsigned int irq); + +#define HARDIRQ_BITS 8 + +/* + * The hardirq mask has to be large enough to have + * space for potentially all IRQ sources in the system + * nesting on a single CPU: + */ +#if (1 << HARDIRQ_BITS) < NR_IRQS +# error HARDIRQ_BITS is too low! 
+#endif + +#endif diff --git a/arch/h8300/include/asm/hw_irq.h b/arch/h8300/include/asm/hw_irq.h new file mode 100644 index 00000000000..d75a5a1119e --- /dev/null +++ b/arch/h8300/include/asm/hw_irq.h @@ -0,0 +1 @@ +/* Do Nothing */ diff --git a/arch/h8300/include/asm/io.h b/arch/h8300/include/asm/io.h new file mode 100644 index 00000000000..26dc6ccd944 --- /dev/null +++ b/arch/h8300/include/asm/io.h @@ -0,0 +1,324 @@ +#ifndef _H8300_IO_H +#define _H8300_IO_H + +#ifdef __KERNEL__ + +#include + +#if defined(CONFIG_H83007) || defined(CONFIG_H83068) +#include +#elif defined(CONFIG_H8S2678) +#include +#else +#error UNKNOWN CPU TYPE +#endif + + +/* + * These are for ISA/PCI shared memory _only_ and should never be used + * on any other type of memory, including Zorro memory. They are meant to + * access the bus in the bus byte order which is little-endian!. + * + * readX/writeX() are used to access memory mapped devices. On some + * architectures the memory mapped IO stuff needs to be accessed + * differently. On the m68k architecture, we just read/write the + * memory location directly. + */ +/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates + * two accesses to memory, which may be undesireable for some devices. + */ + +/* + * swap functions are sometimes needed to interface little-endian hardware + */ + +static inline unsigned short _swapw(volatile unsigned short v) +{ +#ifndef H8300_IO_NOSWAP + unsigned short r; + __asm__("xor.b %w0,%x0\n\t" + "xor.b %x0,%w0\n\t" + "xor.b %w0,%x0" + :"=r"(r) + :"0"(v)); + return r; +#else + return v; +#endif +} + +static inline unsigned long _swapl(volatile unsigned long v) +{ +#ifndef H8300_IO_NOSWAP + unsigned long r; + __asm__("xor.b %w0,%x0\n\t" + "xor.b %x0,%w0\n\t" + "xor.b %w0,%x0\n\t" + "xor.w %e0,%f0\n\t" + "xor.w %f0,%e0\n\t" + "xor.w %e0,%f0\n\t" + "xor.b %w0,%x0\n\t" + "xor.b %x0,%w0\n\t" + "xor.b %w0,%x0" + :"=r"(r) + :"0"(v)); + return r; +#else + return v; +#endif +} + +#define readb(addr) \ + ({ unsigned char __v = \ + *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \ + __v; }) +#define readw(addr) \ + ({ unsigned short __v = \ + *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \ + __v; }) +#define readl(addr) \ + ({ unsigned long __v = \ + *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \ + __v; }) + +#define writeb(b,addr) (void)((*(volatile unsigned char *) \ + ((unsigned long)(addr) & 0x00ffffff)) = (b)) +#define writew(b,addr) (void)((*(volatile unsigned short *) \ + ((unsigned long)(addr) & 0x00ffffff)) = (b)) +#define writel(b,addr) (void)((*(volatile unsigned long *) \ + ((unsigned long)(addr) & 0x00ffffff)) = (b)) +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) + +#define __raw_readb readb +#define __raw_readw readw +#define __raw_readl readl +#define __raw_writeb writeb +#define __raw_writew writew +#define __raw_writel writel + +static inline int h8300_buswidth(unsigned int addr) +{ + return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0; +} + +static inline void io_outsb(unsigned int addr, const void *buf, int len) +{ + volatile unsigned char *ap_b = (volatile unsigned char *) addr; + volatile unsigned short *ap_w = (volatile unsigned short *) addr; + unsigned char *bp = (unsigned char *) buf; + + if(h8300_buswidth(addr) && (addr & 1)) { + while (len--) + *ap_w = *bp++; + } else { + while (len--) + *ap_b = *bp++; + } +} + +static inline void io_outsw(unsigned 
int addr, const void *buf, int len) +{ + volatile unsigned short *ap = (volatile unsigned short *) addr; + unsigned short *bp = (unsigned short *) buf; + while (len--) + *ap = _swapw(*bp++); +} + +static inline void io_outsl(unsigned int addr, const void *buf, int len) +{ + volatile unsigned long *ap = (volatile unsigned long *) addr; + unsigned long *bp = (unsigned long *) buf; + while (len--) + *ap = _swapl(*bp++); +} + +static inline void io_outsw_noswap(unsigned int addr, const void *buf, int len) +{ + volatile unsigned short *ap = (volatile unsigned short *) addr; + unsigned short *bp = (unsigned short *) buf; + while (len--) + *ap = *bp++; +} + +static inline void io_outsl_noswap(unsigned int addr, const void *buf, int len) +{ + volatile unsigned long *ap = (volatile unsigned long *) addr; + unsigned long *bp = (unsigned long *) buf; + while (len--) + *ap = *bp++; +} + +static inline void io_insb(unsigned int addr, void *buf, int len) +{ + volatile unsigned char *ap_b; + volatile unsigned short *ap_w; + unsigned char *bp = (unsigned char *) buf; + + if(h8300_buswidth(addr)) { + ap_w = (volatile unsigned short *)(addr & ~1); + while (len--) + *bp++ = *ap_w & 0xff; + } else { + ap_b = (volatile unsigned char *)addr; + while (len--) + *bp++ = *ap_b; + } +} + +static inline void io_insw(unsigned int addr, void *buf, int len) +{ + volatile unsigned short *ap = (volatile unsigned short *) addr; + unsigned short *bp = (unsigned short *) buf; + while (len--) + *bp++ = _swapw(*ap); +} + +static inline void io_insl(unsigned int addr, void *buf, int len) +{ + volatile unsigned long *ap = (volatile unsigned long *) addr; + unsigned long *bp = (unsigned long *) buf; + while (len--) + *bp++ = _swapl(*ap); +} + +static inline void io_insw_noswap(unsigned int addr, void *buf, int len) +{ + volatile unsigned short *ap = (volatile unsigned short *) addr; + unsigned short *bp = (unsigned short *) buf; + while (len--) + *bp++ = *ap; +} + +static inline void io_insl_noswap(unsigned int addr, void *buf, int len) +{ + volatile unsigned long *ap = (volatile unsigned long *) addr; + unsigned long *bp = (unsigned long *) buf; + while (len--) + *bp++ = *ap; +} + +/* + * make the short names macros so specific devices + * can override them as required + */ + +#define memset_io(a,b,c) memset((void *)(a),(b),(c)) +#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) +#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) + +#define mmiowb() + +#define inb(addr) ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr)) +#define inw(addr) _swapw(readw(addr)) +#define inl(addr) _swapl(readl(addr)) +#define outb(x,addr) ((void)((h8300_buswidth(addr) && \ + ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr))) +#define outw(x,addr) ((void) writew(_swapw(x),addr)) +#define outl(x,addr) ((void) writel(_swapl(x),addr)) + +#define inb_p(addr) inb(addr) +#define inw_p(addr) inw(addr) +#define inl_p(addr) inl(addr) +#define outb_p(x,addr) outb(x,addr) +#define outw_p(x,addr) outw(x,addr) +#define outl_p(x,addr) outl(x,addr) + +#define outsb(a,b,l) io_outsb(a,b,l) +#define outsw(a,b,l) io_outsw(a,b,l) +#define outsl(a,b,l) io_outsl(a,b,l) + +#define insb(a,b,l) io_insb(a,b,l) +#define insw(a,b,l) io_insw(a,b,l) +#define insl(a,b,l) io_insl(a,b,l) + +#define IO_SPACE_LIMIT 0xffffff + + +/* Values for nocacheflag and cmode */ +#define IOMAP_FULL_CACHING 0 +#define IOMAP_NOCACHE_SER 1 +#define IOMAP_NOCACHE_NONSER 2 +#define IOMAP_WRITETHROUGH 3 + +extern void *__ioremap(unsigned long physaddr, unsigned long size, int 
cacheflag); +extern void __iounmap(void *addr, unsigned long size); + +static inline void *ioremap(unsigned long physaddr, unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} +static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); +} +static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); +} +static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size) +{ + return __ioremap(physaddr, size, IOMAP_FULL_CACHING); +} + +extern void iounmap(void *addr); + +/* H8/300 internal I/O functions */ +static __inline__ unsigned char ctrl_inb(unsigned long addr) +{ + return *(volatile unsigned char*)addr; +} + +static __inline__ unsigned short ctrl_inw(unsigned long addr) +{ + return *(volatile unsigned short*)addr; +} + +static __inline__ unsigned long ctrl_inl(unsigned long addr) +{ + return *(volatile unsigned long*)addr; +} + +static __inline__ void ctrl_outb(unsigned char b, unsigned long addr) +{ + *(volatile unsigned char*)addr = b; +} + +static __inline__ void ctrl_outw(unsigned short b, unsigned long addr) +{ + *(volatile unsigned short*)addr = b; +} + +static __inline__ void ctrl_outl(unsigned long b, unsigned long addr) +{ + *(volatile unsigned long*)addr = b; +} + +/* Pages to physical address... */ +#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) +#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT) + +/* + * Macros used for converting between virtual and physical mappings. + */ +#define phys_to_virt(vaddr) ((void *) (vaddr)) +#define virt_to_phys(vaddr) ((unsigned long) (vaddr)) + +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#endif /* __KERNEL__ */ + +#endif /* _H8300_IO_H */ diff --git a/arch/h8300/include/asm/ioctl.h b/arch/h8300/include/asm/ioctl.h new file mode 100644 index 00000000000..b279fe06dfe --- /dev/null +++ b/arch/h8300/include/asm/ioctl.h @@ -0,0 +1 @@ +#include diff --git a/arch/h8300/include/asm/ioctls.h b/arch/h8300/include/asm/ioctls.h new file mode 100644 index 00000000000..98a53d06726 --- /dev/null +++ b/arch/h8300/include/asm/ioctls.h @@ -0,0 +1,85 @@ +#ifndef __ARCH_H8300_IOCTLS_H__ +#define __ARCH_H8300_IOCTLS_H__ + +#include + +/* 0x54 is just a magic number to make these relatively unique ('T') */ + +#define TCGETS 0x5401 +#define TCSETS 0x5402 +#define TCSETSW 0x5403 +#define TCSETSF 0x5404 +#define TCGETA 0x5405 +#define TCSETA 0x5406 +#define TCSETAW 0x5407 +#define TCSETAF 0x5408 +#define TCSBRK 0x5409 +#define TCXONC 0x540A +#define TCFLSH 0x540B +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E +#define TIOCGPGRP 0x540F +#define TIOCSPGRP 0x5410 +#define TIOCOUTQ 0x5411 +#define TIOCSTI 0x5412 +#define TIOCGWINSZ 0x5413 +#define TIOCSWINSZ 0x5414 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define FIONREAD 0x541B +#define TIOCINQ FIONREAD +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define FIONBIO 0x5421 +#define TIOCNOTTY 0x5422 +#define 
TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TCGETS2 _IOR('T',0x2A, struct termios2) +#define TCSETS2 _IOW('T',0x2B, struct termios2) +#define TCSETSW2 _IOW('T',0x2C, struct termios2) +#define TCSETSF2 _IOW('T',0x2D, struct termios2) +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ +#define FIOCLEX 0x5451 +#define FIOASYNC 0x5452 +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define FIOQSIZE 0x545E + +/* Used for packet mode */ +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 + +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + +#endif /* __ARCH_H8300_IOCTLS_H__ */ diff --git a/arch/h8300/include/asm/ipcbuf.h b/arch/h8300/include/asm/ipcbuf.h new file mode 100644 index 00000000000..2cd1ebcc109 --- /dev/null +++ b/arch/h8300/include/asm/ipcbuf.h @@ -0,0 +1,29 @@ +#ifndef __H8300_IPCBUF_H__ +#define __H8300_IPCBUF_H__ + +/* + * The user_ipc_perm structure for H8/300 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 32-bit mode_t and seq + * - 2 miscellaneous 32-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned short __pad1; + unsigned short seq; + unsigned short __pad2; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* __H8300_IPCBUF_H__ */ diff --git a/arch/h8300/include/asm/irq.h b/arch/h8300/include/asm/irq.h new file mode 100644 index 00000000000..13d7c601cd0 --- /dev/null +++ b/arch/h8300/include/asm/irq.h @@ -0,0 +1,49 @@ +#ifndef _H8300_IRQ_H_ +#define _H8300_IRQ_H_ + +#include + +#if defined(CONFIG_CPU_H8300H) +#define NR_IRQS 64 +#define EXT_IRQ0 12 +#define EXT_IRQ1 13 +#define EXT_IRQ2 14 +#define EXT_IRQ3 15 +#define EXT_IRQ4 16 +#define EXT_IRQ5 17 +#define EXT_IRQ6 18 +#define EXT_IRQ7 19 +#define EXT_IRQS 5 +#define IER_REGS *(volatile unsigned char *)IER +#endif +#if defined(CONFIG_CPU_H8S) +#define NR_IRQS 128 +#define EXT_IRQ0 16 +#define EXT_IRQ1 17 +#define EXT_IRQ2 18 +#define EXT_IRQ3 19 +#define EXT_IRQ4 20 +#define EXT_IRQ5 21 +#define EXT_IRQ6 22 +#define EXT_IRQ7 23 +#define EXT_IRQ8 24 +#define EXT_IRQ9 25 +#define EXT_IRQ10 26 +#define EXT_IRQ11 27 +#define EXT_IRQ12 28 +#define EXT_IRQ13 29 +#define EXT_IRQ14 30 +#define EXT_IRQ15 31 +#define EXT_IRQS 15 + +#define IER_REGS *(volatile unsigned short *)IER +#endif + +static __inline__ int irq_canonicalize(int irq) +{ + return irq; +} + +typedef void (*h8300_vector)(void); + +#endif /* _H8300_IRQ_H_ */ diff --git a/arch/h8300/include/asm/irq_regs.h b/arch/h8300/include/asm/irq_regs.h new file mode 100644 index 00000000000..3dd9c0b7027 --- /dev/null +++ b/arch/h8300/include/asm/irq_regs.h @@ -0,0 +1 @@ +#include diff --git a/arch/h8300/include/asm/kdebug.h b/arch/h8300/include/asm/kdebug.h new file mode 100644 index 00000000000..6ece1b03766 --- /dev/null +++ b/arch/h8300/include/asm/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h new file mode 100644 index 00000000000..1ec8a342712 --- /dev/null +++ b/arch/h8300/include/asm/kmap_types.h @@ -0,0 +1,21 @@ +#ifndef _ASM_H8300_KMAP_TYPES_H +#define _ASM_H8300_KMAP_TYPES_H + +enum km_type { + KM_BOUNCE_READ, + KM_SKB_SUNRPC_DATA, + KM_SKB_DATA_SOFTIRQ, + KM_USER0, + KM_USER1, + KM_BIO_SRC_IRQ, + KM_BIO_DST_IRQ, + KM_PTE0, + KM_PTE1, + KM_IRQ0, + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, + KM_TYPE_NR +}; + +#endif diff --git a/arch/h8300/include/asm/linkage.h b/arch/h8300/include/asm/linkage.h new file mode 100644 index 00000000000..6f4df7d4618 --- /dev/null +++ b/arch/h8300/include/asm/linkage.h @@ -0,0 +1,8 @@ +#ifndef _H8300_LINKAGE_H +#define _H8300_LINKAGE_H + +#undef SYMBOL_NAME_LABEL +#undef SYMBOL_NAME +#define SYMBOL_NAME_LABEL(_name_) _##_name_##: +#define SYMBOL_NAME(_name_) _##_name_ +#endif diff --git a/arch/h8300/include/asm/local.h b/arch/h8300/include/asm/local.h new file mode 100644 index 00000000000..fdd4efe437c --- /dev/null +++ b/arch/h8300/include/asm/local.h @@ -0,0 +1,6 @@ +#ifndef _H8300_LOCAL_H_ +#define _H8300_LOCAL_H_ + +#include + +#endif diff --git a/arch/h8300/include/asm/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h new file mode 100644 index 00000000000..ab9d9646d24 --- /dev/null +++ b/arch/h8300/include/asm/mc146818rtc.h @@ -0,0 +1,9 @@ +/* + * Machine dependent access functions for RTC registers. 
+ */ +#ifndef _H8300_MC146818RTC_H +#define _H8300_MC146818RTC_H + +/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */ + +#endif /* _H8300_MC146818RTC_H */ diff --git a/arch/h8300/include/asm/md.h b/arch/h8300/include/asm/md.h new file mode 100644 index 00000000000..1a47dc6691f --- /dev/null +++ b/arch/h8300/include/asm/md.h @@ -0,0 +1,13 @@ +/* $Id: md.h,v 1.1 2002/11/19 02:09:26 gerg Exp $ + * md.h: High speed xor_block operation for RAID4/5 + * + */ + +#ifndef __ASM_MD_H +#define __ASM_MD_H + +/* #define HAVE_ARCH_XORBLOCK */ + +#define MD_XORBLOCK_ALIGNMENT sizeof(long) + +#endif /* __ASM_MD_H */ diff --git a/arch/h8300/include/asm/mman.h b/arch/h8300/include/asm/mman.h new file mode 100644 index 00000000000..b9f104f22a3 --- /dev/null +++ b/arch/h8300/include/asm/mman.h @@ -0,0 +1,17 @@ +#ifndef __H8300_MMAN_H__ +#define __H8300_MMAN_H__ + +#include + +#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ +#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ +#define MAP_LOCKED 0x2000 /* pages are locked */ +#define MAP_NORESERVE 0x4000 /* don't check for reservations */ +#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x10000 /* do not block on IO */ + +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#endif /* __H8300_MMAN_H__ */ diff --git a/arch/h8300/include/asm/mmu.h b/arch/h8300/include/asm/mmu.h new file mode 100644 index 00000000000..2ce06ea4610 --- /dev/null +++ b/arch/h8300/include/asm/mmu.h @@ -0,0 +1,11 @@ +#ifndef __MMU_H +#define __MMU_H + +/* Copyright (C) 2002, David McCullough */ + +typedef struct { + struct vm_list_struct *vmlist; + unsigned long end_brk; +} mm_context_t; + +#endif diff --git a/arch/h8300/include/asm/mmu_context.h b/arch/h8300/include/asm/mmu_context.h new file mode 100644 index 00000000000..f44b730da54 --- /dev/null +++ b/arch/h8300/include/asm/mmu_context.h @@ -0,0 +1,32 @@ +#ifndef __H8300_MMU_CONTEXT_H +#define __H8300_MMU_CONTEXT_H + +#include +#include +#include +#include + +static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + // mm->context = virt_to_phys(mm->pgd); + return(0); +} + +#define destroy_context(mm) do { } while(0) +#define deactivate_mm(tsk,mm) do { } while(0) + +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) +{ +} + +static inline void activate_mm(struct mm_struct *prev_mm, + struct mm_struct *next_mm) +{ +} + +#endif diff --git a/arch/h8300/include/asm/module.h b/arch/h8300/include/asm/module.h new file mode 100644 index 00000000000..de23231f319 --- /dev/null +++ b/arch/h8300/include/asm/module.h @@ -0,0 +1,13 @@ +#ifndef _ASM_H8300_MODULE_H +#define _ASM_H8300_MODULE_H +/* + * This file contains the H8/300 architecture specific module code. + */ +struct mod_arch_specific { }; +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Ehdr Elf32_Ehdr + +#define MODULE_SYMBOL_PREFIX "_" + +#endif /* _ASM_H8/300_MODULE_H */ diff --git a/arch/h8300/include/asm/msgbuf.h b/arch/h8300/include/asm/msgbuf.h new file mode 100644 index 00000000000..6b148cd09aa --- /dev/null +++ b/arch/h8300/include/asm/msgbuf.h @@ -0,0 +1,31 @@ +#ifndef _H8300_MSGBUF_H +#define _H8300_MSGBUF_H + +/* + * The msqid64_ds structure for H8/300 architecture. 
+ * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + unsigned long __unused1; + __kernel_time_t msg_rtime; /* last msgrcv time */ + unsigned long __unused2; + __kernel_time_t msg_ctime; /* last change time */ + unsigned long __unused3; + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* _H8300_MSGBUF_H */ diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h new file mode 100644 index 00000000000..458c1f7fbc1 --- /dev/null +++ b/arch/h8300/include/asm/mutex.h @@ -0,0 +1,9 @@ +/* + * Pull in the generic implementation for the mutex fastpath. + * + * TODO: implement optimized primitives instead, or leave the generic + * implementation in place, or pick the atomic_xchg() based generic + * implementation. (see asm-generic/mutex-xchg.h for details) + */ + +#include diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h new file mode 100644 index 00000000000..0b6acf0b03a --- /dev/null +++ b/arch/h8300/include/asm/page.h @@ -0,0 +1,78 @@ +#ifndef _H8300_PAGE_H +#define _H8300_PAGE_H + +/* PAGE_SHIFT determines the page size */ + +#define PAGE_SHIFT (12) +#define PAGE_SIZE (1UL << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#include + +#ifndef __ASSEMBLY__ + +#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) +#define free_user_page(page, addr) free_page(addr) + +#define clear_page(page) memset((page), 0, PAGE_SIZE) +#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(page, vaddr, pg) clear_page(page) +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +/* + * These are used to make use of C type-checking.. 
+ */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd[16]; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((&x)->pmd[0]) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +extern unsigned long memory_start; +extern unsigned long memory_end; + +#endif /* !__ASSEMBLY__ */ + +#include + +#define PAGE_OFFSET (PAGE_OFFSET_RAW) + +#ifndef __ASSEMBLY__ + +#define __pa(vaddr) virt_to_phys(vaddr) +#define __va(paddr) phys_to_virt((unsigned long)paddr) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) +#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) + +#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) +#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) +#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) +#define pfn_valid(page) (page < max_mapnr) + +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) + +#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ + ((void *)(kaddr) < (void *)memory_end)) + +#endif /* __ASSEMBLY__ */ + +#include +#include + +#endif /* _H8300_PAGE_H */ diff --git a/arch/h8300/include/asm/page_offset.h b/arch/h8300/include/asm/page_offset.h new file mode 100644 index 00000000000..f8706463008 --- /dev/null +++ b/arch/h8300/include/asm/page_offset.h @@ -0,0 +1,3 @@ + +#define PAGE_OFFSET_RAW 0x00000000 + diff --git a/arch/h8300/include/asm/param.h b/arch/h8300/include/asm/param.h new file mode 100644 index 00000000000..1c72fb8080f --- /dev/null +++ b/arch/h8300/include/asm/param.h @@ -0,0 +1,20 @@ +#ifndef _H8300_PARAM_H +#define _H8300_PARAM_H + +#ifdef __KERNEL__ +#define HZ CONFIG_HZ +#define USER_HZ HZ +#define CLOCKS_PER_SEC (USER_HZ) +#else +#define HZ 100 +#endif + +#define EXEC_PAGESIZE 4096 + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#endif /* _H8300_PARAM_H */ diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h new file mode 100644 index 00000000000..97389b35aa3 --- /dev/null +++ b/arch/h8300/include/asm/pci.h @@ -0,0 +1,25 @@ +#ifndef _ASM_H8300_PCI_H +#define _ASM_H8300_PCI_H + +/* + * asm-h8300/pci.h - H8/300 specific PCI declarations. 
+ * + * Yoshinori Sato + */ + +#define pcibios_assign_all_busses() 0 +#define pcibios_scan_all_fns(a, b) 0 + +static inline void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +static inline void pcibios_penalize_isa_irq(int irq, int active) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +#define PCI_DMA_BUS_IS_PHYS (1) + +#endif /* _ASM_H8300_PCI_H */ diff --git a/arch/h8300/include/asm/percpu.h b/arch/h8300/include/asm/percpu.h new file mode 100644 index 00000000000..72c03e3666d --- /dev/null +++ b/arch/h8300/include/asm/percpu.h @@ -0,0 +1,6 @@ +#ifndef __ARCH_H8300_PERCPU__ +#define __ARCH_H8300_PERCPU__ + +#include + +#endif /* __ARCH_H8300_PERCPU__ */ diff --git a/arch/h8300/include/asm/pgalloc.h b/arch/h8300/include/asm/pgalloc.h new file mode 100644 index 00000000000..c2e89a286d2 --- /dev/null +++ b/arch/h8300/include/asm/pgalloc.h @@ -0,0 +1,8 @@ +#ifndef _H8300_PGALLOC_H +#define _H8300_PGALLOC_H + +#include + +#define check_pgt_cache() do { } while (0) + +#endif /* _H8300_PGALLOC_H */ diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h new file mode 100644 index 00000000000..a09230a08e0 --- /dev/null +++ b/arch/h8300/include/asm/pgtable.h @@ -0,0 +1,73 @@ +#ifndef _H8300_PGTABLE_H +#define _H8300_PGTABLE_H + +#include + +#include +#include +#include +#include + +#define pgd_present(pgd) (1) /* pages are always present on NO_MM */ +#define pgd_none(pgd) (0) +#define pgd_bad(pgd) (0) +#define pgd_clear(pgdp) +#define kern_addr_valid(addr) (1) +#define pmd_offset(a, b) ((void *)0) +#define pmd_none(pmd) (1) +#define pgd_offset_k(adrdress) ((pgd_t *)0) +#define pte_offset_kernel(dir, address) ((pte_t *)0) + +#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */ +#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */ + +extern void paging_init(void); +#define swapper_pg_dir ((pgd_t *) 0) + +#define __swp_type(x) (0) +#define __swp_offset(x) (0) +#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +static inline int pte_file(pte_t pte) { return 0; } + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +#define ZERO_PAGE(vaddr) (virt_to_page(0)) + +/* + * These would be in other places but having them here reduces the diffs. + */ +extern unsigned int kobjsize(const void *objp); +extern int is_in_rom(unsigned long); + +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) + +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + +/* + * All 32bit addresses are effectively valid for vmalloc... + * Sort of meaningless for non-VM targets. + */ +#define VMALLOC_START 0 +#define VMALLOC_END 0xffffffff + +/* + * All 32bit addresses are effectively valid for vmalloc... + * Sort of meaningless for non-VM targets. 
+ */ +#define VMALLOC_START 0 +#define VMALLOC_END 0xffffffff + +#define arch_enter_lazy_cpu_mode() do {} while (0) +#endif /* _H8300_PGTABLE_H */ diff --git a/arch/h8300/include/asm/poll.h b/arch/h8300/include/asm/poll.h new file mode 100644 index 00000000000..f61540c22d9 --- /dev/null +++ b/arch/h8300/include/asm/poll.h @@ -0,0 +1,11 @@ +#ifndef __H8300_POLL_H +#define __H8300_POLL_H + +#define POLLWRNORM POLLOUT +#define POLLWRBAND 256 + +#include + +#undef POLLREMOVE + +#endif diff --git a/arch/h8300/include/asm/posix_types.h b/arch/h8300/include/asm/posix_types.h new file mode 100644 index 00000000000..5c553927fc5 --- /dev/null +++ b/arch/h8300/include/asm/posix_types.h @@ -0,0 +1,60 @@ +#ifndef __ARCH_H8300_POSIX_TYPES_H +#define __ARCH_H8300_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned short __kernel_mode_t; +typedef unsigned short __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef unsigned short __kernel_ipc_pid_t; +typedef unsigned short __kernel_uid_t; +typedef unsigned short __kernel_gid_t; +typedef unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +#if defined(__KERNEL__) + +#undef __FD_SET +#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) + +#undef __FD_CLR +#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) + +#undef __FD_ISSET +#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) + +#undef __FD_ZERO +#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp))) + +#endif /* defined(__KERNEL__) */ + +#endif diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h new file mode 100644 index 00000000000..69e8a34eb6d --- /dev/null +++ b/arch/h8300/include/asm/processor.h @@ -0,0 +1,140 @@ +/* + * include/asm-h8300/processor.h + * + * Copyright (C) 2002 Yoshinori Sato + * + * Based on: linux/asm-m68nommu/processor.h + * + * Copyright (C) 1995 Hamish Macdonald + */ + +#ifndef __ASM_H8300_PROCESSOR_H +#define __ASM_H8300_PROCESSOR_H + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l;}) + +#include +#include +#include +#include +#include + +static inline unsigned long rdusp(void) { + extern unsigned int sw_usp; + return(sw_usp); +} + +static inline void wrusp(unsigned long usp) { + extern unsigned int sw_usp; + sw_usp = usp; +} + +/* + * User space process size: 3.75GB. This is hardcoded into a few places, + * so don't change it unless you know what you are doing. 
+ */ +#define TASK_SIZE (0xFFFFFFFFUL) + +#ifdef __KERNEL__ +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP +#endif + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. We won't be using it + */ +#define TASK_UNMAPPED_BASE 0 + +struct thread_struct { + unsigned long ksp; /* kernel stack pointer */ + unsigned long usp; /* user stack pointer */ + unsigned long ccr; /* saved status register */ + unsigned long esp0; /* points to SR of stack frame */ + struct { + unsigned short *addr; + unsigned short inst; + } breakinfo; +}; + +#define INIT_THREAD { \ + .ksp = sizeof(init_stack) + (unsigned long)init_stack, \ + .usp = 0, \ + .ccr = PS_S, \ + .esp0 = 0, \ + .breakinfo = { \ + .addr = (unsigned short *)-1, \ + .inst = 0 \ + } \ +} + +/* + * Do necessary setup to start up a newly executed thread. + * + * pass the data segment into user programs if it exists, + * it can't hurt anything as far as I can tell + */ +#if defined(__H8300H__) +#define start_thread(_regs, _pc, _usp) \ +do { \ + set_fs(USER_DS); /* reads from user space */ \ + (_regs)->pc = (_pc); \ + (_regs)->ccr = 0x00; /* clear all flags */ \ + (_regs)->er5 = current->mm->start_data; /* GOT base */ \ + wrusp((unsigned long)(_usp) - sizeof(unsigned long)*3); \ +} while(0) +#endif +#if defined(__H8300S__) +#define start_thread(_regs, _pc, _usp) \ +do { \ + set_fs(USER_DS); /* reads from user space */ \ + (_regs)->pc = (_pc); \ + (_regs)->ccr = 0x00; /* clear kernel flag */ \ + (_regs)->exr = 0x78; /* enable all interrupts */ \ + (_regs)->er5 = current->mm->start_data; /* GOT base */ \ + /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \ + wrusp(((unsigned long)(_usp)) - 14); \ +} while(0) +#endif + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Free all resources held by a thread. */ +static inline void release_thread(struct task_struct *dead_task) +{ +} + +extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); + +#define prepare_to_copy(tsk) do { } while (0) + +/* + * Free current thread data structures etc.. + */ +static inline void exit_thread(void) +{ +} + +/* + * Return saved PC of a blocked thread. + */ +unsigned long thread_saved_pc(struct task_struct *tsk); +unsigned long get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) \ + ({ \ + unsigned long eip = 0; \ + if ((tsk)->thread.esp0 > PAGE_SIZE && \ + MAP_NR((tsk)->thread.esp0) < max_mapnr) \ + eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \ + eip; }) +#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) + +#define cpu_relax() barrier() + +#endif diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h new file mode 100644 index 00000000000..c2e05e4b512 --- /dev/null +++ b/arch/h8300/include/asm/ptrace.h @@ -0,0 +1,64 @@ +#ifndef _H8300_PTRACE_H +#define _H8300_PTRACE_H + +#ifndef __ASSEMBLY__ + +#define PT_ER1 0 +#define PT_ER2 1 +#define PT_ER3 2 +#define PT_ER4 3 +#define PT_ER5 4 +#define PT_ER6 5 +#define PT_ER0 6 +#define PT_ORIG_ER0 7 +#define PT_CCR 8 +#define PT_PC 9 +#define PT_USP 10 +#define PT_EXR 12 + +/* this struct defines the way the registers are stored on the + stack during a system call. 
*/ + +struct pt_regs { + long retpc; + long er4; + long er5; + long er6; + long er3; + long er2; + long er1; + long orig_er0; + unsigned short ccr; + long er0; + long vector; +#if defined(CONFIG_CPU_H8S) + unsigned short exr; +#endif + unsigned long pc; +} __attribute__((aligned(2),packed)); + +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 + +#ifdef __KERNEL__ +#ifndef PS_S +#define PS_S (0x10) +#endif + +#if defined(__H8300H__) +#define H8300_REGS_NO 11 +#endif +#if defined(__H8300S__) +#define H8300_REGS_NO 12 +#endif + +/* Find the stack offset for a register, relative to thread.esp0. */ +#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) + +#define user_mode(regs) (!((regs)->ccr & PS_S)) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +extern void show_regs(struct pt_regs *); +#endif /* __KERNEL__ */ +#endif /* __ASSEMBLY__ */ +#endif /* _H8300_PTRACE_H */ diff --git a/arch/h8300/include/asm/regs267x.h b/arch/h8300/include/asm/regs267x.h new file mode 100644 index 00000000000..1bff731a9f7 --- /dev/null +++ b/arch/h8300/include/asm/regs267x.h @@ -0,0 +1,336 @@ +/* internal Peripherals Register address define */ +/* CPU: H8/306x */ + +#if !defined(__REGS_H8S267x__) +#define __REGS_H8S267x__ + +#if defined(__KERNEL__) + +#define DASTCR 0xFEE01A +#define DADR0 0xFFFFA4 +#define DADR1 0xFFFFA5 +#define DACR01 0xFFFFA6 +#define DADR2 0xFFFFA8 +#define DADR3 0xFFFFA9 +#define DACR23 0xFFFFAA + +#define ADDRA 0xFFFF90 +#define ADDRAH 0xFFFF90 +#define ADDRAL 0xFFFF91 +#define ADDRB 0xFFFF92 +#define ADDRBH 0xFFFF92 +#define ADDRBL 0xFFFF93 +#define ADDRC 0xFFFF94 +#define ADDRCH 0xFFFF94 +#define ADDRCL 0xFFFF95 +#define ADDRD 0xFFFF96 +#define ADDRDH 0xFFFF96 +#define ADDRDL 0xFFFF97 +#define ADDRE 0xFFFF98 +#define ADDREH 0xFFFF98 +#define ADDREL 0xFFFF99 +#define ADDRF 0xFFFF9A +#define ADDRFH 0xFFFF9A +#define ADDRFL 0xFFFF9B +#define ADDRG 0xFFFF9C +#define ADDRGH 0xFFFF9C +#define ADDRGL 0xFFFF9D +#define ADDRH 0xFFFF9E +#define ADDRHH 0xFFFF9E +#define ADDRHL 0xFFFF9F + +#define ADCSR 0xFFFFA0 +#define ADCR 0xFFFFA1 + +#define ABWCR 0xFFFEC0 +#define ASTCR 0xFFFEC1 +#define WTCRAH 0xFFFEC2 +#define WTCRAL 0xFFFEC3 +#define WTCRBH 0xFFFEC4 +#define WTCRBL 0xFFFEC5 +#define RDNCR 0xFFFEC6 +#define CSACRH 0xFFFEC8 +#define CSACRL 0xFFFEC9 +#define BROMCRH 0xFFFECA +#define BROMCRL 0xFFFECB +#define BCR 0xFFFECC +#define DRAMCR 0xFFFED0 +#define DRACCR 0xFFFED2 +#define REFCR 0xFFFED4 +#define RTCNT 0xFFFED6 +#define RTCOR 0xFFFED7 + +#define MAR0AH 0xFFFEE0 +#define MAR0AL 0xFFFEE2 +#define IOAR0A 0xFFFEE4 +#define ETCR0A 0xFFFEE6 +#define MAR0BH 0xFFFEE8 +#define MAR0BL 0xFFFEEA +#define IOAR0B 0xFFFEEC +#define ETCR0B 0xFFFEEE +#define MAR1AH 0xFFFEF0 +#define MAR1AL 0xFFFEF2 +#define IOAR1A 0xFFFEF4 +#define ETCR1A 0xFFFEF6 +#define MAR1BH 0xFFFEF8 +#define MAR1BL 0xFFFEFA +#define IOAR1B 0xFFFEFC +#define ETCR1B 0xFFFEFE +#define DMAWER 0xFFFF20 +#define DMATCR 0xFFFF21 +#define DMACR0A 0xFFFF22 +#define DMACR0B 0xFFFF23 +#define DMACR1A 0xFFFF24 +#define DMACR1B 0xFFFF25 +#define DMABCRH 0xFFFF26 +#define DMABCRL 0xFFFF27 + +#define EDSAR0 0xFFFDC0 +#define EDDAR0 0xFFFDC4 +#define EDTCR0 0xFFFDC8 +#define EDMDR0 0xFFFDCC +#define EDMDR0H 0xFFFDCC +#define EDMDR0L 0xFFFDCD +#define EDACR0 0xFFFDCE +#define EDSAR1 0xFFFDD0 +#define EDDAR1 0xFFFDD4 +#define EDTCR1 0xFFFDD8 +#define EDMDR1 0xFFFDDC +#define EDMDR1H 0xFFFDDC +#define EDMDR1L 0xFFFDDD +#define EDACR1 0xFFFDDE +#define EDSAR2 0xFFFDE0 +#define EDDAR2 0xFFFDE4 
+#define EDTCR2 0xFFFDE8 +#define EDMDR2 0xFFFDEC +#define EDMDR2H 0xFFFDEC +#define EDMDR2L 0xFFFDED +#define EDACR2 0xFFFDEE +#define EDSAR3 0xFFFDF0 +#define EDDAR3 0xFFFDF4 +#define EDTCR3 0xFFFDF8 +#define EDMDR3 0xFFFDFC +#define EDMDR3H 0xFFFDFC +#define EDMDR3L 0xFFFDFD +#define EDACR3 0xFFFDFE + +#define IPRA 0xFFFE00 +#define IPRB 0xFFFE02 +#define IPRC 0xFFFE04 +#define IPRD 0xFFFE06 +#define IPRE 0xFFFE08 +#define IPRF 0xFFFE0A +#define IPRG 0xFFFE0C +#define IPRH 0xFFFE0E +#define IPRI 0xFFFE10 +#define IPRJ 0xFFFE12 +#define IPRK 0xFFFE14 +#define ITSR 0xFFFE16 +#define SSIER 0xFFFE18 +#define ISCRH 0xFFFE1A +#define ISCRL 0xFFFE1C + +#define INTCR 0xFFFF31 +#define IER 0xFFFF32 +#define IERH 0xFFFF32 +#define IERL 0xFFFF33 +#define ISR 0xFFFF34 +#define ISRH 0xFFFF34 +#define ISRL 0xFFFF35 + +#define P1DDR 0xFFFE20 +#define P2DDR 0xFFFE21 +#define P3DDR 0xFFFE22 +#define P4DDR 0xFFFE23 +#define P5DDR 0xFFFE24 +#define P6DDR 0xFFFE25 +#define P7DDR 0xFFFE26 +#define P8DDR 0xFFFE27 +#define P9DDR 0xFFFE28 +#define PADDR 0xFFFE29 +#define PBDDR 0xFFFE2A +#define PCDDR 0xFFFE2B +#define PDDDR 0xFFFE2C +#define PEDDR 0xFFFE2D +#define PFDDR 0xFFFE2E +#define PGDDR 0xFFFE2F +#define PHDDR 0xFFFF74 + +#define PFCR0 0xFFFE32 +#define PFCR1 0xFFFE33 +#define PFCR2 0xFFFE34 + +#define PAPCR 0xFFFE36 +#define PBPCR 0xFFFE37 +#define PCPCR 0xFFFE38 +#define PDPCR 0xFFFE39 +#define PEPCR 0xFFFE3A + +#define P3ODR 0xFFFE3C +#define PAODR 0xFFFE3D + +#define P1DR 0xFFFF60 +#define P2DR 0xFFFF61 +#define P3DR 0xFFFF62 +#define P4DR 0xFFFF63 +#define P5DR 0xFFFF64 +#define P6DR 0xFFFF65 +#define P7DR 0xFFFF66 +#define P8DR 0xFFFF67 +#define P9DR 0xFFFF68 +#define PADR 0xFFFF69 +#define PBDR 0xFFFF6A +#define PCDR 0xFFFF6B +#define PDDR 0xFFFF6C +#define PEDR 0xFFFF6D +#define PFDR 0xFFFF6E +#define PGDR 0xFFFF6F +#define PHDR 0xFFFF72 + +#define PORT1 0xFFFF50 +#define PORT2 0xFFFF51 +#define PORT3 0xFFFF52 +#define PORT4 0xFFFF53 +#define PORT5 0xFFFF54 +#define PORT6 0xFFFF55 +#define PORT7 0xFFFF56 +#define PORT8 0xFFFF57 +#define PORT9 0xFFFF58 +#define PORTA 0xFFFF59 +#define PORTB 0xFFFF5A +#define PORTC 0xFFFF5B +#define PORTD 0xFFFF5C +#define PORTE 0xFFFF5D +#define PORTF 0xFFFF5E +#define PORTG 0xFFFF5F +#define PORTH 0xFFFF70 + +#define PCR 0xFFFF46 +#define PMR 0xFFFF47 +#define NDERH 0xFFFF48 +#define NDERL 0xFFFF49 +#define PODRH 0xFFFF4A +#define PODRL 0xFFFF4B +#define NDRH1 0xFFFF4C +#define NDRL1 0xFFFF4D +#define NDRH2 0xFFFF4E +#define NDRL2 0xFFFF4F + +#define SMR0 0xFFFF78 +#define BRR0 0xFFFF79 +#define SCR0 0xFFFF7A +#define TDR0 0xFFFF7B +#define SSR0 0xFFFF7C +#define RDR0 0xFFFF7D +#define SCMR0 0xFFFF7E +#define SMR1 0xFFFF80 +#define BRR1 0xFFFF81 +#define SCR1 0xFFFF82 +#define TDR1 0xFFFF83 +#define SSR1 0xFFFF84 +#define RDR1 0xFFFF85 +#define SCMR1 0xFFFF86 +#define SMR2 0xFFFF88 +#define BRR2 0xFFFF89 +#define SCR2 0xFFFF8A +#define TDR2 0xFFFF8B +#define SSR2 0xFFFF8C +#define RDR2 0xFFFF8D +#define SCMR2 0xFFFF8E + +#define IRCR0 0xFFFE1E +#define SEMR 0xFFFDA8 + +#define MDCR 0xFFFF3E +#define SYSCR 0xFFFF3D +#define MSTPCRH 0xFFFF40 +#define MSTPCRL 0xFFFF41 +#define FLMCR1 0xFFFFC8 +#define FLMCR2 0xFFFFC9 +#define EBR1 0xFFFFCA +#define EBR2 0xFFFFCB +#define CTGARC_RAMCR 0xFFFECE +#define SBYCR 0xFFFF3A +#define SCKCR 0xFFFF3B +#define PLLCR 0xFFFF45 + +#define TSTR 0xFFFFC0 +#define TSNC 0XFFFFC1 + +#define TCR0 0xFFFFD0 +#define TMDR0 0xFFFFD1 +#define TIORH0 0xFFFFD2 +#define TIORL0 0xFFFFD3 +#define TIER0 0xFFFFD4 +#define TSR0 0xFFFFD5 +#define 
TCNT0 0xFFFFD6 +#define GRA0 0xFFFFD8 +#define GRB0 0xFFFFDA +#define GRC0 0xFFFFDC +#define GRD0 0xFFFFDE +#define TCR1 0xFFFFE0 +#define TMDR1 0xFFFFE1 +#define TIORH1 0xFFFFE2 +#define TIORL1 0xFFFFE3 +#define TIER1 0xFFFFE4 +#define TSR1 0xFFFFE5 +#define TCNT1 0xFFFFE6 +#define GRA1 0xFFFFE8 +#define GRB1 0xFFFFEA +#define TCR2 0xFFFFF0 +#define TMDR2 0xFFFFF1 +#define TIORH2 0xFFFFF2 +#define TIORL2 0xFFFFF3 +#define TIER2 0xFFFFF4 +#define TSR2 0xFFFFF5 +#define TCNT2 0xFFFFF6 +#define GRA2 0xFFFFF8 +#define GRB2 0xFFFFFA +#define TCR3 0xFFFE80 +#define TMDR3 0xFFFE81 +#define TIORH3 0xFFFE82 +#define TIORL3 0xFFFE83 +#define TIER3 0xFFFE84 +#define TSR3 0xFFFE85 +#define TCNT3 0xFFFE86 +#define GRA3 0xFFFE88 +#define GRB3 0xFFFE8A +#define GRC3 0xFFFE8C +#define GRD3 0xFFFE8E +#define TCR4 0xFFFE90 +#define TMDR4 0xFFFE91 +#define TIORH4 0xFFFE92 +#define TIORL4 0xFFFE93 +#define TIER4 0xFFFE94 +#define TSR4 0xFFFE95 +#define TCNT4 0xFFFE96 +#define GRA4 0xFFFE98 +#define GRB4 0xFFFE9A +#define TCR5 0xFFFEA0 +#define TMDR5 0xFFFEA1 +#define TIORH5 0xFFFEA2 +#define TIORL5 0xFFFEA3 +#define TIER5 0xFFFEA4 +#define TSR5 0xFFFEA5 +#define TCNT5 0xFFFEA6 +#define GRA5 0xFFFEA8 +#define GRB5 0xFFFEAA + +#define _8TCR0 0xFFFFB0 +#define _8TCR1 0xFFFFB1 +#define _8TCSR0 0xFFFFB2 +#define _8TCSR1 0xFFFFB3 +#define _8TCORA0 0xFFFFB4 +#define _8TCORA1 0xFFFFB5 +#define _8TCORB0 0xFFFFB6 +#define _8TCORB1 0xFFFFB7 +#define _8TCNT0 0xFFFFB8 +#define _8TCNT1 0xFFFFB9 + +#define TCSR 0xFFFFBC +#define TCNT 0xFFFFBD +#define RSTCSRW 0xFFFFBE +#define RSTCSRR 0xFFFFBF + +#endif /* __KERNEL__ */ +#endif /* __REGS_H8S267x__ */ diff --git a/arch/h8300/include/asm/regs306x.h b/arch/h8300/include/asm/regs306x.h new file mode 100644 index 00000000000..027dd633fa2 --- /dev/null +++ b/arch/h8300/include/asm/regs306x.h @@ -0,0 +1,212 @@ +/* internal Peripherals Register address define */ +/* CPU: H8/306x */ + +#if !defined(__REGS_H8306x__) +#define __REGS_H8306x__ + +#if defined(__KERNEL__) + +#define DASTCR 0xFEE01A +#define DADR0 0xFEE09C +#define DADR1 0xFEE09D +#define DACR 0xFEE09E + +#define ADDRAH 0xFFFFE0 +#define ADDRAL 0xFFFFE1 +#define ADDRBH 0xFFFFE2 +#define ADDRBL 0xFFFFE3 +#define ADDRCH 0xFFFFE4 +#define ADDRCL 0xFFFFE5 +#define ADDRDH 0xFFFFE6 +#define ADDRDL 0xFFFFE7 +#define ADCSR 0xFFFFE8 +#define ADCR 0xFFFFE9 + +#define BRCR 0xFEE013 +#define ADRCR 0xFEE01E +#define CSCR 0xFEE01F +#define ABWCR 0xFEE020 +#define ASTCR 0xFEE021 +#define WCRH 0xFEE022 +#define WCRL 0xFEE023 +#define BCR 0xFEE024 +#define DRCRA 0xFEE026 +#define DRCRB 0xFEE027 +#define RTMCSR 0xFEE028 +#define RTCNT 0xFEE029 +#define RTCOR 0xFEE02A + +#define MAR0AR 0xFFFF20 +#define MAR0AE 0xFFFF21 +#define MAR0AH 0xFFFF22 +#define MAR0AL 0xFFFF23 +#define ETCR0AL 0xFFFF24 +#define ETCR0AH 0xFFFF25 +#define IOAR0A 0xFFFF26 +#define DTCR0A 0xFFFF27 +#define MAR0BR 0xFFFF28 +#define MAR0BE 0xFFFF29 +#define MAR0BH 0xFFFF2A +#define MAR0BL 0xFFFF2B +#define ETCR0BL 0xFFFF2C +#define ETCR0BH 0xFFFF2D +#define IOAR0B 0xFFFF2E +#define DTCR0B 0xFFFF2F +#define MAR1AR 0xFFFF30 +#define MAR1AE 0xFFFF31 +#define MAR1AH 0xFFFF32 +#define MAR1AL 0xFFFF33 +#define ETCR1AL 0xFFFF34 +#define ETCR1AH 0xFFFF35 +#define IOAR1A 0xFFFF36 +#define DTCR1A 0xFFFF37 +#define MAR1BR 0xFFFF38 +#define MAR1BE 0xFFFF39 +#define MAR1BH 0xFFFF3A +#define MAR1BL 0xFFFF3B +#define ETCR1BL 0xFFFF3C +#define ETCR1BH 0xFFFF3D +#define IOAR1B 0xFFFF3E +#define DTCR1B 0xFFFF3F + +#define ISCR 0xFEE014 +#define IER 0xFEE015 +#define ISR 0xFEE016 +#define 
IPRA 0xFEE018 +#define IPRB 0xFEE019 + +#define P1DDR 0xFEE000 +#define P2DDR 0xFEE001 +#define P3DDR 0xFEE002 +#define P4DDR 0xFEE003 +#define P5DDR 0xFEE004 +#define P6DDR 0xFEE005 +/*#define P7DDR 0xFEE006*/ +#define P8DDR 0xFEE007 +#define P9DDR 0xFEE008 +#define PADDR 0xFEE009 +#define PBDDR 0xFEE00A + +#define P1DR 0xFFFFD0 +#define P2DR 0xFFFFD1 +#define P3DR 0xFFFFD2 +#define P4DR 0xFFFFD3 +#define P5DR 0xFFFFD4 +#define P6DR 0xFFFFD5 +/*#define P7DR 0xFFFFD6*/ +#define P8DR 0xFFFFD7 +#define P9DR 0xFFFFD8 +#define PADR 0xFFFFD9 +#define PBDR 0xFFFFDA + +#define P2CR 0xFEE03C +#define P4CR 0xFEE03E +#define P5CR 0xFEE03F + +#define SMR0 0xFFFFB0 +#define BRR0 0xFFFFB1 +#define SCR0 0xFFFFB2 +#define TDR0 0xFFFFB3 +#define SSR0 0xFFFFB4 +#define RDR0 0xFFFFB5 +#define SCMR0 0xFFFFB6 +#define SMR1 0xFFFFB8 +#define BRR1 0xFFFFB9 +#define SCR1 0xFFFFBA +#define TDR1 0xFFFFBB +#define SSR1 0xFFFFBC +#define RDR1 0xFFFFBD +#define SCMR1 0xFFFFBE +#define SMR2 0xFFFFC0 +#define BRR2 0xFFFFC1 +#define SCR2 0xFFFFC2 +#define TDR2 0xFFFFC3 +#define SSR2 0xFFFFC4 +#define RDR2 0xFFFFC5 +#define SCMR2 0xFFFFC6 + +#define MDCR 0xFEE011 +#define SYSCR 0xFEE012 +#define DIVCR 0xFEE01B +#define MSTCRH 0xFEE01C +#define MSTCRL 0xFEE01D +#define FLMCR1 0xFEE030 +#define FLMCR2 0xFEE031 +#define EBR1 0xFEE032 +#define EBR2 0xFEE033 +#define RAMCR 0xFEE077 + +#define TSTR 0xFFFF60 +#define TSNC 0XFFFF61 +#define TMDR 0xFFFF62 +#define TOLR 0xFFFF63 +#define TISRA 0xFFFF64 +#define TISRB 0xFFFF65 +#define TISRC 0xFFFF66 +#define TCR0 0xFFFF68 +#define TIOR0 0xFFFF69 +#define TCNT0H 0xFFFF6A +#define TCNT0L 0xFFFF6B +#define GRA0H 0xFFFF6C +#define GRA0L 0xFFFF6D +#define GRB0H 0xFFFF6E +#define GRB0L 0xFFFF6F +#define TCR1 0xFFFF70 +#define TIOR1 0xFFFF71 +#define TCNT1H 0xFFFF72 +#define TCNT1L 0xFFFF73 +#define GRA1H 0xFFFF74 +#define GRA1L 0xFFFF75 +#define GRB1H 0xFFFF76 +#define GRB1L 0xFFFF77 +#define TCR3 0xFFFF78 +#define TIOR3 0xFFFF79 +#define TCNT3H 0xFFFF7A +#define TCNT3L 0xFFFF7B +#define GRA3H 0xFFFF7C +#define GRA3L 0xFFFF7D +#define GRB3H 0xFFFF7E +#define GRB3L 0xFFFF7F + +#define _8TCR0 0xFFFF80 +#define _8TCR1 0xFFFF81 +#define _8TCSR0 0xFFFF82 +#define _8TCSR1 0xFFFF83 +#define TCORA0 0xFFFF84 +#define TCORA1 0xFFFF85 +#define TCORB0 0xFFFF86 +#define TCORB1 0xFFFF87 +#define _8TCNT0 0xFFFF88 +#define _8TCNT1 0xFFFF89 + +#define _8TCR2 0xFFFF90 +#define _8TCR3 0xFFFF91 +#define _8TCSR2 0xFFFF92 +#define _8TCSR3 0xFFFF93 +#define TCORA2 0xFFFF94 +#define TCORA3 0xFFFF95 +#define TCORB2 0xFFFF96 +#define TCORB3 0xFFFF97 +#define _8TCNT2 0xFFFF98 +#define _8TCNT3 0xFFFF99 + +#define TCSR 0xFFFF8C +#define TCNT 0xFFFF8D +#define RSTCSR 0xFFFF8F + +#define TPMR 0xFFFFA0 +#define TPCR 0xFFFFA1 +#define NDERB 0xFFFFA2 +#define NDERA 0xFFFFA3 +#define NDRB1 0xFFFFA4 +#define NDRA1 0xFFFFA5 +#define NDRB2 0xFFFFA6 +#define NDRA2 0xFFFFA7 + +#define TCSR 0xFFFF8C +#define TCNT 0xFFFF8D +#define RSTCSRW 0xFFFF8E +#define RSTCSRR 0xFFFF8F + +#endif /* __KERNEL__ */ +#endif /* __REGS_H8306x__ */ diff --git a/arch/h8300/include/asm/resource.h b/arch/h8300/include/asm/resource.h new file mode 100644 index 00000000000..46c5f439160 --- /dev/null +++ b/arch/h8300/include/asm/resource.h @@ -0,0 +1,6 @@ +#ifndef _H8300_RESOURCE_H +#define _H8300_RESOURCE_H + +#include + +#endif /* _H8300_RESOURCE_H */ diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h new file mode 100644 index 00000000000..d3ecdd87ac9 --- /dev/null +++ b/arch/h8300/include/asm/scatterlist.h @@ 
-0,0 +1,18 @@ +#ifndef _H8300_SCATTERLIST_H +#define _H8300_SCATTERLIST_H + +#include + +struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif + unsigned long page_link; + unsigned int offset; + dma_addr_t dma_address; + unsigned int length; +}; + +#define ISA_DMA_THRESHOLD (0xffffffff) + +#endif /* !(_H8300_SCATTERLIST_H) */ diff --git a/arch/h8300/include/asm/sections.h b/arch/h8300/include/asm/sections.h new file mode 100644 index 00000000000..a81743e8b74 --- /dev/null +++ b/arch/h8300/include/asm/sections.h @@ -0,0 +1,6 @@ +#ifndef _H8300_SECTIONS_H_ +#define _H8300_SECTIONS_H_ + +#include + +#endif diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h new file mode 100644 index 00000000000..b79a82d0f99 --- /dev/null +++ b/arch/h8300/include/asm/segment.h @@ -0,0 +1,49 @@ +#ifndef _H8300_SEGMENT_H +#define _H8300_SEGMENT_H + +/* define constants */ +#define USER_DATA (1) +#ifndef __USER_DS +#define __USER_DS (USER_DATA) +#endif +#define USER_PROGRAM (2) +#define SUPER_DATA (3) +#ifndef __KERNEL_DS +#define __KERNEL_DS (SUPER_DATA) +#endif +#define SUPER_PROGRAM (4) + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) +#define USER_DS MAKE_MM_SEG(__USER_DS) +#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS) + +/* + * Get/set the SFC/DFC registers for MOVES instructions + */ + +static inline mm_segment_t get_fs(void) +{ + return USER_DS; +} + +static inline mm_segment_t get_ds(void) +{ + /* return the supervisor data space code */ + return KERNEL_DS; +} + +static inline void set_fs(mm_segment_t val) +{ +} + +#define segment_eq(a,b) ((a).seg == (b).seg) + +#endif /* __ASSEMBLY__ */ + +#endif /* _H8300_SEGMENT_H */ diff --git a/arch/h8300/include/asm/sembuf.h b/arch/h8300/include/asm/sembuf.h new file mode 100644 index 00000000000..e04a3ec0cb9 --- /dev/null +++ b/arch/h8300/include/asm/sembuf.h @@ -0,0 +1,25 @@ +#ifndef _H8300_SEMBUF_H +#define _H8300_SEMBUF_H + +/* + * The semid64_ds structure for m68k architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + unsigned long __unused1; + __kernel_time_t sem_ctime; /* last change time */ + unsigned long __unused2; + unsigned long sem_nsems; /* no. 
of semaphores in array */ + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _H8300_SEMBUF_H */ diff --git a/arch/h8300/include/asm/setup.h b/arch/h8300/include/asm/setup.h new file mode 100644 index 00000000000..e2c600e9673 --- /dev/null +++ b/arch/h8300/include/asm/setup.h @@ -0,0 +1,6 @@ +#ifndef __H8300_SETUP_H +#define __H8300_SETUP_H + +#define COMMAND_LINE_SIZE 512 + +#endif diff --git a/arch/h8300/include/asm/sh_bios.h b/arch/h8300/include/asm/sh_bios.h new file mode 100644 index 00000000000..b6bb6e58295 --- /dev/null +++ b/arch/h8300/include/asm/sh_bios.h @@ -0,0 +1,29 @@ +/* eCos HAL interface header */ + +#ifndef SH_BIOS_H +#define SH_BIOS_H + +#define HAL_IF_VECTOR_TABLE 0xfffe20 +#define CALL_IF_SET_CONSOLE_COMM 13 +#define QUERY_CURRENT -1 +#define MANGLER -3 + +/* Checking for GDB stub active */ +/* suggestion Jonathan Larmour */ +static int sh_bios_in_gdb_mode(void) +{ + static int gdb_active = -1; + if (gdb_active == -1) { + int (*set_console_comm)(int); + set_console_comm = ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM]; + gdb_active = (set_console_comm(QUERY_CURRENT) == MANGLER); + } + return gdb_active; +} + +static void sh_bios_gdb_detach(void) +{ + +} + +#endif diff --git a/arch/h8300/include/asm/shm.h b/arch/h8300/include/asm/shm.h new file mode 100644 index 00000000000..ed6623c0545 --- /dev/null +++ b/arch/h8300/include/asm/shm.h @@ -0,0 +1,31 @@ +#ifndef _H8300_SHM_H +#define _H8300_SHM_H + + +/* format of page table entries that correspond to shared memory pages + currently out in swap space (see also mm/swap.c): + bits 0-1 (PAGE_PRESENT) is = 0 + bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE + bits 31..9 are used like this: + bits 15..9 (SHM_ID) the id of the shared memory segment + bits 30..16 (SHM_IDX) the index of the page within the shared memory segment + (actually only bits 25..16 get used since SHMMAX is so low) + bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach +*/ +/* on the m68k both bits 0 and 1 must be zero */ +/* format on the sun3 is similar, but bits 30, 31 are set to zero and all + others are reduced by 2. --m */ + +#ifndef CONFIG_SUN3 +#define SHM_ID_SHIFT 9 +#else +#define SHM_ID_SHIFT 7 +#endif +#define _SHM_ID_BITS 7 +#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1) + +#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS) +#define _SHM_IDX_BITS 15 +#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1) + +#endif /* _H8300_SHM_H */ diff --git a/arch/h8300/include/asm/shmbuf.h b/arch/h8300/include/asm/shmbuf.h new file mode 100644 index 00000000000..64e77993a7a --- /dev/null +++ b/arch/h8300/include/asm/shmbuf.h @@ -0,0 +1,42 @@ +#ifndef _H8300_SHMBUF_H +#define _H8300_SHMBUF_H + +/* + * The shmid64_ds structure for m68k architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + unsigned long __unused1; + __kernel_time_t shm_dtime; /* last detach time */ + unsigned long __unused2; + __kernel_time_t shm_ctime; /* last change time */ + unsigned long __unused3; + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _H8300_SHMBUF_H */ diff --git a/arch/h8300/include/asm/shmparam.h b/arch/h8300/include/asm/shmparam.h new file mode 100644 index 00000000000..d1863953ec6 --- /dev/null +++ b/arch/h8300/include/asm/shmparam.h @@ -0,0 +1,6 @@ +#ifndef _H8300_SHMPARAM_H +#define _H8300_SHMPARAM_H + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _H8300_SHMPARAM_H */ diff --git a/arch/h8300/include/asm/sigcontext.h b/arch/h8300/include/asm/sigcontext.h new file mode 100644 index 00000000000..e4b81505f8f --- /dev/null +++ b/arch/h8300/include/asm/sigcontext.h @@ -0,0 +1,18 @@ +#ifndef _ASM_H8300_SIGCONTEXT_H +#define _ASM_H8300_SIGCONTEXT_H + +struct sigcontext { + unsigned long sc_mask; /* old sigmask */ + unsigned long sc_usp; /* old user stack pointer */ + unsigned long sc_er0; + unsigned long sc_er1; + unsigned long sc_er2; + unsigned long sc_er3; + unsigned long sc_er4; + unsigned long sc_er5; + unsigned long sc_er6; + unsigned short sc_ccr; + unsigned long sc_pc; +}; + +#endif diff --git a/arch/h8300/include/asm/siginfo.h b/arch/h8300/include/asm/siginfo.h new file mode 100644 index 00000000000..bc8fbea931a --- /dev/null +++ b/arch/h8300/include/asm/siginfo.h @@ -0,0 +1,6 @@ +#ifndef _H8300_SIGINFO_H +#define _H8300_SIGINFO_H + +#include + +#endif diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h new file mode 100644 index 00000000000..7bc15048a64 --- /dev/null +++ b/arch/h8300/include/asm/signal.h @@ -0,0 +1,161 @@ +#ifndef _H8300_SIGNAL_H +#define _H8300_SIGNAL_H + +#include + +/* Avoid too many header ordering problems. */ +struct siginfo; + +#ifdef __KERNEL__ +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. */ + +#define _NSIG 64 +#define _NSIG_BPW 32 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#else +/* Here we must cater to libcs that poke about in kernel headers. */ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +#define SIGUNUSED 31 + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. 
+ * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. + */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND + +#define SA_RESTORER 0x04000000 + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 2048 +#define SIGSTKSZ 8192 + +#include + +#ifdef __KERNEL__ +struct old_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + void (*sa_restorer)(void); + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +}; +#else +/* Here we must cater to libcs that poke about in kernel headers. */ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int, struct siginfo *, void *); + } _u; + sigset_t sa_mask; + unsigned long sa_flags; + void (*sa_restorer)(void); +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ + +#include +#undef __HAVE_ARCH_SIG_BITOPS + +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +#endif /* __KERNEL__ */ + +#endif /* _H8300_SIGNAL_H */ diff --git a/arch/h8300/include/asm/smp.h b/arch/h8300/include/asm/smp.h new file mode 100644 index 00000000000..9e9bd7e5892 --- /dev/null +++ b/arch/h8300/include/asm/smp.h @@ -0,0 +1 @@ +/* nothing required here yet */ diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h new file mode 100644 index 00000000000..da2520dbf25 --- /dev/null +++ b/arch/h8300/include/asm/socket.h @@ -0,0 +1,57 @@ +#ifndef _ASM_SOCKET_H +#define _ASM_SOCKET_H + +#include + +/* For setsockoptions(2) */ +#define SOL_SOCKET 1 + +#define SO_DEBUG 1 +#define SO_REUSEADDR 2 +#define SO_TYPE 3 +#define SO_ERROR 4 +#define SO_DONTROUTE 5 +#define SO_BROADCAST 6 +#define SO_SNDBUF 7 +#define SO_RCVBUF 8 +#define SO_SNDBUFFORCE 32 +#define SO_RCVBUFFORCE 33 +#define SO_KEEPALIVE 9 +#define SO_OOBINLINE 10 +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_LINGER 13 +#define SO_BSDCOMPAT 14 +/* To add :#define SO_REUSEPORT 15 */ +#define SO_PASSCRED 16 +#define SO_PEERCRED 17 +#define SO_RCVLOWAT 18 +#define SO_SNDLOWAT 19 +#define SO_RCVTIMEO 20 +#define SO_SNDTIMEO 21 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 22 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 +#define SO_SECURITY_ENCRYPTION_NETWORK 24 + +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_ACCEPTCONN 30 + +#define SO_PEERSEC 31 +#define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS + +#define SO_MARK 36 + +#endif /* _ASM_SOCKET_H */ diff --git 
a/arch/h8300/include/asm/sockios.h b/arch/h8300/include/asm/sockios.h new file mode 100644 index 00000000000..e9c7ec810c2 --- /dev/null +++ b/arch/h8300/include/asm/sockios.h @@ -0,0 +1,13 @@ +#ifndef __ARCH_H8300_SOCKIOS__ +#define __ARCH_H8300_SOCKIOS__ + +/* Socket-level I/O control calls. */ +#define FIOSETOWN 0x8901 +#define SIOCSPGRP 0x8902 +#define FIOGETOWN 0x8903 +#define SIOCGPGRP 0x8904 +#define SIOCATMARK 0x8905 +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ + +#endif /* __ARCH_H8300_SOCKIOS__ */ diff --git a/arch/h8300/include/asm/spinlock.h b/arch/h8300/include/asm/spinlock.h new file mode 100644 index 00000000000..d5407fa173e --- /dev/null +++ b/arch/h8300/include/asm/spinlock.h @@ -0,0 +1,6 @@ +#ifndef __H8300_SPINLOCK_H +#define __H8300_SPINLOCK_H + +#error "H8/300 doesn't do SMP yet" + +#endif diff --git a/arch/h8300/include/asm/stat.h b/arch/h8300/include/asm/stat.h new file mode 100644 index 00000000000..62c3cc24dfe --- /dev/null +++ b/arch/h8300/include/asm/stat.h @@ -0,0 +1,78 @@ +#ifndef _H8300_STAT_H +#define _H8300_STAT_H + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long __unused1; + unsigned long st_mtime; + unsigned long __unused2; + unsigned long st_ctime; + unsigned long __unused3; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc2.1, hence the absolutely + * insane amounts of padding around dev_t's. + */ +struct stat64 { + unsigned long long st_dev; + unsigned char __pad1[2]; + +#define STAT64_HAS_BROKEN_ST_INO 1 + unsigned long __st_ino; + + unsigned int st_mode; + unsigned int st_nlink; + + unsigned long st_uid; + unsigned long st_gid; + + unsigned long long st_rdev; + unsigned char __pad3[2]; + + long long st_size; + unsigned long st_blksize; + + unsigned long __pad4; /* future possible st_blocks high bits */ + unsigned long st_blocks; /* Number 512-byte blocks allocated. 
*/ + + unsigned long st_atime; + unsigned long st_atime_nsec; + + unsigned long st_mtime; + unsigned long st_mtime_nsec; + + unsigned long st_ctime; + unsigned long st_ctime_nsec; + + unsigned long long st_ino; +}; + +#endif /* _H8300_STAT_H */ diff --git a/arch/h8300/include/asm/statfs.h b/arch/h8300/include/asm/statfs.h new file mode 100644 index 00000000000..b96efa712aa --- /dev/null +++ b/arch/h8300/include/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _H8300_STATFS_H +#define _H8300_STATFS_H + +#include + +#endif /* _H8300_STATFS_H */ diff --git a/arch/h8300/include/asm/string.h b/arch/h8300/include/asm/string.h new file mode 100644 index 00000000000..ca5034897d8 --- /dev/null +++ b/arch/h8300/include/asm/string.h @@ -0,0 +1,44 @@ +#ifndef _H8300_STRING_H_ +#define _H8300_STRING_H_ + +#ifdef __KERNEL__ /* only set these up for kernel code */ + +#include +#include + +#define __HAVE_ARCH_MEMSET +extern void * memset(void * s, int c, size_t count); + +#define __HAVE_ARCH_MEMCPY +extern void * memcpy(void *d, const void *s, size_t count); + +#else /* KERNEL */ + +/* + * let user libraries deal with these, + * IMHO the kernel has no place defining these functions for user apps + */ + +#define __HAVE_ARCH_STRCPY 1 +#define __HAVE_ARCH_STRNCPY 1 +#define __HAVE_ARCH_STRCAT 1 +#define __HAVE_ARCH_STRNCAT 1 +#define __HAVE_ARCH_STRCMP 1 +#define __HAVE_ARCH_STRNCMP 1 +#define __HAVE_ARCH_STRNICMP 1 +#define __HAVE_ARCH_STRCHR 1 +#define __HAVE_ARCH_STRRCHR 1 +#define __HAVE_ARCH_STRSTR 1 +#define __HAVE_ARCH_STRLEN 1 +#define __HAVE_ARCH_STRNLEN 1 +#define __HAVE_ARCH_MEMSET 1 +#define __HAVE_ARCH_MEMCPY 1 +#define __HAVE_ARCH_MEMMOVE 1 +#define __HAVE_ARCH_MEMSCAN 1 +#define __HAVE_ARCH_MEMCMP 1 +#define __HAVE_ARCH_MEMCHR 1 +#define __HAVE_ARCH_STRTOK 1 + +#endif /* KERNEL */ + +#endif /* _M68K_STRING_H_ */ diff --git a/arch/h8300/include/asm/system.h b/arch/h8300/include/asm/system.h new file mode 100644 index 00000000000..4b8e475908a --- /dev/null +++ b/arch/h8300/include/asm/system.h @@ -0,0 +1,158 @@ +#ifndef _H8300_SYSTEM_H +#define _H8300_SYSTEM_H + +#include + +/* + * switch_to(n) should switch tasks to task ptr, first checking that + * ptr isn't the current task, in which case it does nothing. This + * also clears the TS-flag if the task we switched to has used the + * math co-processor latest. + */ +/* + * switch_to() saves the extra registers, that are not saved + * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and + * a0-a1. Some of these are used by schedule() and its predecessors + * and so we might get see unexpected behaviors when a task returns + * with unexpected register values. + * + * syscall stores these registers itself and none of them are used + * by syscall after the function in the syscall has been called. + * + * Beware that resume now expects *next to be in d1 and the offset of + * tss to be in a1. This saves a few instructions as we no longer have + * to push them onto the stack and read them back right after. + * + * 02/17/96 - Jes Sorensen (jds@kom.auc.dk) + * + * Changed 96/09/19 by Andreas Schwab + * pass prev in a0, next in a1, offset of tss in d1, and whether + * the mm structures are shared in d2 (to avoid atc flushing). 
+ * + * H8/300 Porting 2002/09/04 Yoshinori Sato + */ + +asmlinkage void resume(void); +#define switch_to(prev,next,last) { \ + void *_last; \ + __asm__ __volatile__( \ + "mov.l %1, er0\n\t" \ + "mov.l %2, er1\n\t" \ + "mov.l %3, er2\n\t" \ + "jsr @_resume\n\t" \ + "mov.l er2,%0\n\t" \ + : "=r" (_last) \ + : "r" (&(prev->thread)), \ + "r" (&(next->thread)), \ + "g" (prev) \ + : "cc", "er0", "er1", "er2", "er3"); \ + (last) = _last; \ +} + +#define __sti() asm volatile ("andc #0x7f,ccr") +#define __cli() asm volatile ("orc #0x80,ccr") + +#define __save_flags(x) \ + asm volatile ("stc ccr,%w0":"=r" (x)) + +#define __restore_flags(x) \ + asm volatile ("ldc %w0,ccr": :"r" (x)) + +#define irqs_disabled() \ +({ \ + unsigned char flags; \ + __save_flags(flags); \ + ((flags & 0x80) == 0x80); \ +}) + +#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") + +/* For spinlocks etc */ +#define local_irq_disable() __cli() +#define local_irq_enable() __sti() +#define local_irq_save(x) ({ __save_flags(x); local_irq_disable(); }) +#define local_irq_restore(x) __restore_flags(x) +#define local_save_flags(x) __save_flags(x) + +/* + * Force strict CPU ordering. + * Not really required on H8... + */ +#define nop() asm volatile ("nop"::) +#define mb() asm volatile ("" : : :"memory") +#define rmb() asm volatile ("" : : :"memory") +#define wmb() asm volatile ("" : : :"memory") +#define set_mb(var, value) do { xchg(&var, value); } while (0) + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while(0) +#endif + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +struct __xchg_dummy { unsigned long a[100]; }; +#define __xg(x) ((volatile struct __xchg_dummy *)(x)) + +static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) +{ + unsigned long tmp, flags; + + local_irq_save(flags); + + switch (size) { + case 1: + __asm__ __volatile__ + ("mov.b %2,%0\n\t" + "mov.b %1,%2" + : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); + break; + case 2: + __asm__ __volatile__ + ("mov.w %2,%0\n\t" + "mov.w %1,%2" + : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); + break; + case 4: + __asm__ __volatile__ + ("mov.l %2,%0\n\t" + "mov.l %1,%2" + : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); + break; + default: + tmp = 0; + } + local_irq_restore(flags); + return tmp; +} + +#define HARD_RESET_NOW() ({ \ + local_irq_disable(); \ + asm("jmp @@0"); \ +}) + +#include + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. 
+ */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#ifndef CONFIG_SMP +#include +#endif + +#define arch_align_stack(x) (x) + +#endif /* _H8300_SYSTEM_H */ diff --git a/arch/h8300/include/asm/target_time.h b/arch/h8300/include/asm/target_time.h new file mode 100644 index 00000000000..9f2a9aa1fe6 --- /dev/null +++ b/arch/h8300/include/asm/target_time.h @@ -0,0 +1,4 @@ +extern int platform_timer_setup(void (*timer_int)(int, void *, struct pt_regs *)); +extern void platform_timer_eoi(void); +extern void platform_gettod(unsigned int *year, unsigned int *mon, unsigned int *day, + unsigned int *hour, unsigned int *min, unsigned int *sec); diff --git a/arch/h8300/include/asm/termbits.h b/arch/h8300/include/asm/termbits.h new file mode 100644 index 00000000000..31eca81db3f --- /dev/null +++ b/arch/h8300/include/asm/termbits.h @@ -0,0 +1,200 @@ +#ifndef __ARCH_H8300_TERMBITS_H__ +#define __ARCH_H8300_TERMBITS_H__ + +#include + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ +}; + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VTIME 5 +#define VMIN 6 +#define VSWTC 7 +#define VSTART 8 +#define VSTOP 9 +#define VSUSP 10 +#define VEOL 11 +#define VREPRINT 12 +#define VDISCARD 13 +#define VWERASE 14 +#define VLNEXT 15 +#define VEOL2 16 + + +/* c_iflag bits */ +#define IGNBRK 0000001 +#define BRKINT 0000002 +#define IGNPAR 0000004 +#define PARMRK 0000010 +#define INPCK 0000020 +#define ISTRIP 0000040 +#define INLCR 0000100 +#define IGNCR 0000200 +#define ICRNL 0000400 +#define IUCLC 0001000 +#define IXON 0002000 +#define IXANY 0004000 +#define IXOFF 0010000 +#define IMAXBEL 0020000 +#define IUTF8 0040000 + +/* c_oflag bits */ +#define OPOST 0000001 +#define OLCUC 0000002 +#define ONLCR 0000004 +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 +#define OFILL 0000100 +#define OFDEL 0000200 +#define NLDLY 0000400 +#define NL0 0000000 +#define NL1 0000400 +#define CRDLY 0003000 +#define CR0 0000000 +#define CR1 0001000 +#define CR2 0002000 +#define CR3 0003000 +#define TABDLY 0014000 +#define TAB0 0000000 +#define TAB1 0004000 +#define TAB2 0010000 +#define TAB3 0014000 +#define XTABS 0014000 +#define BSDLY 0020000 +#define BS0 0000000 +#define BS1 0020000 +#define VTDLY 0040000 +#define VT0 0000000 
+#define VT1 0040000 +#define FFDLY 0100000 +#define FF0 0000000 +#define FF1 0100000 + +/* c_cflag bit meaning */ +#define CBAUD 0010017 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CSIZE 0000060 +#define CS5 0000000 +#define CS6 0000020 +#define CS7 0000040 +#define CS8 0000060 +#define CSTOPB 0000100 +#define CREAD 0000200 +#define PARENB 0000400 +#define PARODD 0001000 +#define HUPCL 0002000 +#define CLOCAL 0004000 +#define CBAUDEX 0010000 +#define BOTHER 0010000 +#define B57600 0010001 +#define B115200 0010002 +#define B230400 0010003 +#define B460800 0010004 +#define B500000 0010005 +#define B576000 0010006 +#define B921600 0010007 +#define B1000000 0010010 +#define B1152000 0010011 +#define B1500000 0010012 +#define B2000000 0010013 +#define B2500000 0010014 +#define B3000000 0010015 +#define B3500000 0010016 +#define B4000000 0010017 +#define CIBAUD 002003600000 /* input baud rate */ +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +#define IBSHIFT 16 /* shift from CBAUD to CIBAUD */ + +/* c_lflag bits */ +#define ISIG 0000001 +#define ICANON 0000002 +#define XCASE 0000004 +#define ECHO 0000010 +#define ECHOE 0000020 +#define ECHOK 0000040 +#define ECHONL 0000100 +#define NOFLSH 0000200 +#define TOSTOP 0000400 +#define ECHOCTL 0001000 +#define ECHOPRT 0002000 +#define ECHOKE 0004000 +#define FLUSHO 0010000 +#define PENDIN 0040000 +#define IEXTEN 0100000 + + +/* tcflow() and TCXONC use these */ +#define TCOOFF 0 +#define TCOON 1 +#define TCIOFF 2 +#define TCION 3 + +/* tcflush() and TCFLSH use these */ +#define TCIFLUSH 0 +#define TCOFLUSH 1 +#define TCIOFLUSH 2 + +/* tcsetattr uses these */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* __ARCH_H8300_TERMBITS_H__ */ diff --git a/arch/h8300/include/asm/termios.h b/arch/h8300/include/asm/termios.h new file mode 100644 index 00000000000..70eea64b421 --- /dev/null +++ b/arch/h8300/include/asm/termios.h @@ -0,0 +1,92 @@ +#ifndef _H8300_TERMIOS_H +#define _H8300_TERMIOS_H + +#include +#include + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +#ifdef __KERNEL__ +/* intr=^C quit=^| erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" +#endif + +/* modem lines */ +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define 
TIOCM_LOOP 0x8000 + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + +#ifdef __KERNEL__ + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + unsigned short tmp; \ + get_user(tmp, &(termio)->c_iflag); \ + (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ + get_user(tmp, &(termio)->c_oflag); \ + (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ + get_user(tmp, &(termio)->c_cflag); \ + (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ + get_user(tmp, &(termio)->c_lflag); \ + (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ + get_user((termios)->c_line, &(termio)->c_line); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. + */ +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) +#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __KERNEL__ */ + +#endif /* _H8300_TERMIOS_H */ diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h new file mode 100644 index 00000000000..aafd4d322ec --- /dev/null +++ b/arch/h8300/include/asm/thread_info.h @@ -0,0 +1,104 @@ +/* thread_info.h: h8300 low-level thread information + * adapted from the i386 and PPC versions by Yoshinori Sato + * + * Copyright (C) 2002 David Howells (dhowells@redhat.com) + * - Incorporating suggestions made by Linus Torvalds and Dave Miller + */ + +#ifndef _ASM_THREAD_INFO_H +#define _ASM_THREAD_INFO_H + +#include + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +/* + * low level task data. + * If you change this, change the TI_* offsets below to match. + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + int cpu; /* cpu we're on */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ + struct restart_block restart_block; +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = 1, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + + +/* + * Size of kernel stack for each process. This must be a power of 2... 
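+ * The power-of-two requirement lets current_thread_info() below recover
+ * the thread_info pointer by simply masking the stack pointer.  For
+ * illustration (hypothetical sp value), with the 8 KiB stack used here:
+ *
+ *	sp & ~(THREAD_SIZE - 1)  ==  0x00ffe7c4 & ~0x1fff  ==  0x00ffe000
+ *
+ * i.e. the base of the stack area, where the thread_info is placed.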
+ */ +#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE 8192 /* 2 pages */ + + +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + struct thread_info *ti; + __asm__( + "mov.l sp, %0 \n\t" + "and.l %1, %0" + : "=&r"(ti) + : "i" (~(THREAD_SIZE-1)) + ); + return ti; +} + +#endif /* __ASSEMBLY__ */ + +/* + * Offsets in thread_info structure, used in assembly code + */ +#define TI_TASK 0 +#define TI_EXECDOMAIN 4 +#define TI_FLAGS 8 +#define TI_CPU 12 +#define TI_PRE_COUNT 16 + +#define PREEMPT_ACTIVE 0x4000000 + +/* + * thread information flag bit numbers + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling + TIF_NEED_RESCHED */ +#define TIF_MEMDIE 4 +#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ + +/* as above, but as bit values */ +#define _TIF_SYSCALL_TRACE (1< + +#endif + +#endif diff --git a/arch/h8300/include/asm/tlbflush.h b/arch/h8300/include/asm/tlbflush.h new file mode 100644 index 00000000000..41c148a9208 --- /dev/null +++ b/arch/h8300/include/asm/tlbflush.h @@ -0,0 +1,55 @@ +#ifndef _H8300_TLBFLUSH_H +#define _H8300_TLBFLUSH_H + +/* + * Copyright (C) 2000 Lineo, David McCullough + * Copyright (C) 2000-2002, Greg Ungerer + */ + +#include + +/* + * flush all user-space atc entries. + */ +static inline void __flush_tlb(void) +{ + BUG(); +} + +static inline void __flush_tlb_one(unsigned long addr) +{ + BUG(); +} + +#define flush_tlb() __flush_tlb() + +/* + * flush all atc entries (both kernel and user-space entries). + */ +static inline void flush_tlb_all(void) +{ + BUG(); +} + +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + BUG(); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + BUG(); +} + +static inline void flush_tlb_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + BUG(); +} + +static inline void flush_tlb_kernel_page(unsigned long addr) +{ + BUG(); +} + +#endif /* _H8300_TLBFLUSH_H */ diff --git a/arch/h8300/include/asm/topology.h b/arch/h8300/include/asm/topology.h new file mode 100644 index 00000000000..fdc121924d4 --- /dev/null +++ b/arch/h8300/include/asm/topology.h @@ -0,0 +1,6 @@ +#ifndef _ASM_H8300_TOPOLOGY_H +#define _ASM_H8300_TOPOLOGY_H + +#include + +#endif /* _ASM_H8300_TOPOLOGY_H */ diff --git a/arch/h8300/include/asm/traps.h b/arch/h8300/include/asm/traps.h new file mode 100644 index 00000000000..41cf6be02f6 --- /dev/null +++ b/arch/h8300/include/asm/traps.h @@ -0,0 +1,37 @@ +/* + * linux/include/asm-h8300/traps.h + * + * Copyright (C) 2003 Yoshinori Sato + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. 
+ */ + +#ifndef _H8300_TRAPS_H +#define _H8300_TRAPS_H + +extern void system_call(void); +extern void interrupt_entry(void); +extern void trace_break(void); + +#define JMP_OP 0x5a000000 +#define JSR_OP 0x5e000000 +#define VECTOR(address) ((JMP_OP)|((unsigned long)address)) +#define REDIRECT(address) ((JSR_OP)|((unsigned long)address)) + +#define TRACE_VEC 5 + +#define TRAP0_VEC 8 +#define TRAP1_VEC 9 +#define TRAP2_VEC 10 +#define TRAP3_VEC 11 + +#if defined(__H8300H__) +#define NR_TRAPS 12 +#endif +#if defined(__H8300S__) +#define NR_TRAPS 16 +#endif + +#endif /* _H8300_TRAPS_H */ diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h new file mode 100644 index 00000000000..12875190b15 --- /dev/null +++ b/arch/h8300/include/asm/types.h @@ -0,0 +1,33 @@ +#ifndef _H8300_TYPES_H +#define _H8300_TYPES_H + +#include + +#if !defined(__ASSEMBLY__) + +/* + * This file is never included by application software unless + * explicitly requested (e.g., via linux/types.h) in which case the + * application is Linux specific so (user-) name space pollution is + * not a major issue. However, for interoperability, libraries still + * need to be careful to avoid a name clashes. + */ + +typedef unsigned short umode_t; + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#define BITS_PER_LONG 32 + +/* Dma addresses are 32-bits wide. */ + +typedef u32 dma_addr_t; + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _H8300_TYPES_H */ diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h new file mode 100644 index 00000000000..356068cd087 --- /dev/null +++ b/arch/h8300/include/asm/uaccess.h @@ -0,0 +1,162 @@ +#ifndef __H8300_UACCESS_H +#define __H8300_UACCESS_H + +/* + * User space memory access functions + */ +#include +#include +#include + +#include + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* We let the MMU do all checking */ +#define access_ok(type, addr, size) __access_ok((unsigned long)addr,size) +static inline int __access_ok(unsigned long addr, unsigned long size) +{ +#define RANGE_CHECK_OK(addr, size, lower, upper) \ + (((addr) >= (lower)) && (((addr) + (size)) < (upper))) + + extern unsigned long _ramend; + return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend)); +} + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +/* Returns 0 if exception not found and fixup otherwise. */ +extern unsigned long search_exception_table(unsigned long); + + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. 
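+ *
+ * A minimal usage sketch follows (illustration only; example_read_flag()
+ * and uptr are hypothetical and not part of this header).  On this
+ * no-MMU port get_user() simply dereferences the pointer and returns 0
+ * for the supported sizes; on MMU ports the same call may return -EFAULT.
+ */
+#if 0	/* illustration only, never compiled */
+static int example_read_flag(int *uptr, int *out)
+{
+	int val;
+
+	if (get_user(val, uptr))	/* 0 on success */
+		return -EFAULT;
+	*out = val;
+	return 0;
+}
+#endif
+/*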
+ */ + +#define put_user(x, ptr) \ +({ \ + int __pu_err = 0; \ + typeof(*(ptr)) __pu_val = (x); \ + switch (sizeof (*(ptr))) { \ + case 1: \ + case 2: \ + case 4: \ + *(ptr) = (__pu_val); \ + break; \ + case 8: \ + memcpy(ptr, &__pu_val, sizeof (*(ptr))); \ + break; \ + default: \ + __pu_err = __put_user_bad(); \ + break; \ + } \ + __pu_err; \ +}) +#define __put_user(x, ptr) put_user(x, ptr) + +extern int __put_user_bad(void); + +/* + * Tell gcc we read from memory instead of writing: this is because + * we do not write to any memory gcc knows about, so there are no + * aliasing issues. + */ + +#define __ptr(x) ((unsigned long *)(x)) + +/* + * Tell gcc we read from memory instead of writing: this is because + * we do not write to any memory gcc knows about, so there are no + * aliasing issues. + */ + +#define get_user(x, ptr) \ +({ \ + int __gu_err = 0; \ + typeof(*(ptr)) __gu_val = *ptr; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + case 2: \ + case 4: \ + case 8: \ + break; \ + default: \ + __gu_err = __get_user_bad(); \ + __gu_val = 0; \ + break; \ + } \ + (x) = __gu_val; \ + __gu_err; \ +}) +#define __get_user(x, ptr) get_user(x, ptr) + +extern int __get_user_bad(void); + +#define copy_from_user(to, from, n) (memcpy(to, from, n), 0) +#define copy_to_user(to, from, n) (memcpy(to, from, n), 0) + +#define __copy_from_user(to, from, n) copy_from_user(to, from, n) +#define __copy_to_user(to, from, n) copy_to_user(to, from, n) +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; }) + +#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; }) + +/* + * Copy a null terminated string from userspace. + */ + +static inline long +strncpy_from_user(char *dst, const char *src, long count) +{ + char *tmp; + strncpy(dst, src, count); + for (tmp = dst; *tmp && count > 0; tmp++, count--) + ; + return(tmp - dst); /* DAVIDM should we count a NUL ? 
check getname */ +} + +/* + * Return the size of a string (including the ending 0) + * + * Return 0 on exception, a value greater than N if too long + */ +static inline long strnlen_user(const char *src, long n) +{ + return(strlen(src) + 1); /* DAVIDM make safer */ +} + +#define strlen_user(str) strnlen_user(str, 32767) + +/* + * Zero Userspace + */ + +static inline unsigned long +clear_user(void *to, unsigned long n) +{ + memset(to, 0, n); + return 0; +} + +#endif /* _H8300_UACCESS_H */ diff --git a/arch/h8300/include/asm/ucontext.h b/arch/h8300/include/asm/ucontext.h new file mode 100644 index 00000000000..0bcf8f85fab --- /dev/null +++ b/arch/h8300/include/asm/ucontext.h @@ -0,0 +1,12 @@ +#ifndef _H8300_UCONTEXT_H +#define _H8300_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif diff --git a/arch/h8300/include/asm/unaligned.h b/arch/h8300/include/asm/unaligned.h new file mode 100644 index 00000000000..b8d06c70c2d --- /dev/null +++ b/arch/h8300/include/asm/unaligned.h @@ -0,0 +1,11 @@ +#ifndef _ASM_H8300_UNALIGNED_H +#define _ASM_H8300_UNALIGNED_H + +#include +#include +#include + +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be + +#endif /* _ASM_H8300_UNALIGNED_H */ diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h new file mode 100644 index 00000000000..99f3c3561ec --- /dev/null +++ b/arch/h8300/include/asm/unistd.h @@ -0,0 +1,364 @@ +#ifndef _ASM_H8300_UNISTD_H_ +#define _ASM_H8300_UNISTD_H_ + +/* + * This file contains the system call numbers. + */ + +#define __NR_restart_syscall 0 +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_open 5 +#define __NR_close 6 +#define __NR_waitpid 7 +#define __NR_creat 8 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_execve 11 +#define __NR_chdir 12 +#define __NR_time 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_lchown 16 +#define __NR_break 17 +#define __NR_oldstat 18 +#define __NR_lseek 19 +#define __NR_getpid 20 +#define __NR_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_ptrace 26 +#define __NR_alarm 27 +#define __NR_oldfstat 28 +#define __NR_pause 29 +#define __NR_utime 30 +#define __NR_stty 31 +#define __NR_gtty 32 +#define __NR_access 33 +#define __NR_nice 34 +#define __NR_ftime 35 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_rename 38 +#define __NR_mkdir 39 +#define __NR_rmdir 40 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_times 43 +#define __NR_prof 44 +#define __NR_brk 45 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_signal 48 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_acct 51 +#define __NR_umount2 52 +#define __NR_lock 53 +#define __NR_ioctl 54 +#define __NR_fcntl 55 +#define __NR_mpx 56 +#define __NR_setpgid 57 +#define __NR_ulimit 58 +#define __NR_oldolduname 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_ustat 62 +#define __NR_dup2 63 +#define __NR_getppid 64 +#define __NR_getpgrp 65 +#define __NR_setsid 66 +#define __NR_sigaction 67 +#define __NR_sgetmask 68 +#define __NR_ssetmask 69 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_sigsuspend 72 +#define __NR_sigpending 73 +#define __NR_sethostname 74 +#define __NR_setrlimit 75 +#define __NR_getrlimit 76 +#define 
__NR_getrusage 77 +#define __NR_gettimeofday 78 +#define __NR_settimeofday 79 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_select 82 +#define __NR_symlink 83 +#define __NR_oldlstat 84 +#define __NR_readlink 85 +#define __NR_uselib 86 +#define __NR_swapon 87 +#define __NR_reboot 88 +#define __NR_readdir 89 +#define __NR_mmap 90 +#define __NR_munmap 91 +#define __NR_truncate 92 +#define __NR_ftruncate 93 +#define __NR_fchmod 94 +#define __NR_fchown 95 +#define __NR_getpriority 96 +#define __NR_setpriority 97 +#define __NR_profil 98 +#define __NR_statfs 99 +#define __NR_fstatfs 100 +#define __NR_ioperm 101 +#define __NR_socketcall 102 +#define __NR_syslog 103 +#define __NR_setitimer 104 +#define __NR_getitimer 105 +#define __NR_stat 106 +#define __NR_lstat 107 +#define __NR_fstat 108 +#define __NR_olduname 109 +#define __NR_iopl 110 +#define __NR_vhangup 111 +#define __NR_idle 112 +#define __NR_vm86old 113 +#define __NR_wait4 114 +#define __NR_swapoff 115 +#define __NR_sysinfo 116 +#define __NR_ipc 117 +#define __NR_fsync 118 +#define __NR_sigreturn 119 +#define __NR_clone 120 +#define __NR_setdomainname 121 +#define __NR_uname 122 +#define __NR_modify_ldt 123 +#define __NR_adjtimex 124 +#define __NR_mprotect 125 +#define __NR_sigprocmask 126 +#define __NR_create_module 127 +#define __NR_init_module 128 +#define __NR_delete_module 129 +#define __NR_get_kernel_syms 130 +#define __NR_quotactl 131 +#define __NR_getpgid 132 +#define __NR_fchdir 133 +#define __NR_bdflush 134 +#define __NR_sysfs 135 +#define __NR_personality 136 +#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR_getdents 141 +#define __NR__newselect 142 +#define __NR_flock 143 +#define __NR_msync 144 +#define __NR_readv 145 +#define __NR_writev 146 +#define __NR_getsid 147 +#define __NR_fdatasync 148 +#define __NR__sysctl 149 +#define __NR_mlock 150 +#define __NR_munlock 151 +#define __NR_mlockall 152 +#define __NR_munlockall 153 +#define __NR_sched_setparam 154 +#define __NR_sched_getparam 155 +#define __NR_sched_setscheduler 156 +#define __NR_sched_getscheduler 157 +#define __NR_sched_yield 158 +#define __NR_sched_get_priority_max 159 +#define __NR_sched_get_priority_min 160 +#define __NR_sched_rr_get_interval 161 +#define __NR_nanosleep 162 +#define __NR_mremap 163 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +#define __NR_vm86 166 +#define __NR_query_module 167 +#define __NR_poll 168 +#define __NR_nfsservctl 169 +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_prctl 172 +#define __NR_rt_sigreturn 173 +#define __NR_rt_sigaction 174 +#define __NR_rt_sigprocmask 175 +#define __NR_rt_sigpending 176 +#define __NR_rt_sigtimedwait 177 +#define __NR_rt_sigqueueinfo 178 +#define __NR_rt_sigsuspend 179 +#define __NR_pread64 180 +#define __NR_pwrite64 181 +#define __NR_chown 182 +#define __NR_getcwd 183 +#define __NR_capget 184 +#define __NR_capset 185 +#define __NR_sigaltstack 186 +#define __NR_sendfile 187 +#define __NR_getpmsg 188 /* some people actually want streams */ +#define __NR_putpmsg 189 /* some people actually want streams */ +#define __NR_vfork 190 +#define __NR_ugetrlimit 191 +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#define __NR_lchown32 198 +#define __NR_getuid32 199 +#define __NR_getgid32 200 +#define __NR_geteuid32 201 +#define __NR_getegid32 
202 +#define __NR_setreuid32 203 +#define __NR_setregid32 204 +#define __NR_getgroups32 205 +#define __NR_setgroups32 206 +#define __NR_fchown32 207 +#define __NR_setresuid32 208 +#define __NR_getresuid32 209 +#define __NR_setresgid32 210 +#define __NR_getresgid32 211 +#define __NR_chown32 212 +#define __NR_setuid32 213 +#define __NR_setgid32 214 +#define __NR_setfsuid32 215 +#define __NR_setfsgid32 216 +#define __NR_pivot_root 217 +#define __NR_mincore 218 +#define __NR_madvise 219 +#define __NR_madvise1 219 +#define __NR_getdents64 220 +#define __NR_fcntl64 221 +/* 223 is unused */ +#define __NR_gettid 224 +#define __NR_readahead 225 +#define __NR_setxattr 226 +#define __NR_lsetxattr 227 +#define __NR_fsetxattr 228 +#define __NR_getxattr 229 +#define __NR_lgetxattr 230 +#define __NR_fgetxattr 231 +#define __NR_listxattr 232 +#define __NR_llistxattr 233 +#define __NR_flistxattr 234 +#define __NR_removexattr 235 +#define __NR_lremovexattr 236 +#define __NR_fremovexattr 237 +#define __NR_tkill 238 +#define __NR_sendfile64 239 +#define __NR_futex 240 +#define __NR_sched_setaffinity 241 +#define __NR_sched_getaffinity 242 +#define __NR_set_thread_area 243 +#define __NR_get_thread_area 244 +#define __NR_io_setup 245 +#define __NR_io_destroy 246 +#define __NR_io_getevents 247 +#define __NR_io_submit 248 +#define __NR_io_cancel 249 +#define __NR_fadvise64 250 +/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ +#define __NR_exit_group 252 +#define __NR_lookup_dcookie 253 +#define __NR_epoll_create 254 +#define __NR_epoll_ctl 255 +#define __NR_epoll_wait 256 +#define __NR_remap_file_pages 257 +#define __NR_set_tid_address 258 +#define __NR_timer_create 259 +#define __NR_timer_settime (__NR_timer_create+1) +#define __NR_timer_gettime (__NR_timer_create+2) +#define __NR_timer_getoverrun (__NR_timer_create+3) +#define __NR_timer_delete (__NR_timer_create+4) +#define __NR_clock_settime (__NR_timer_create+5) +#define __NR_clock_gettime (__NR_timer_create+6) +#define __NR_clock_getres (__NR_timer_create+7) +#define __NR_clock_nanosleep (__NR_timer_create+8) +#define __NR_statfs64 268 +#define __NR_fstatfs64 269 +#define __NR_tgkill 270 +#define __NR_utimes 271 +#define __NR_fadvise64_64 272 +#define __NR_vserver 273 +#define __NR_mbind 274 +#define __NR_get_mempolicy 275 +#define __NR_set_mempolicy 276 +#define __NR_mq_open 277 +#define __NR_mq_unlink (__NR_mq_open+1) +#define __NR_mq_timedsend (__NR_mq_open+2) +#define __NR_mq_timedreceive (__NR_mq_open+3) +#define __NR_mq_notify (__NR_mq_open+4) +#define __NR_mq_getsetattr (__NR_mq_open+5) +#define __NR_kexec_load 283 +#define __NR_waitid 284 +/* #define __NR_sys_setaltroot 285 */ +#define __NR_add_key 286 +#define __NR_request_key 287 +#define __NR_keyctl 288 +#define __NR_ioprio_set 289 +#define __NR_ioprio_get 290 +#define __NR_inotify_init 291 +#define __NR_inotify_add_watch 292 +#define __NR_inotify_rm_watch 293 +#define __NR_migrate_pages 294 +#define __NR_openat 295 +#define __NR_mkdirat 296 +#define __NR_mknodat 297 +#define __NR_fchownat 298 +#define __NR_futimesat 299 +#define __NR_fstatat64 300 +#define __NR_unlinkat 301 +#define __NR_renameat 302 +#define __NR_linkat 303 +#define __NR_symlinkat 304 +#define __NR_readlinkat 305 +#define __NR_fchmodat 306 +#define __NR_faccessat 307 +#define __NR_pselect6 308 +#define __NR_ppoll 309 +#define __NR_unshare 310 +#define __NR_set_robust_list 311 +#define __NR_get_robust_list 312 +#define __NR_splice 313 +#define __NR_sync_file_range 314 +#define __NR_tee 315 +#define 
__NR_vmsplice 316 +#define __NR_move_pages 317 +#define __NR_getcpu 318 +#define __NR_epoll_pwait 319 + +#ifdef __KERNEL__ + +#define NR_syscalls 320 + +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_OLD_STAT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_ALARM +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_SGETMASK +#define __ARCH_WANT_SYS_SIGNAL +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_WAITPID +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_RT_SIGACTION + +/* + * "Conditional" syscalls + */ +#define cond_syscall(name) \ + asm (".weak\t_" #name "\n" \ + ".set\t_" #name ",_sys_ni_syscall"); + +#endif /* __KERNEL__ */ +#endif /* _ASM_H8300_UNISTD_H_ */ diff --git a/arch/h8300/include/asm/user.h b/arch/h8300/include/asm/user.h new file mode 100644 index 00000000000..14a9e18950f --- /dev/null +++ b/arch/h8300/include/asm/user.h @@ -0,0 +1,75 @@ +#ifndef _H8300_USER_H +#define _H8300_USER_H + +#include + +/* Core file format: The core file is written in such a way that gdb + can understand it and provide useful information to the user (under + linux we use the 'trad-core' bfd). There are quite a number of + obstacles to being able to view the contents of the floating point + registers, and until these are solved you will not be able to view the + contents of them. Actually, you can read in the core file and look at + the contents of the user struct to find out what the floating point + registers contain. + The actual file contents are as follows: + UPAGE: 1 page consisting of a user struct that tells gdb what is present + in the file. Directly after this is a copy of the task_struct, which + is currently not used by gdb, but it may come in useful at some point. + All of the registers are stored as part of the upage. The upage should + always be only one page. + DATA: The data area is stored. We use current->end_text to + current->brk to pick up all of the user variables, plus any memory + that may have been malloced. No attempt is made to determine if a page + is demand-zero or if a page is totally unused, we just cover the entire + range. All of the addresses are rounded in such a way that an integral + number of pages is written. + STACK: We need the stack information in order to get a meaningful + backtrace. We need to write the data from (esp) to + current->start_stack, so we round each of these off in order to be able + to write an integer number of pages. + The minimum core file size is 3 pages, or 12288 bytes. +*/ + +/* This is the old layout of "struct pt_regs" as of Linux 1.x, and + is still the layout used by user (the new pt_regs doesn't have + all registers). */ +struct user_regs_struct { + long er1,er2,er3,er4,er5,er6; + long er0; + long usp; + long orig_er0; + short ccr; + long pc; +}; + + +/* When the kernel dumps core, it starts by dumping the user struct - + this will be used by gdb to figure out where the data and stack segments + are within the file, and what virtual addresses to use. */ +struct user{ +/* We start with the registers, to mimic the way that "memory" is returned + from the ptrace(3,...) function. 
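+   For illustration, a debugger poking the user area with something like
+   ptrace(PTRACE_PEEKUSR, pid, offsetof(struct user, regs.usp), 0) would,
+   under this layout, read back the saved user stack pointer (assuming the
+   port's ptrace code indexes PTRACE_PEEKUSR by these struct user offsets).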
*/ + struct user_regs_struct regs; /* Where the registers are actually stored */ +/* ptrace does not yet supply these. Someday.... */ +/* The rest of this junk is to help gdb figure out what goes where */ + unsigned long int u_tsize; /* Text segment size (pages). */ + unsigned long int u_dsize; /* Data segment size (pages). */ + unsigned long int u_ssize; /* Stack segment size (pages). */ + unsigned long start_code; /* Starting virtual address of text. */ + unsigned long start_stack; /* Starting virtual address of stack area. + This is actually the bottom of the stack, + the top of the stack is always found in the + esp register. */ + long int signal; /* Signal that caused the core dump. */ + int reserved; /* No longer used */ + unsigned long u_ar0; /* Used by gdb to help find the values for */ + /* the registers. */ + unsigned long magic; /* To uniquely identify a core file */ + char u_comm[32]; /* User command that was responsible */ +}; +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif diff --git a/arch/h8300/include/asm/virtconvert.h b/arch/h8300/include/asm/virtconvert.h new file mode 100644 index 00000000000..19cfd62b11c --- /dev/null +++ b/arch/h8300/include/asm/virtconvert.h @@ -0,0 +1,20 @@ +#ifndef __H8300_VIRT_CONVERT__ +#define __H8300_VIRT_CONVERT__ + +/* + * Macros used for converting between virtual and physical mappings. + */ + +#ifdef __KERNEL__ + +#include +#include + +#define phys_to_virt(vaddr) ((void *) (vaddr)) +#define virt_to_phys(vaddr) ((unsigned long) (vaddr)) + +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +#endif +#endif diff --git a/include/asm-h8300/Kbuild b/include/asm-h8300/Kbuild deleted file mode 100644 index c68e1680da0..00000000000 --- a/include/asm-h8300/Kbuild +++ /dev/null @@ -1 +0,0 @@ -include include/asm-generic/Kbuild.asm diff --git a/include/asm-h8300/a.out.h b/include/asm-h8300/a.out.h deleted file mode 100644 index ded780f0a49..00000000000 --- a/include/asm-h8300/a.out.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __H8300_A_OUT_H__ -#define __H8300_A_OUT_H__ - -struct exec -{ - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ - unsigned a_text; /* length of text, in bytes */ - unsigned a_data; /* length of data, in bytes */ - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ - unsigned a_syms; /* length of symbol table data in file, in bytes */ - unsigned a_entry; /* start address */ - unsigned a_trsize; /* length of relocation info for text, in bytes */ - unsigned a_drsize; /* length of relocation info for data, in bytes */ -}; - -#define N_TRSIZE(a) ((a).a_trsize) -#define N_DRSIZE(a) ((a).a_drsize) -#define N_SYMSIZE(a) ((a).a_syms) - -#endif /* __H8300_A_OUT_H__ */ diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h deleted file mode 100644 index b4cf0ea97ed..00000000000 --- a/include/asm-h8300/atomic.h +++ /dev/null @@ -1,144 +0,0 @@ -#ifndef __ARCH_H8300_ATOMIC__ -#define __ARCH_H8300_ATOMIC__ - -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc.. 
- */ - -typedef struct { int counter; } atomic_t; -#define ATOMIC_INIT(i) { (i) } - -#define atomic_read(v) ((v)->counter) -#define atomic_set(v, i) (((v)->counter) = i) - -#include -#include - -static __inline__ int atomic_add_return(int i, atomic_t *v) -{ - int ret,flags; - local_irq_save(flags); - ret = v->counter += i; - local_irq_restore(flags); - return ret; -} - -#define atomic_add(i, v) atomic_add_return(i, v) -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) - -static __inline__ int atomic_sub_return(int i, atomic_t *v) -{ - int ret,flags; - local_irq_save(flags); - ret = v->counter -= i; - local_irq_restore(flags); - return ret; -} - -#define atomic_sub(i, v) atomic_sub_return(i, v) -#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0) - -static __inline__ int atomic_inc_return(atomic_t *v) -{ - int ret,flags; - local_irq_save(flags); - v->counter++; - ret = v->counter; - local_irq_restore(flags); - return ret; -} - -#define atomic_inc(v) atomic_inc_return(v) - -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -static __inline__ int atomic_dec_return(atomic_t *v) -{ - int ret,flags; - local_irq_save(flags); - --v->counter; - ret = v->counter; - local_irq_restore(flags); - return ret; -} - -#define atomic_dec(v) atomic_dec_return(v) - -static __inline__ int atomic_dec_and_test(atomic_t *v) -{ - int ret,flags; - local_irq_save(flags); - --v->counter; - ret = v->counter; - local_irq_restore(flags); - return ret == 0; -} - -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -{ - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (likely(ret == old)) - v->counter = new; - local_irq_restore(flags); - return ret; -} - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -static inline int atomic_add_unless(atomic_t *v, int a, int u) -{ - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (ret != u) - v->counter += a; - local_irq_restore(flags); - return ret != u; -} -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc ccr,r1l\n\t" - "orc #0x80,ccr\n\t" - "mov.l %0,er0\n\t" - "and.l %1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r1l,ccr" - : "=m" (*v) : "g" (~(mask)) :"er0","er1"); -} - -static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc ccr,r1l\n\t" - "orc #0x80,ccr\n\t" - "mov.l %0,er0\n\t" - "or.l %1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r1l,ccr" - : "=m" (*v) : "g" (mask) :"er0","er1"); -} - -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - -#include -#endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/include/asm-h8300/auxvec.h b/include/asm-h8300/auxvec.h deleted file mode 100644 index 1d36fe38b08..00000000000 --- a/include/asm-h8300/auxvec.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef __ASMH8300_AUXVEC_H -#define __ASMH8300_AUXVEC_H - -#endif diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h deleted file mode 100644 index cb18e3b0aa9..00000000000 --- a/include/asm-h8300/bitops.h +++ /dev/null @@ 
-1,212 +0,0 @@ -#ifndef _H8300_BITOPS_H -#define _H8300_BITOPS_H - -/* - * Copyright 1992, Linus Torvalds. - * Copyright 2002, Yoshinori Sato - */ - -#include -#include - -#ifdef __KERNEL__ - -#ifndef _LINUX_BITOPS_H -#error only can be included directly -#endif - -/* - * Function prototypes to keep gcc -Wall happy - */ - -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - */ -static __inline__ unsigned long ffz(unsigned long word) -{ - unsigned long result; - - result = -1; - __asm__("1:\n\t" - "shlr.l %2\n\t" - "adds #1,%0\n\t" - "bcs 1b" - : "=r" (result) - : "0" (result),"r" (word)); - return result; -} - -#define H8300_GEN_BITOP_CONST(OP,BIT) \ - case BIT: \ - __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \ - break; - -#define H8300_GEN_BITOP(FNAME,OP) \ -static __inline__ void FNAME(int nr, volatile unsigned long* addr) \ -{ \ - volatile unsigned char *b_addr; \ - b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ - if (__builtin_constant_p(nr)) { \ - switch(nr & 7) { \ - H8300_GEN_BITOP_CONST(OP,0) \ - H8300_GEN_BITOP_CONST(OP,1) \ - H8300_GEN_BITOP_CONST(OP,2) \ - H8300_GEN_BITOP_CONST(OP,3) \ - H8300_GEN_BITOP_CONST(OP,4) \ - H8300_GEN_BITOP_CONST(OP,5) \ - H8300_GEN_BITOP_CONST(OP,6) \ - H8300_GEN_BITOP_CONST(OP,7) \ - } \ - } else { \ - __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \ - } \ -} - -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - -H8300_GEN_BITOP(set_bit ,"bset") -H8300_GEN_BITOP(clear_bit ,"bclr") -H8300_GEN_BITOP(change_bit,"bnot") -#define __set_bit(nr,addr) set_bit((nr),(addr)) -#define __clear_bit(nr,addr) clear_bit((nr),(addr)) -#define __change_bit(nr,addr) change_bit((nr),(addr)) - -#undef H8300_GEN_BITOP -#undef H8300_GEN_BITOP_CONST - -static __inline__ int test_bit(int nr, const unsigned long* addr) -{ - return (*((volatile unsigned char *)addr + - ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0; -} - -#define __test_bit(nr, addr) test_bit(nr, addr) - -#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \ - case BIT: \ - __asm__("stc ccr,%w1\n\t" \ - "orc #0x80,ccr\n\t" \ - "bld #" #BIT ",@%4\n\t" \ - OP " #" #BIT ",@%4\n\t" \ - "rotxl.l %0\n\t" \ - "ldc %w1,ccr" \ - : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ - : "0" (retval),"r" (b_addr) \ - : "memory"); \ - break; - -#define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \ - case BIT: \ - __asm__("bld #" #BIT ",@%3\n\t" \ - OP " #" #BIT ",@%3\n\t" \ - "rotxl.l %0\n\t" \ - : "=r"(retval),"=m"(*b_addr) \ - : "0" (retval),"r" (b_addr) \ - : "memory"); \ - break; - -#define H8300_GEN_TEST_BITOP(FNNAME,OP) \ -static __inline__ int FNNAME(int nr, volatile void * addr) \ -{ \ - int retval = 0; \ - char ccrsave; \ - volatile unsigned char *b_addr; \ - b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ - if (__builtin_constant_p(nr)) { \ - switch(nr & 7) { \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \ - } \ - } else { \ - __asm__("stc ccr,%w1\n\t" \ - "orc #0x80,ccr\n\t" \ - "btst %w5,@%4\n\t" \ - OP " %w5,@%4\n\t" \ - "beq 1f\n\t" \ - "inc.l #1,%0\n" \ - "1:\n\t" \ - "ldc %w1,ccr" \ - : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ - : "0" 
(retval),"r" (b_addr),"r"(nr) \ - : "memory"); \ - } \ - return retval; \ -} \ - \ -static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \ -{ \ - int retval = 0; \ - volatile unsigned char *b_addr; \ - b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ - if (__builtin_constant_p(nr)) { \ - switch(nr & 7) { \ - H8300_GEN_TEST_BITOP_CONST(OP,0) \ - H8300_GEN_TEST_BITOP_CONST(OP,1) \ - H8300_GEN_TEST_BITOP_CONST(OP,2) \ - H8300_GEN_TEST_BITOP_CONST(OP,3) \ - H8300_GEN_TEST_BITOP_CONST(OP,4) \ - H8300_GEN_TEST_BITOP_CONST(OP,5) \ - H8300_GEN_TEST_BITOP_CONST(OP,6) \ - H8300_GEN_TEST_BITOP_CONST(OP,7) \ - } \ - } else { \ - __asm__("btst %w4,@%3\n\t" \ - OP " %w4,@%3\n\t" \ - "beq 1f\n\t" \ - "inc.l #1,%0\n" \ - "1:" \ - : "=r"(retval),"=m"(*b_addr) \ - : "0" (retval),"r" (b_addr),"r"(nr) \ - : "memory"); \ - } \ - return retval; \ -} - -H8300_GEN_TEST_BITOP(test_and_set_bit, "bset") -H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr") -H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") -#undef H8300_GEN_TEST_BITOP_CONST -#undef H8300_GEN_TEST_BITOP_CONST_INT -#undef H8300_GEN_TEST_BITOP - -#include - -static __inline__ unsigned long __ffs(unsigned long word) -{ - unsigned long result; - - result = -1; - __asm__("1:\n\t" - "shlr.l %2\n\t" - "adds #1,%0\n\t" - "bcc 1b" - : "=r" (result) - : "0"(result),"r"(word)); - return result; -} - -#include -#include -#include -#include -#include -#include -#include - -#endif /* __KERNEL__ */ - -#include -#include - -#endif /* _H8300_BITOPS_H */ diff --git a/include/asm-h8300/bootinfo.h b/include/asm-h8300/bootinfo.h deleted file mode 100644 index 5bed7e7aac0..00000000000 --- a/include/asm-h8300/bootinfo.h +++ /dev/null @@ -1,2 +0,0 @@ - -/* Nothing for h8300 */ diff --git a/include/asm-h8300/bug.h b/include/asm-h8300/bug.h deleted file mode 100644 index edddf5b086e..00000000000 --- a/include/asm-h8300/bug.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _H8300_BUG_H -#define _H8300_BUG_H -#include -#endif diff --git a/include/asm-h8300/bugs.h b/include/asm-h8300/bugs.h deleted file mode 100644 index 1cb4afba6eb..00000000000 --- a/include/asm-h8300/bugs.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * include/asm-h8300/bugs.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. 
- * - * Needs: - * void check_bugs(void); - */ - -static void check_bugs(void) -{ -} diff --git a/include/asm-h8300/byteorder.h b/include/asm-h8300/byteorder.h deleted file mode 100644 index 36e597d6161..00000000000 --- a/include/asm-h8300/byteorder.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _H8300_BYTEORDER_H -#define _H8300_BYTEORDER_H - -#include - -#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) -# define __BYTEORDER_HAS_U64__ -# define __SWAB_64_THRU_32__ -#endif - -#include - -#endif /* _H8300_BYTEORDER_H */ diff --git a/include/asm-h8300/cache.h b/include/asm-h8300/cache.h deleted file mode 100644 index c6350283649..00000000000 --- a/include/asm-h8300/cache.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ARCH_H8300_CACHE_H -#define __ARCH_H8300_CACHE_H - -/* bytes per L1 cache line */ -#define L1_CACHE_BYTES 4 - -/* m68k-elf-gcc 2.95.2 doesn't like these */ - -#define __cacheline_aligned -#define ____cacheline_aligned - -#endif diff --git a/include/asm-h8300/cachectl.h b/include/asm-h8300/cachectl.h deleted file mode 100644 index c464022d8e2..00000000000 --- a/include/asm-h8300/cachectl.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _H8300_CACHECTL_H -#define _H8300_CACHECTL_H - -/* Definitions for the cacheflush system call. */ - -#define FLUSH_SCOPE_LINE 0 /* Flush a cache line */ -#define FLUSH_SCOPE_PAGE 0 /* Flush a page */ -#define FLUSH_SCOPE_ALL 0 /* Flush the whole cache -- superuser only */ - -#define FLUSH_CACHE_DATA 0 /* Writeback and flush data cache */ -#define FLUSH_CACHE_INSN 0 /* Flush instruction cache */ -#define FLUSH_CACHE_BOTH 0 /* Flush both caches */ - -#endif /* _H8300_CACHECTL_H */ diff --git a/include/asm-h8300/cacheflush.h b/include/asm-h8300/cacheflush.h deleted file mode 100644 index 5ffdca217b9..00000000000 --- a/include/asm-h8300/cacheflush.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * (C) Copyright 2002, Yoshinori Sato - */ - -#ifndef _ASM_H8300_CACHEFLUSH_H -#define _ASM_H8300_CACHEFLUSH_H - -/* - * Cache handling functions - * No Cache memory all dummy functions - */ - -#define flush_cache_all() -#define flush_cache_mm(mm) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma,a,b) -#define flush_cache_page(vma,p,pfn) -#define flush_dcache_page(page) -#define flush_dcache_mmap_lock(mapping) -#define flush_dcache_mmap_unlock(mapping) -#define flush_icache() -#define flush_icache_page(vma,page) -#define flush_icache_range(start,len) -#define flush_cache_vmap(start, end) -#define flush_cache_vunmap(start, end) -#define cache_push_v(vaddr,len) -#define cache_push(paddr,len) -#define cache_clear(paddr,len) - -#define flush_dcache_range(a,b) - -#define flush_icache_user_range(vma,page,addr,len) - -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) - -#endif /* _ASM_H8300_CACHEFLUSH_H */ diff --git a/include/asm-h8300/checksum.h b/include/asm-h8300/checksum.h deleted file mode 100644 index 98724e12508..00000000000 --- a/include/asm-h8300/checksum.h +++ /dev/null @@ -1,102 +0,0 @@ -#ifndef _H8300_CHECKSUM_H -#define _H8300_CHECKSUM_H - -/* - * computes the checksum of a memory block at buff, length len, - * and adds in "sum" (32-bit) - * - * returns a 32-bit number suitable for feeding into itself - * or csum_tcpudp_magic - * - * this function must be called with even lengths, except - * for the last fragment, which may be odd - * - * it's best to have buff aligned on a 32-bit boundary - */ -__wsum 
csum_partial(const void *buff, int len, __wsum sum); - -/* - * the same as csum_partial, but copies from src while it - * checksums - * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ - -__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); - - -/* - * the same as csum_partial_copy, but copies from user space. - * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ - -extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *csum_err); - -__sum16 ip_fast_csum(const void *iph, unsigned int ihl); - - -/* - * Fold a partial checksum - */ - -static inline __sum16 csum_fold(__wsum sum) -{ - __asm__("mov.l %0,er0\n\t" - "add.w e0,r0\n\t" - "xor.w e0,e0\n\t" - "rotxl.w e0\n\t" - "add.w e0,r0\n\t" - "sub.w e0,e0\n\t" - "mov.l er0,%0" - : "=r"(sum) - : "0"(sum) - : "er0"); - return (__force __sum16)~sum; -} - - -/* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ - -static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) -{ - __asm__ ("sub.l er0,er0\n\t" - "add.l %2,%0\n\t" - "addx #0,r0l\n\t" - "add.l %3,%0\n\t" - "addx #0,r0l\n\t" - "add.l %4,%0\n\t" - "addx #0,r0l\n\t" - "add.l er0,%0\n\t" - "bcc 1f\n\t" - "inc.l #1,%0\n" - "1:" - : "=&r" (sum) - : "0" (sum), "r" (daddr), "r" (saddr), "r" (len + proto) - :"er0"); - return sum; -} - -static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); -} - -/* - * this routine is used for miscellaneous IP-like checksums, mainly - * in icmp.c - */ - -extern __sum16 ip_compute_csum(const void *buff, int len); - -#endif /* _H8300_CHECKSUM_H */ diff --git a/include/asm-h8300/cputime.h b/include/asm-h8300/cputime.h deleted file mode 100644 index 092e187c7b0..00000000000 --- a/include/asm-h8300/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __H8300_CPUTIME_H -#define __H8300_CPUTIME_H - -#include - -#endif /* __H8300_CPUTIME_H */ diff --git a/include/asm-h8300/current.h b/include/asm-h8300/current.h deleted file mode 100644 index 57d74ee55a1..00000000000 --- a/include/asm-h8300/current.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _H8300_CURRENT_H -#define _H8300_CURRENT_H -/* - * current.h - * (C) Copyright 2000, Lineo, David McCullough - * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) - * - * rather than dedicate a register (as the m68k source does), we - * just keep a global, we should probably just change it all to be - * current and lose _current_task. 
- */ - -#include -#include - -struct task_struct; - -static inline struct task_struct *get_current(void) -{ - return(current_thread_info()->task); -} - -#define current get_current() - -#endif /* _H8300_CURRENT_H */ diff --git a/include/asm-h8300/dbg.h b/include/asm-h8300/dbg.h deleted file mode 100644 index 2c6d1cbcf73..00000000000 --- a/include/asm-h8300/dbg.h +++ /dev/null @@ -1,2 +0,0 @@ -#define DEBUG 1 -#define BREAK asm volatile ("trap #3") diff --git a/include/asm-h8300/delay.h b/include/asm-h8300/delay.h deleted file mode 100644 index 743beba70f8..00000000000 --- a/include/asm-h8300/delay.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _H8300_DELAY_H -#define _H8300_DELAY_H - -#include - -/* - * Copyright (C) 2002 Yoshinori Sato - * - * Delay routines, using a pre-computed "loops_per_second" value. - */ - -static inline void __delay(unsigned long loops) -{ - __asm__ __volatile__ ("1:\n\t" - "dec.l #1,%0\n\t" - "bne 1b" - :"=r" (loops):"0"(loops)); -} - -/* - * Use only for very small delays ( < 1 msec). Should probably use a - * lookup table, really, as the multiplications take much too long with - * short delays. This is a "reasonable" implementation, though (and the - * first constant multiplications gets optimized away if the delay is - * a constant) - */ - -extern unsigned long loops_per_jiffy; - -static inline void udelay(unsigned long usecs) -{ - usecs *= 4295; /* 2**32 / 1000000 */ - usecs /= (loops_per_jiffy*HZ); - if (usecs) - __delay(usecs); -} - -#endif /* _H8300_DELAY_H */ diff --git a/include/asm-h8300/device.h b/include/asm-h8300/device.h deleted file mode 100644 index d8f9872b0e2..00000000000 --- a/include/asm-h8300/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include - diff --git a/include/asm-h8300/div64.h b/include/asm-h8300/div64.h deleted file mode 100644 index 6cd978cefb2..00000000000 --- a/include/asm-h8300/div64.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-h8300/dma.h b/include/asm-h8300/dma.h deleted file mode 100644 index 3edbaaaedf5..00000000000 --- a/include/asm-h8300/dma.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _H8300_DMA_H -#define _H8300_DMA_H - - -/* - * Set number of channels of DMA on ColdFire for different implementations. - */ -#define MAX_DMA_CHANNELS 0 -#define MAX_DMA_ADDRESS PAGE_OFFSET - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -#endif /* _H8300_DMA_H */ diff --git a/include/asm-h8300/elf.h b/include/asm-h8300/elf.h deleted file mode 100644 index a8b57d1f412..00000000000 --- a/include/asm-h8300/elf.h +++ /dev/null @@ -1,104 +0,0 @@ -#ifndef __ASMH8300_ELF_H -#define __ASMH8300_ELF_H - -/* - * ELF register definitions.. - */ - -#include -#include - -typedef unsigned long elf_greg_t; - -#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; -typedef unsigned long elf_fpregset_t; - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ((x)->e_machine == EM_H8_300) - -/* - * These are used to set parameters in the core dumps. 
- */ -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2MSB -#define ELF_ARCH EM_H8_300 -#if defined(__H8300H__) -#define ELF_CORE_EFLAGS 0x810000 -#endif -#if defined(__H8300S__) -#define ELF_CORE_EFLAGS 0x820000 -#endif - -#define ELF_PLAT_INIT(_r) _r->er1 = 0 - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE 0xD0000000UL - -/* This yields a mask that user programs can use to figure out what - instruction set this cpu supports. */ - -#define ELF_HWCAP (0) - -/* This yields a string that ld.so will use to load implementation - specific libraries for optimization. This is more specific in - intent than poking at uname or /proc/cpuinfo. */ - -#define ELF_PLATFORM (NULL) - -#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX) - -#define R_H8_NONE 0 -#define R_H8_DIR32 1 -#define R_H8_DIR32_28 2 -#define R_H8_DIR32_24 3 -#define R_H8_DIR32_16 4 -#define R_H8_DIR32U 6 -#define R_H8_DIR32U_28 7 -#define R_H8_DIR32U_24 8 -#define R_H8_DIR32U_20 9 -#define R_H8_DIR32U_16 10 -#define R_H8_DIR24 11 -#define R_H8_DIR24_20 12 -#define R_H8_DIR24_16 13 -#define R_H8_DIR24U 14 -#define R_H8_DIR24U_20 15 -#define R_H8_DIR24U_16 16 -#define R_H8_DIR16 17 -#define R_H8_DIR16U 18 -#define R_H8_DIR16S_32 19 -#define R_H8_DIR16S_28 20 -#define R_H8_DIR16S_24 21 -#define R_H8_DIR16S_20 22 -#define R_H8_DIR16S 23 -#define R_H8_DIR8 24 -#define R_H8_DIR8U 25 -#define R_H8_DIR8Z_32 26 -#define R_H8_DIR8Z_28 27 -#define R_H8_DIR8Z_24 28 -#define R_H8_DIR8Z_20 29 -#define R_H8_DIR8Z_16 30 -#define R_H8_PCREL16 31 -#define R_H8_PCREL8 32 -#define R_H8_BPOS 33 -#define R_H8_PCREL32 34 -#define R_H8_GOT32O 35 -#define R_H8_GOT16O 36 -#define R_H8_DIR16A8 59 -#define R_H8_DIR16R8 60 -#define R_H8_DIR24A8 61 -#define R_H8_DIR24R8 62 -#define R_H8_DIR32A16 63 -#define R_H8_ABS32 65 -#define R_H8_ABS32A16 127 - -#endif diff --git a/include/asm-h8300/emergency-restart.h b/include/asm-h8300/emergency-restart.h deleted file mode 100644 index 108d8c48e42..00000000000 --- a/include/asm-h8300/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-h8300/errno.h b/include/asm-h8300/errno.h deleted file mode 100644 index 0c2f5641fdc..00000000000 --- a/include/asm-h8300/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_ERRNO_H -#define _H8300_ERRNO_H - -#include - -#endif /* _H8300_ERRNO_H */ diff --git a/include/asm-h8300/fb.h b/include/asm-h8300/fb.h deleted file mode 100644 index c7df3803099..00000000000 --- a/include/asm-h8300/fb.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ -#include - -#define fb_pgprotect(...) 
do {} while (0) - -static inline int fb_is_primary_device(struct fb_info *info) -{ - return 0; -} - -#endif /* _ASM_FB_H_ */ diff --git a/include/asm-h8300/fcntl.h b/include/asm-h8300/fcntl.h deleted file mode 100644 index 1952cb2e3b0..00000000000 --- a/include/asm-h8300/fcntl.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _H8300_FCNTL_H -#define _H8300_FCNTL_H - -#define O_DIRECTORY 040000 /* must be a directory */ -#define O_NOFOLLOW 0100000 /* don't follow links */ -#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ -#define O_LARGEFILE 0400000 - -#include - -#endif /* _H8300_FCNTL_H */ diff --git a/include/asm-h8300/flat.h b/include/asm-h8300/flat.h deleted file mode 100644 index 2a873508a9a..00000000000 --- a/include/asm-h8300/flat.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * include/asm-h8300/flat.h -- uClinux flat-format executables - */ - -#ifndef __H8300_FLAT_H__ -#define __H8300_FLAT_H__ - -#define flat_stack_align(sp) /* nothing needed */ -#define flat_argvp_envp_on_stack() 1 -#define flat_old_ram_flag(flags) 1 -#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) -#define flat_set_persistent(relval, p) 0 - -/* - * on the H8 a couple of the relocations have an instruction in the - * top byte. As there can only be 24bits of address space, we just - * always preserve that 8bits at the top, when it isn't an instruction - * is is 0 (davidm@snapgear.com) - */ - -#define flat_get_relocate_addr(rel) (rel) -#define flat_get_addr_from_rp(rp, relval, flags, persistent) \ - (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff)) -#define flat_put_addr_at_rp(rp, addr, rel) \ - put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp) - -#endif /* __H8300_FLAT_H__ */ diff --git a/include/asm-h8300/fpu.h b/include/asm-h8300/fpu.h deleted file mode 100644 index 4fc416e80be..00000000000 --- a/include/asm-h8300/fpu.h +++ /dev/null @@ -1 +0,0 @@ -/* Nothing do */ diff --git a/include/asm-h8300/futex.h b/include/asm-h8300/futex.h deleted file mode 100644 index 6a332a9f099..00000000000 --- a/include/asm-h8300/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include - -#endif diff --git a/include/asm-h8300/gpio.h b/include/asm-h8300/gpio.h deleted file mode 100644 index a714f0c0efb..00000000000 --- a/include/asm-h8300/gpio.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef _H8300_GPIO_H -#define _H8300_GPIO_H - -#define H8300_GPIO_P1 0 -#define H8300_GPIO_P2 1 -#define H8300_GPIO_P3 2 -#define H8300_GPIO_P4 3 -#define H8300_GPIO_P5 4 -#define H8300_GPIO_P6 5 -#define H8300_GPIO_P7 6 -#define H8300_GPIO_P8 7 -#define H8300_GPIO_P9 8 -#define H8300_GPIO_PA 9 -#define H8300_GPIO_PB 10 -#define H8300_GPIO_PC 11 -#define H8300_GPIO_PD 12 -#define H8300_GPIO_PE 13 -#define H8300_GPIO_PF 14 -#define H8300_GPIO_PG 15 -#define H8300_GPIO_PH 16 - -#define H8300_GPIO_B7 0x80 -#define H8300_GPIO_B6 0x40 -#define H8300_GPIO_B5 0x20 -#define H8300_GPIO_B4 0x10 -#define H8300_GPIO_B3 0x08 -#define H8300_GPIO_B2 0x04 -#define H8300_GPIO_B1 0x02 -#define H8300_GPIO_B0 0x01 - -#define H8300_GPIO_INPUT 0 -#define H8300_GPIO_OUTPUT 1 - -#define H8300_GPIO_RESERVE(port, bits) \ - h8300_reserved_gpio(port, bits) - -#define H8300_GPIO_FREE(port, bits) \ - h8300_free_gpio(port, bits) - -#define H8300_GPIO_DDR(port, bit, dir) \ - h8300_set_gpio_dir(((port) << 8) | (bit), dir) - -#define H8300_GPIO_GETDIR(port, bit) \ - h8300_get_gpio_dir(((port) << 8) | (bit)) - -extern int h8300_reserved_gpio(int port, int bits); -extern int h8300_free_gpio(int 
port, int bits); -extern int h8300_set_gpio_dir(int port_bit, int dir); -extern int h8300_get_gpio_dir(int port_bit); -extern int h8300_init_gpio(void); - -#endif diff --git a/include/asm-h8300/hardirq.h b/include/asm-h8300/hardirq.h deleted file mode 100644 index 9d7f7a7462b..00000000000 --- a/include/asm-h8300/hardirq.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef __H8300_HARDIRQ_H -#define __H8300_HARDIRQ_H - -#include -#include -#include -#include - -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - -extern void ack_bad_irq(unsigned int irq); - -#define HARDIRQ_BITS 8 - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! -#endif - -#endif diff --git a/include/asm-h8300/hw_irq.h b/include/asm-h8300/hw_irq.h deleted file mode 100644 index d75a5a1119e..00000000000 --- a/include/asm-h8300/hw_irq.h +++ /dev/null @@ -1 +0,0 @@ -/* Do Nothing */ diff --git a/include/asm-h8300/io.h b/include/asm-h8300/io.h deleted file mode 100644 index 26dc6ccd944..00000000000 --- a/include/asm-h8300/io.h +++ /dev/null @@ -1,324 +0,0 @@ -#ifndef _H8300_IO_H -#define _H8300_IO_H - -#ifdef __KERNEL__ - -#include - -#if defined(CONFIG_H83007) || defined(CONFIG_H83068) -#include -#elif defined(CONFIG_H8S2678) -#include -#else -#error UNKNOWN CPU TYPE -#endif - - -/* - * These are for ISA/PCI shared memory _only_ and should never be used - * on any other type of memory, including Zorro memory. They are meant to - * access the bus in the bus byte order which is little-endian!. - * - * readX/writeX() are used to access memory mapped devices. On some - * architectures the memory mapped IO stuff needs to be accessed - * differently. On the m68k architecture, we just read/write the - * memory location directly. - */ -/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates - * two accesses to memory, which may be undesireable for some devices. 
- */ - -/* - * swap functions are sometimes needed to interface little-endian hardware - */ - -static inline unsigned short _swapw(volatile unsigned short v) -{ -#ifndef H8300_IO_NOSWAP - unsigned short r; - __asm__("xor.b %w0,%x0\n\t" - "xor.b %x0,%w0\n\t" - "xor.b %w0,%x0" - :"=r"(r) - :"0"(v)); - return r; -#else - return v; -#endif -} - -static inline unsigned long _swapl(volatile unsigned long v) -{ -#ifndef H8300_IO_NOSWAP - unsigned long r; - __asm__("xor.b %w0,%x0\n\t" - "xor.b %x0,%w0\n\t" - "xor.b %w0,%x0\n\t" - "xor.w %e0,%f0\n\t" - "xor.w %f0,%e0\n\t" - "xor.w %e0,%f0\n\t" - "xor.b %w0,%x0\n\t" - "xor.b %x0,%w0\n\t" - "xor.b %w0,%x0" - :"=r"(r) - :"0"(v)); - return r; -#else - return v; -#endif -} - -#define readb(addr) \ - ({ unsigned char __v = \ - *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \ - __v; }) -#define readw(addr) \ - ({ unsigned short __v = \ - *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \ - __v; }) -#define readl(addr) \ - ({ unsigned long __v = \ - *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \ - __v; }) - -#define writeb(b,addr) (void)((*(volatile unsigned char *) \ - ((unsigned long)(addr) & 0x00ffffff)) = (b)) -#define writew(b,addr) (void)((*(volatile unsigned short *) \ - ((unsigned long)(addr) & 0x00ffffff)) = (b)) -#define writel(b,addr) (void)((*(volatile unsigned long *) \ - ((unsigned long)(addr) & 0x00ffffff)) = (b)) -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) - -#define __raw_readb readb -#define __raw_readw readw -#define __raw_readl readl -#define __raw_writeb writeb -#define __raw_writew writew -#define __raw_writel writel - -static inline int h8300_buswidth(unsigned int addr) -{ - return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0; -} - -static inline void io_outsb(unsigned int addr, const void *buf, int len) -{ - volatile unsigned char *ap_b = (volatile unsigned char *) addr; - volatile unsigned short *ap_w = (volatile unsigned short *) addr; - unsigned char *bp = (unsigned char *) buf; - - if(h8300_buswidth(addr) && (addr & 1)) { - while (len--) - *ap_w = *bp++; - } else { - while (len--) - *ap_b = *bp++; - } -} - -static inline void io_outsw(unsigned int addr, const void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *ap = _swapw(*bp++); -} - -static inline void io_outsl(unsigned int addr, const void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *ap = _swapl(*bp++); -} - -static inline void io_outsw_noswap(unsigned int addr, const void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *ap = *bp++; -} - -static inline void io_outsl_noswap(unsigned int addr, const void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *ap = *bp++; -} - -static inline void io_insb(unsigned int addr, void *buf, int len) -{ - volatile unsigned char *ap_b; - volatile unsigned short *ap_w; - unsigned char *bp = (unsigned char *) buf; - - if(h8300_buswidth(addr)) { - ap_w = (volatile unsigned short *)(addr & ~1); - while (len--) - *bp++ = *ap_w & 0xff; - } else { - ap_b = (volatile unsigned char *)addr; - while (len--) 
- *bp++ = *ap_b; - } -} - -static inline void io_insw(unsigned int addr, void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *bp++ = _swapw(*ap); -} - -static inline void io_insl(unsigned int addr, void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *bp++ = _swapl(*ap); -} - -static inline void io_insw_noswap(unsigned int addr, void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *bp++ = *ap; -} - -static inline void io_insl_noswap(unsigned int addr, void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *bp++ = *ap; -} - -/* - * make the short names macros so specific devices - * can override them as required - */ - -#define memset_io(a,b,c) memset((void *)(a),(b),(c)) -#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) -#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) - -#define mmiowb() - -#define inb(addr) ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr)) -#define inw(addr) _swapw(readw(addr)) -#define inl(addr) _swapl(readl(addr)) -#define outb(x,addr) ((void)((h8300_buswidth(addr) && \ - ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr))) -#define outw(x,addr) ((void) writew(_swapw(x),addr)) -#define outl(x,addr) ((void) writel(_swapl(x),addr)) - -#define inb_p(addr) inb(addr) -#define inw_p(addr) inw(addr) -#define inl_p(addr) inl(addr) -#define outb_p(x,addr) outb(x,addr) -#define outw_p(x,addr) outw(x,addr) -#define outl_p(x,addr) outl(x,addr) - -#define outsb(a,b,l) io_outsb(a,b,l) -#define outsw(a,b,l) io_outsw(a,b,l) -#define outsl(a,b,l) io_outsl(a,b,l) - -#define insb(a,b,l) io_insb(a,b,l) -#define insw(a,b,l) io_insw(a,b,l) -#define insl(a,b,l) io_insl(a,b,l) - -#define IO_SPACE_LIMIT 0xffffff - - -/* Values for nocacheflag and cmode */ -#define IOMAP_FULL_CACHING 0 -#define IOMAP_NOCACHE_SER 1 -#define IOMAP_NOCACHE_NONSER 2 -#define IOMAP_WRITETHROUGH 3 - -extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag); -extern void __iounmap(void *addr, unsigned long size); - -static inline void *ioremap(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); -} -static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_FULL_CACHING); -} - -extern void iounmap(void *addr); - -/* H8/300 internal I/O functions */ -static __inline__ unsigned char ctrl_inb(unsigned long addr) -{ - return *(volatile unsigned char*)addr; -} - -static __inline__ unsigned short ctrl_inw(unsigned long addr) -{ - return *(volatile unsigned short*)addr; -} - -static __inline__ unsigned long ctrl_inl(unsigned long addr) -{ - return *(volatile unsigned long*)addr; -} - -static __inline__ void ctrl_outb(unsigned char b, unsigned long addr) -{ - *(volatile unsigned char*)addr = b; -} - -static __inline__ void ctrl_outw(unsigned short b, unsigned long addr) -{ - *(volatile 
unsigned short*)addr = b; -} - -static __inline__ void ctrl_outl(unsigned long b, unsigned long addr) -{ - *(volatile unsigned long*)addr = b; -} - -/* Pages to physical address... */ -#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) -#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT) - -/* - * Macros used for converting between virtual and physical mappings. - */ -#define phys_to_virt(vaddr) ((void *) (vaddr)) -#define virt_to_phys(vaddr) ((unsigned long) (vaddr)) - -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt - -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -#endif /* __KERNEL__ */ - -#endif /* _H8300_IO_H */ diff --git a/include/asm-h8300/ioctl.h b/include/asm-h8300/ioctl.h deleted file mode 100644 index b279fe06dfe..00000000000 --- a/include/asm-h8300/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-h8300/ioctls.h b/include/asm-h8300/ioctls.h deleted file mode 100644 index 98a53d06726..00000000000 --- a/include/asm-h8300/ioctls.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef __ARCH_H8300_IOCTLS_H__ -#define __ARCH_H8300_IOCTLS_H__ - -#include - -/* 0x54 is just a magic number to make these relatively unique ('T') */ - -#define TCGETS 0x5401 -#define TCSETS 0x5402 -#define TCSETSW 0x5403 -#define TCSETSF 0x5404 -#define TCGETA 0x5405 -#define TCSETA 0x5406 -#define TCSETAW 0x5407 -#define TCSETAF 0x5408 -#define TCSBRK 0x5409 -#define TCXONC 0x540A -#define TCFLSH 0x540B -#define TIOCEXCL 0x540C -#define TIOCNXCL 0x540D -#define TIOCSCTTY 0x540E -#define TIOCGPGRP 0x540F -#define TIOCSPGRP 0x5410 -#define TIOCOUTQ 0x5411 -#define TIOCSTI 0x5412 -#define TIOCGWINSZ 0x5413 -#define TIOCSWINSZ 0x5414 -#define TIOCMGET 0x5415 -#define TIOCMBIS 0x5416 -#define TIOCMBIC 0x5417 -#define TIOCMSET 0x5418 -#define TIOCGSOFTCAR 0x5419 -#define TIOCSSOFTCAR 0x541A -#define FIONREAD 0x541B -#define TIOCINQ FIONREAD -#define TIOCLINUX 0x541C -#define TIOCCONS 0x541D -#define TIOCGSERIAL 0x541E -#define TIOCSSERIAL 0x541F -#define TIOCPKT 0x5420 -#define FIONBIO 0x5421 -#define TIOCNOTTY 0x5422 -#define TIOCSETD 0x5423 -#define TIOCGETD 0x5424 -#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ -#define TIOCTTYGSTRUCT 0x5426 /* For debugging only */ -#define TIOCSBRK 0x5427 /* BSD compatibility */ -#define TIOCCBRK 0x5428 /* BSD compatibility */ -#define TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TCGETS2 _IOR('T',0x2A, struct termios2) -#define TCSETS2 _IOW('T',0x2B, struct termios2) -#define TCSETSW2 _IOW('T',0x2C, struct termios2) -#define TCSETSF2 _IOW('T',0x2D, struct termios2) -#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ - -#define FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ -#define FIOCLEX 0x5451 -#define FIOASYNC 0x5452 -#define TIOCSERCONFIG 0x5453 -#define TIOCSERGWILD 0x5454 -#define TIOCSERSWILD 0x5455 -#define TIOCGLCKTRMIOS 0x5456 -#define TIOCSLCKTRMIOS 0x5457 -#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ -#define TIOCSERGETLSR 0x5459 /* Get line status register */ -#define TIOCSERGETMULTI 0x545A /* Get multiport config */ -#define TIOCSERSETMULTI 0x545B /* Set multiport config */ - -#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ -#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -#define FIOQSIZE 0x545E - -/* Used for packet mode */ -#define TIOCPKT_DATA 0 -#define TIOCPKT_FLUSHREAD 1 -#define TIOCPKT_FLUSHWRITE 2 -#define TIOCPKT_STOP 4 -#define TIOCPKT_START 8 -#define TIOCPKT_NOSTOP 16 -#define TIOCPKT_DOSTOP 32 - -#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ - -#endif /* __ARCH_H8300_IOCTLS_H__ */ diff --git a/include/asm-h8300/ipcbuf.h b/include/asm-h8300/ipcbuf.h deleted file mode 100644 index 2cd1ebcc109..00000000000 --- a/include/asm-h8300/ipcbuf.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __H8300_IPCBUF_H__ -#define __H8300_IPCBUF_H__ - -/* - * The user_ipc_perm structure for H8/300 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 32-bit mode_t and seq - * - 2 miscellaneous 32-bit values - */ - -struct ipc64_perm -{ - __kernel_key_t key; - __kernel_uid32_t uid; - __kernel_gid32_t gid; - __kernel_uid32_t cuid; - __kernel_gid32_t cgid; - __kernel_mode_t mode; - unsigned short __pad1; - unsigned short seq; - unsigned short __pad2; - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* __H8300_IPCBUF_H__ */ diff --git a/include/asm-h8300/irq.h b/include/asm-h8300/irq.h deleted file mode 100644 index 13d7c601cd0..00000000000 --- a/include/asm-h8300/irq.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef _H8300_IRQ_H_ -#define _H8300_IRQ_H_ - -#include - -#if defined(CONFIG_CPU_H8300H) -#define NR_IRQS 64 -#define EXT_IRQ0 12 -#define EXT_IRQ1 13 -#define EXT_IRQ2 14 -#define EXT_IRQ3 15 -#define EXT_IRQ4 16 -#define EXT_IRQ5 17 -#define EXT_IRQ6 18 -#define EXT_IRQ7 19 -#define EXT_IRQS 5 -#define IER_REGS *(volatile unsigned char *)IER -#endif -#if defined(CONFIG_CPU_H8S) -#define NR_IRQS 128 -#define EXT_IRQ0 16 -#define EXT_IRQ1 17 -#define EXT_IRQ2 18 -#define EXT_IRQ3 19 -#define EXT_IRQ4 20 -#define EXT_IRQ5 21 -#define EXT_IRQ6 22 -#define EXT_IRQ7 23 -#define EXT_IRQ8 24 -#define EXT_IRQ9 25 -#define EXT_IRQ10 26 -#define EXT_IRQ11 27 -#define EXT_IRQ12 28 -#define EXT_IRQ13 29 -#define EXT_IRQ14 30 -#define EXT_IRQ15 31 -#define EXT_IRQS 15 - -#define IER_REGS *(volatile unsigned short *)IER -#endif - -static __inline__ int irq_canonicalize(int irq) -{ - return irq; -} - -typedef void (*h8300_vector)(void); - -#endif /* _H8300_IRQ_H_ */ diff --git a/include/asm-h8300/irq_regs.h b/include/asm-h8300/irq_regs.h deleted file mode 100644 index 3dd9c0b7027..00000000000 --- a/include/asm-h8300/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-h8300/kdebug.h b/include/asm-h8300/kdebug.h deleted file mode 100644 index 6ece1b03766..00000000000 --- a/include/asm-h8300/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-h8300/kmap_types.h b/include/asm-h8300/kmap_types.h deleted file mode 100644 index 1ec8a342712..00000000000 --- a/include/asm-h8300/kmap_types.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef 
_ASM_H8300_KMAP_TYPES_H -#define _ASM_H8300_KMAP_TYPES_H - -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; - -#endif diff --git a/include/asm-h8300/linkage.h b/include/asm-h8300/linkage.h deleted file mode 100644 index 6f4df7d4618..00000000000 --- a/include/asm-h8300/linkage.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _H8300_LINKAGE_H -#define _H8300_LINKAGE_H - -#undef SYMBOL_NAME_LABEL -#undef SYMBOL_NAME -#define SYMBOL_NAME_LABEL(_name_) _##_name_##: -#define SYMBOL_NAME(_name_) _##_name_ -#endif diff --git a/include/asm-h8300/local.h b/include/asm-h8300/local.h deleted file mode 100644 index fdd4efe437c..00000000000 --- a/include/asm-h8300/local.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_LOCAL_H_ -#define _H8300_LOCAL_H_ - -#include - -#endif diff --git a/include/asm-h8300/mc146818rtc.h b/include/asm-h8300/mc146818rtc.h deleted file mode 100644 index ab9d9646d24..00000000000 --- a/include/asm-h8300/mc146818rtc.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Machine dependent access functions for RTC registers. - */ -#ifndef _H8300_MC146818RTC_H -#define _H8300_MC146818RTC_H - -/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */ - -#endif /* _H8300_MC146818RTC_H */ diff --git a/include/asm-h8300/md.h b/include/asm-h8300/md.h deleted file mode 100644 index 1a47dc6691f..00000000000 --- a/include/asm-h8300/md.h +++ /dev/null @@ -1,13 +0,0 @@ -/* $Id: md.h,v 1.1 2002/11/19 02:09:26 gerg Exp $ - * md.h: High speed xor_block operation for RAID4/5 - * - */ - -#ifndef __ASM_MD_H -#define __ASM_MD_H - -/* #define HAVE_ARCH_XORBLOCK */ - -#define MD_XORBLOCK_ALIGNMENT sizeof(long) - -#endif /* __ASM_MD_H */ diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h deleted file mode 100644 index b9f104f22a3..00000000000 --- a/include/asm-h8300/mman.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef __H8300_MMAN_H__ -#define __H8300_MMAN_H__ - -#include - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __H8300_MMAN_H__ */ diff --git a/include/asm-h8300/mmu.h b/include/asm-h8300/mmu.h deleted file mode 100644 index 2ce06ea4610..00000000000 --- a/include/asm-h8300/mmu.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __MMU_H -#define __MMU_H - -/* Copyright (C) 2002, David McCullough */ - -typedef struct { - struct vm_list_struct *vmlist; - unsigned long end_brk; -} mm_context_t; - -#endif diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h deleted file mode 100644 index f44b730da54..00000000000 --- a/include/asm-h8300/mmu_context.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef __H8300_MMU_CONTEXT_H -#define __H8300_MMU_CONTEXT_H - -#include -#include -#include -#include - -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - -static inline int -init_new_context(struct task_struct *tsk, struct mm_struct *mm) -{ - // mm->context = virt_to_phys(mm->pgd); - return(0); -} - -#define 
destroy_context(mm) do { } while(0) -#define deactivate_mm(tsk,mm) do { } while(0) - -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) -{ -} - -static inline void activate_mm(struct mm_struct *prev_mm, - struct mm_struct *next_mm) -{ -} - -#endif diff --git a/include/asm-h8300/module.h b/include/asm-h8300/module.h deleted file mode 100644 index de23231f319..00000000000 --- a/include/asm-h8300/module.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ASM_H8300_MODULE_H -#define _ASM_H8300_MODULE_H -/* - * This file contains the H8/300 architecture specific module code. - */ -struct mod_arch_specific { }; -#define Elf_Shdr Elf32_Shdr -#define Elf_Sym Elf32_Sym -#define Elf_Ehdr Elf32_Ehdr - -#define MODULE_SYMBOL_PREFIX "_" - -#endif /* _ASM_H8/300_MODULE_H */ diff --git a/include/asm-h8300/msgbuf.h b/include/asm-h8300/msgbuf.h deleted file mode 100644 index 6b148cd09aa..00000000000 --- a/include/asm-h8300/msgbuf.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _H8300_MSGBUF_H -#define _H8300_MSGBUF_H - -/* - * The msqid64_ds structure for H8/300 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - unsigned long __unused1; - __kernel_time_t msg_rtime; /* last msgrcv time */ - unsigned long __unused2; - __kernel_time_t msg_ctime; /* last change time */ - unsigned long __unused3; - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused4; - unsigned long __unused5; -}; - -#endif /* _H8300_MSGBUF_H */ diff --git a/include/asm-h8300/mutex.h b/include/asm-h8300/mutex.h deleted file mode 100644 index 458c1f7fbc1..00000000000 --- a/include/asm-h8300/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) - */ - -#include diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h deleted file mode 100644 index 0b6acf0b03a..00000000000 --- a/include/asm-h8300/page.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef _H8300_PAGE_H -#define _H8300_PAGE_H - -/* PAGE_SHIFT determines the page size */ - -#define PAGE_SHIFT (12) -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - -#include - -#ifndef __ASSEMBLY__ - -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - -#define clear_page(page) memset((page), 0, PAGE_SIZE) -#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) -#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE - -/* - * These are used to make use of C type-checking.. 
- */ -typedef struct { unsigned long pte; } pte_t; -typedef struct { unsigned long pmd[16]; } pmd_t; -typedef struct { unsigned long pgd; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; -typedef struct page *pgtable_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((&x)->pmd[0]) -#define pgd_val(x) ((x).pgd) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgd(x) ((pgd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -extern unsigned long memory_start; -extern unsigned long memory_end; - -#endif /* !__ASSEMBLY__ */ - -#include - -#define PAGE_OFFSET (PAGE_OFFSET_RAW) - -#ifndef __ASSEMBLY__ - -#define __pa(vaddr) virt_to_phys(vaddr) -#define __va(paddr) phys_to_virt((unsigned long)paddr) - -#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) -#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) - -#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) -#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) -#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) -#define pfn_valid(page) (page < max_mapnr) - -#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) - -#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ - ((void *)(kaddr) < (void *)memory_end)) - -#endif /* __ASSEMBLY__ */ - -#include -#include - -#endif /* _H8300_PAGE_H */ diff --git a/include/asm-h8300/page_offset.h b/include/asm-h8300/page_offset.h deleted file mode 100644 index f8706463008..00000000000 --- a/include/asm-h8300/page_offset.h +++ /dev/null @@ -1,3 +0,0 @@ - -#define PAGE_OFFSET_RAW 0x00000000 - diff --git a/include/asm-h8300/param.h b/include/asm-h8300/param.h deleted file mode 100644 index 1c72fb8080f..00000000000 --- a/include/asm-h8300/param.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _H8300_PARAM_H -#define _H8300_PARAM_H - -#ifdef __KERNEL__ -#define HZ CONFIG_HZ -#define USER_HZ HZ -#define CLOCKS_PER_SEC (USER_HZ) -#else -#define HZ 100 -#endif - -#define EXEC_PAGESIZE 4096 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - -#endif /* _H8300_PARAM_H */ diff --git a/include/asm-h8300/pci.h b/include/asm-h8300/pci.h deleted file mode 100644 index 97389b35aa3..00000000000 --- a/include/asm-h8300/pci.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _ASM_H8300_PCI_H -#define _ASM_H8300_PCI_H - -/* - * asm-h8300/pci.h - H8/300 specific PCI declarations. 
- * - * Yoshinori Sato - */ - -#define pcibios_assign_all_busses() 0 -#define pcibios_scan_all_fns(a, b) 0 - -static inline void pcibios_set_master(struct pci_dev *dev) -{ - /* No special bus mastering setup handling */ -} - -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - -#define PCI_DMA_BUS_IS_PHYS (1) - -#endif /* _ASM_H8300_PCI_H */ diff --git a/include/asm-h8300/percpu.h b/include/asm-h8300/percpu.h deleted file mode 100644 index 72c03e3666d..00000000000 --- a/include/asm-h8300/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ARCH_H8300_PERCPU__ -#define __ARCH_H8300_PERCPU__ - -#include - -#endif /* __ARCH_H8300_PERCPU__ */ diff --git a/include/asm-h8300/pgalloc.h b/include/asm-h8300/pgalloc.h deleted file mode 100644 index c2e89a286d2..00000000000 --- a/include/asm-h8300/pgalloc.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _H8300_PGALLOC_H -#define _H8300_PGALLOC_H - -#include - -#define check_pgt_cache() do { } while (0) - -#endif /* _H8300_PGALLOC_H */ diff --git a/include/asm-h8300/pgtable.h b/include/asm-h8300/pgtable.h deleted file mode 100644 index a09230a08e0..00000000000 --- a/include/asm-h8300/pgtable.h +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef _H8300_PGTABLE_H -#define _H8300_PGTABLE_H - -#include - -#include -#include -#include -#include - -#define pgd_present(pgd) (1) /* pages are always present on NO_MM */ -#define pgd_none(pgd) (0) -#define pgd_bad(pgd) (0) -#define pgd_clear(pgdp) -#define kern_addr_valid(addr) (1) -#define pmd_offset(a, b) ((void *)0) -#define pmd_none(pmd) (1) -#define pgd_offset_k(adrdress) ((pgd_t *)0) -#define pte_offset_kernel(dir, address) ((pte_t *)0) - -#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */ - -extern void paging_init(void); -#define swapper_pg_dir ((pgd_t *) 0) - -#define __swp_type(x) (0) -#define __swp_offset(x) (0) -#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -static inline int pte_file(pte_t pte) { return 0; } - -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -#define ZERO_PAGE(vaddr) (virt_to_page(0)) - -/* - * These would be in other places but having them here reduces the diffs. - */ -extern unsigned int kobjsize(const void *objp); -extern int is_in_rom(unsigned long); - -/* - * No page table caches to initialise - */ -#define pgtable_cache_init() do { } while (0) - -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - -/* - * All 32bit addresses are effectively valid for vmalloc... - * Sort of meaningless for non-VM targets. - */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff - -/* - * All 32bit addresses are effectively valid for vmalloc... - * Sort of meaningless for non-VM targets. 
- */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff - -#define arch_enter_lazy_cpu_mode() do {} while (0) -#endif /* _H8300_PGTABLE_H */ diff --git a/include/asm-h8300/poll.h b/include/asm-h8300/poll.h deleted file mode 100644 index f61540c22d9..00000000000 --- a/include/asm-h8300/poll.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __H8300_POLL_H -#define __H8300_POLL_H - -#define POLLWRNORM POLLOUT -#define POLLWRBAND 256 - -#include - -#undef POLLREMOVE - -#endif diff --git a/include/asm-h8300/posix_types.h b/include/asm-h8300/posix_types.h deleted file mode 100644 index 5c553927fc5..00000000000 --- a/include/asm-h8300/posix_types.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef __ARCH_H8300_POSIX_TYPES_H -#define __ARCH_H8300_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc. Also, we cannot - * assume GCC is being used. - */ - -typedef unsigned long __kernel_ino_t; -typedef unsigned short __kernel_mode_t; -typedef unsigned short __kernel_nlink_t; -typedef long __kernel_off_t; -typedef int __kernel_pid_t; -typedef unsigned short __kernel_ipc_pid_t; -typedef unsigned short __kernel_uid_t; -typedef unsigned short __kernel_gid_t; -typedef unsigned int __kernel_size_t; -typedef int __kernel_ssize_t; -typedef int __kernel_ptrdiff_t; -typedef long __kernel_time_t; -typedef long __kernel_suseconds_t; -typedef long __kernel_clock_t; -typedef int __kernel_timer_t; -typedef int __kernel_clockid_t; -typedef int __kernel_daddr_t; -typedef char * __kernel_caddr_t; -typedef unsigned short __kernel_uid16_t; -typedef unsigned short __kernel_gid16_t; -typedef unsigned int __kernel_uid32_t; -typedef unsigned int __kernel_gid32_t; - -typedef unsigned short __kernel_old_uid_t; -typedef unsigned short __kernel_old_gid_t; - -#ifdef __GNUC__ -typedef long long __kernel_loff_t; -#endif - -typedef struct { - int val[2]; -} __kernel_fsid_t; - -#if defined(__KERNEL__) - -#undef __FD_SET -#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) - -#undef __FD_CLR -#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) - -#undef __FD_ISSET -#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) - -#undef __FD_ZERO -#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp))) - -#endif /* defined(__KERNEL__) */ - -#endif diff --git a/include/asm-h8300/processor.h b/include/asm-h8300/processor.h deleted file mode 100644 index 69e8a34eb6d..00000000000 --- a/include/asm-h8300/processor.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * include/asm-h8300/processor.h - * - * Copyright (C) 2002 Yoshinori Sato - * - * Based on: linux/asm-m68nommu/processor.h - * - * Copyright (C) 1995 Hamish Macdonald - */ - -#ifndef __ASM_H8300_PROCESSOR_H -#define __ASM_H8300_PROCESSOR_H - -/* - * Default implementation of macro that returns current - * instruction pointer ("program counter"). - */ -#define current_text_addr() ({ __label__ _l; _l: &&_l;}) - -#include -#include -#include -#include -#include - -static inline unsigned long rdusp(void) { - extern unsigned int sw_usp; - return(sw_usp); -} - -static inline void wrusp(unsigned long usp) { - extern unsigned int sw_usp; - sw_usp = usp; -} - -/* - * User space process size: 3.75GB. This is hardcoded into a few places, - * so don't change it unless you know what you are doing. 
- */ -#define TASK_SIZE (0xFFFFFFFFUL) - -#ifdef __KERNEL__ -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP -#endif - -/* - * This decides where the kernel will search for a free chunk of vm - * space during mmap's. We won't be using it - */ -#define TASK_UNMAPPED_BASE 0 - -struct thread_struct { - unsigned long ksp; /* kernel stack pointer */ - unsigned long usp; /* user stack pointer */ - unsigned long ccr; /* saved status register */ - unsigned long esp0; /* points to SR of stack frame */ - struct { - unsigned short *addr; - unsigned short inst; - } breakinfo; -}; - -#define INIT_THREAD { \ - .ksp = sizeof(init_stack) + (unsigned long)init_stack, \ - .usp = 0, \ - .ccr = PS_S, \ - .esp0 = 0, \ - .breakinfo = { \ - .addr = (unsigned short *)-1, \ - .inst = 0 \ - } \ -} - -/* - * Do necessary setup to start up a newly executed thread. - * - * pass the data segment into user programs if it exists, - * it can't hurt anything as far as I can tell - */ -#if defined(__H8300H__) -#define start_thread(_regs, _pc, _usp) \ -do { \ - set_fs(USER_DS); /* reads from user space */ \ - (_regs)->pc = (_pc); \ - (_regs)->ccr = 0x00; /* clear all flags */ \ - (_regs)->er5 = current->mm->start_data; /* GOT base */ \ - wrusp((unsigned long)(_usp) - sizeof(unsigned long)*3); \ -} while(0) -#endif -#if defined(__H8300S__) -#define start_thread(_regs, _pc, _usp) \ -do { \ - set_fs(USER_DS); /* reads from user space */ \ - (_regs)->pc = (_pc); \ - (_regs)->ccr = 0x00; /* clear kernel flag */ \ - (_regs)->exr = 0x78; /* enable all interrupts */ \ - (_regs)->er5 = current->mm->start_data; /* GOT base */ \ - /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \ - wrusp(((unsigned long)(_usp)) - 14); \ -} while(0) -#endif - -/* Forward declaration, a strange C thing */ -struct task_struct; - -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) -{ -} - -extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); - -#define prepare_to_copy(tsk) do { } while (0) - -/* - * Free current thread data structures etc.. - */ -static inline void exit_thread(void) -{ -} - -/* - * Return saved PC of a blocked thread. - */ -unsigned long thread_saved_pc(struct task_struct *tsk); -unsigned long get_wchan(struct task_struct *p); - -#define KSTK_EIP(tsk) \ - ({ \ - unsigned long eip = 0; \ - if ((tsk)->thread.esp0 > PAGE_SIZE && \ - MAP_NR((tsk)->thread.esp0) < max_mapnr) \ - eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \ - eip; }) -#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) - -#define cpu_relax() barrier() - -#endif diff --git a/include/asm-h8300/ptrace.h b/include/asm-h8300/ptrace.h deleted file mode 100644 index c2e05e4b512..00000000000 --- a/include/asm-h8300/ptrace.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef _H8300_PTRACE_H -#define _H8300_PTRACE_H - -#ifndef __ASSEMBLY__ - -#define PT_ER1 0 -#define PT_ER2 1 -#define PT_ER3 2 -#define PT_ER4 3 -#define PT_ER5 4 -#define PT_ER6 5 -#define PT_ER0 6 -#define PT_ORIG_ER0 7 -#define PT_CCR 8 -#define PT_PC 9 -#define PT_USP 10 -#define PT_EXR 12 - -/* this struct defines the way the registers are stored on the - stack during a system call. 
*/ - -struct pt_regs { - long retpc; - long er4; - long er5; - long er6; - long er3; - long er2; - long er1; - long orig_er0; - unsigned short ccr; - long er0; - long vector; -#if defined(CONFIG_CPU_H8S) - unsigned short exr; -#endif - unsigned long pc; -} __attribute__((aligned(2),packed)); - -#define PTRACE_GETREGS 12 -#define PTRACE_SETREGS 13 - -#ifdef __KERNEL__ -#ifndef PS_S -#define PS_S (0x10) -#endif - -#if defined(__H8300H__) -#define H8300_REGS_NO 11 -#endif -#if defined(__H8300S__) -#define H8300_REGS_NO 12 -#endif - -/* Find the stack offset for a register, relative to thread.esp0. */ -#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) - -#define user_mode(regs) (!((regs)->ccr & PS_S)) -#define instruction_pointer(regs) ((regs)->pc) -#define profile_pc(regs) instruction_pointer(regs) -extern void show_regs(struct pt_regs *); -#endif /* __KERNEL__ */ -#endif /* __ASSEMBLY__ */ -#endif /* _H8300_PTRACE_H */ diff --git a/include/asm-h8300/regs267x.h b/include/asm-h8300/regs267x.h deleted file mode 100644 index 1bff731a9f7..00000000000 --- a/include/asm-h8300/regs267x.h +++ /dev/null @@ -1,336 +0,0 @@ -/* internal Peripherals Register address define */ -/* CPU: H8/306x */ - -#if !defined(__REGS_H8S267x__) -#define __REGS_H8S267x__ - -#if defined(__KERNEL__) - -#define DASTCR 0xFEE01A -#define DADR0 0xFFFFA4 -#define DADR1 0xFFFFA5 -#define DACR01 0xFFFFA6 -#define DADR2 0xFFFFA8 -#define DADR3 0xFFFFA9 -#define DACR23 0xFFFFAA - -#define ADDRA 0xFFFF90 -#define ADDRAH 0xFFFF90 -#define ADDRAL 0xFFFF91 -#define ADDRB 0xFFFF92 -#define ADDRBH 0xFFFF92 -#define ADDRBL 0xFFFF93 -#define ADDRC 0xFFFF94 -#define ADDRCH 0xFFFF94 -#define ADDRCL 0xFFFF95 -#define ADDRD 0xFFFF96 -#define ADDRDH 0xFFFF96 -#define ADDRDL 0xFFFF97 -#define ADDRE 0xFFFF98 -#define ADDREH 0xFFFF98 -#define ADDREL 0xFFFF99 -#define ADDRF 0xFFFF9A -#define ADDRFH 0xFFFF9A -#define ADDRFL 0xFFFF9B -#define ADDRG 0xFFFF9C -#define ADDRGH 0xFFFF9C -#define ADDRGL 0xFFFF9D -#define ADDRH 0xFFFF9E -#define ADDRHH 0xFFFF9E -#define ADDRHL 0xFFFF9F - -#define ADCSR 0xFFFFA0 -#define ADCR 0xFFFFA1 - -#define ABWCR 0xFFFEC0 -#define ASTCR 0xFFFEC1 -#define WTCRAH 0xFFFEC2 -#define WTCRAL 0xFFFEC3 -#define WTCRBH 0xFFFEC4 -#define WTCRBL 0xFFFEC5 -#define RDNCR 0xFFFEC6 -#define CSACRH 0xFFFEC8 -#define CSACRL 0xFFFEC9 -#define BROMCRH 0xFFFECA -#define BROMCRL 0xFFFECB -#define BCR 0xFFFECC -#define DRAMCR 0xFFFED0 -#define DRACCR 0xFFFED2 -#define REFCR 0xFFFED4 -#define RTCNT 0xFFFED6 -#define RTCOR 0xFFFED7 - -#define MAR0AH 0xFFFEE0 -#define MAR0AL 0xFFFEE2 -#define IOAR0A 0xFFFEE4 -#define ETCR0A 0xFFFEE6 -#define MAR0BH 0xFFFEE8 -#define MAR0BL 0xFFFEEA -#define IOAR0B 0xFFFEEC -#define ETCR0B 0xFFFEEE -#define MAR1AH 0xFFFEF0 -#define MAR1AL 0xFFFEF2 -#define IOAR1A 0xFFFEF4 -#define ETCR1A 0xFFFEF6 -#define MAR1BH 0xFFFEF8 -#define MAR1BL 0xFFFEFA -#define IOAR1B 0xFFFEFC -#define ETCR1B 0xFFFEFE -#define DMAWER 0xFFFF20 -#define DMATCR 0xFFFF21 -#define DMACR0A 0xFFFF22 -#define DMACR0B 0xFFFF23 -#define DMACR1A 0xFFFF24 -#define DMACR1B 0xFFFF25 -#define DMABCRH 0xFFFF26 -#define DMABCRL 0xFFFF27 - -#define EDSAR0 0xFFFDC0 -#define EDDAR0 0xFFFDC4 -#define EDTCR0 0xFFFDC8 -#define EDMDR0 0xFFFDCC -#define EDMDR0H 0xFFFDCC -#define EDMDR0L 0xFFFDCD -#define EDACR0 0xFFFDCE -#define EDSAR1 0xFFFDD0 -#define EDDAR1 0xFFFDD4 -#define EDTCR1 0xFFFDD8 -#define EDMDR1 0xFFFDDC -#define EDMDR1H 0xFFFDDC -#define EDMDR1L 0xFFFDDD -#define EDACR1 0xFFFDDE -#define EDSAR2 0xFFFDE0 -#define EDDAR2 0xFFFDE4 -#define 
EDTCR2 0xFFFDE8 -#define EDMDR2 0xFFFDEC -#define EDMDR2H 0xFFFDEC -#define EDMDR2L 0xFFFDED -#define EDACR2 0xFFFDEE -#define EDSAR3 0xFFFDF0 -#define EDDAR3 0xFFFDF4 -#define EDTCR3 0xFFFDF8 -#define EDMDR3 0xFFFDFC -#define EDMDR3H 0xFFFDFC -#define EDMDR3L 0xFFFDFD -#define EDACR3 0xFFFDFE - -#define IPRA 0xFFFE00 -#define IPRB 0xFFFE02 -#define IPRC 0xFFFE04 -#define IPRD 0xFFFE06 -#define IPRE 0xFFFE08 -#define IPRF 0xFFFE0A -#define IPRG 0xFFFE0C -#define IPRH 0xFFFE0E -#define IPRI 0xFFFE10 -#define IPRJ 0xFFFE12 -#define IPRK 0xFFFE14 -#define ITSR 0xFFFE16 -#define SSIER 0xFFFE18 -#define ISCRH 0xFFFE1A -#define ISCRL 0xFFFE1C - -#define INTCR 0xFFFF31 -#define IER 0xFFFF32 -#define IERH 0xFFFF32 -#define IERL 0xFFFF33 -#define ISR 0xFFFF34 -#define ISRH 0xFFFF34 -#define ISRL 0xFFFF35 - -#define P1DDR 0xFFFE20 -#define P2DDR 0xFFFE21 -#define P3DDR 0xFFFE22 -#define P4DDR 0xFFFE23 -#define P5DDR 0xFFFE24 -#define P6DDR 0xFFFE25 -#define P7DDR 0xFFFE26 -#define P8DDR 0xFFFE27 -#define P9DDR 0xFFFE28 -#define PADDR 0xFFFE29 -#define PBDDR 0xFFFE2A -#define PCDDR 0xFFFE2B -#define PDDDR 0xFFFE2C -#define PEDDR 0xFFFE2D -#define PFDDR 0xFFFE2E -#define PGDDR 0xFFFE2F -#define PHDDR 0xFFFF74 - -#define PFCR0 0xFFFE32 -#define PFCR1 0xFFFE33 -#define PFCR2 0xFFFE34 - -#define PAPCR 0xFFFE36 -#define PBPCR 0xFFFE37 -#define PCPCR 0xFFFE38 -#define PDPCR 0xFFFE39 -#define PEPCR 0xFFFE3A - -#define P3ODR 0xFFFE3C -#define PAODR 0xFFFE3D - -#define P1DR 0xFFFF60 -#define P2DR 0xFFFF61 -#define P3DR 0xFFFF62 -#define P4DR 0xFFFF63 -#define P5DR 0xFFFF64 -#define P6DR 0xFFFF65 -#define P7DR 0xFFFF66 -#define P8DR 0xFFFF67 -#define P9DR 0xFFFF68 -#define PADR 0xFFFF69 -#define PBDR 0xFFFF6A -#define PCDR 0xFFFF6B -#define PDDR 0xFFFF6C -#define PEDR 0xFFFF6D -#define PFDR 0xFFFF6E -#define PGDR 0xFFFF6F -#define PHDR 0xFFFF72 - -#define PORT1 0xFFFF50 -#define PORT2 0xFFFF51 -#define PORT3 0xFFFF52 -#define PORT4 0xFFFF53 -#define PORT5 0xFFFF54 -#define PORT6 0xFFFF55 -#define PORT7 0xFFFF56 -#define PORT8 0xFFFF57 -#define PORT9 0xFFFF58 -#define PORTA 0xFFFF59 -#define PORTB 0xFFFF5A -#define PORTC 0xFFFF5B -#define PORTD 0xFFFF5C -#define PORTE 0xFFFF5D -#define PORTF 0xFFFF5E -#define PORTG 0xFFFF5F -#define PORTH 0xFFFF70 - -#define PCR 0xFFFF46 -#define PMR 0xFFFF47 -#define NDERH 0xFFFF48 -#define NDERL 0xFFFF49 -#define PODRH 0xFFFF4A -#define PODRL 0xFFFF4B -#define NDRH1 0xFFFF4C -#define NDRL1 0xFFFF4D -#define NDRH2 0xFFFF4E -#define NDRL2 0xFFFF4F - -#define SMR0 0xFFFF78 -#define BRR0 0xFFFF79 -#define SCR0 0xFFFF7A -#define TDR0 0xFFFF7B -#define SSR0 0xFFFF7C -#define RDR0 0xFFFF7D -#define SCMR0 0xFFFF7E -#define SMR1 0xFFFF80 -#define BRR1 0xFFFF81 -#define SCR1 0xFFFF82 -#define TDR1 0xFFFF83 -#define SSR1 0xFFFF84 -#define RDR1 0xFFFF85 -#define SCMR1 0xFFFF86 -#define SMR2 0xFFFF88 -#define BRR2 0xFFFF89 -#define SCR2 0xFFFF8A -#define TDR2 0xFFFF8B -#define SSR2 0xFFFF8C -#define RDR2 0xFFFF8D -#define SCMR2 0xFFFF8E - -#define IRCR0 0xFFFE1E -#define SEMR 0xFFFDA8 - -#define MDCR 0xFFFF3E -#define SYSCR 0xFFFF3D -#define MSTPCRH 0xFFFF40 -#define MSTPCRL 0xFFFF41 -#define FLMCR1 0xFFFFC8 -#define FLMCR2 0xFFFFC9 -#define EBR1 0xFFFFCA -#define EBR2 0xFFFFCB -#define CTGARC_RAMCR 0xFFFECE -#define SBYCR 0xFFFF3A -#define SCKCR 0xFFFF3B -#define PLLCR 0xFFFF45 - -#define TSTR 0xFFFFC0 -#define TSNC 0XFFFFC1 - -#define TCR0 0xFFFFD0 -#define TMDR0 0xFFFFD1 -#define TIORH0 0xFFFFD2 -#define TIORL0 0xFFFFD3 -#define TIER0 0xFFFFD4 -#define TSR0 0xFFFFD5 -#define TCNT0 
0xFFFFD6 -#define GRA0 0xFFFFD8 -#define GRB0 0xFFFFDA -#define GRC0 0xFFFFDC -#define GRD0 0xFFFFDE -#define TCR1 0xFFFFE0 -#define TMDR1 0xFFFFE1 -#define TIORH1 0xFFFFE2 -#define TIORL1 0xFFFFE3 -#define TIER1 0xFFFFE4 -#define TSR1 0xFFFFE5 -#define TCNT1 0xFFFFE6 -#define GRA1 0xFFFFE8 -#define GRB1 0xFFFFEA -#define TCR2 0xFFFFF0 -#define TMDR2 0xFFFFF1 -#define TIORH2 0xFFFFF2 -#define TIORL2 0xFFFFF3 -#define TIER2 0xFFFFF4 -#define TSR2 0xFFFFF5 -#define TCNT2 0xFFFFF6 -#define GRA2 0xFFFFF8 -#define GRB2 0xFFFFFA -#define TCR3 0xFFFE80 -#define TMDR3 0xFFFE81 -#define TIORH3 0xFFFE82 -#define TIORL3 0xFFFE83 -#define TIER3 0xFFFE84 -#define TSR3 0xFFFE85 -#define TCNT3 0xFFFE86 -#define GRA3 0xFFFE88 -#define GRB3 0xFFFE8A -#define GRC3 0xFFFE8C -#define GRD3 0xFFFE8E -#define TCR4 0xFFFE90 -#define TMDR4 0xFFFE91 -#define TIORH4 0xFFFE92 -#define TIORL4 0xFFFE93 -#define TIER4 0xFFFE94 -#define TSR4 0xFFFE95 -#define TCNT4 0xFFFE96 -#define GRA4 0xFFFE98 -#define GRB4 0xFFFE9A -#define TCR5 0xFFFEA0 -#define TMDR5 0xFFFEA1 -#define TIORH5 0xFFFEA2 -#define TIORL5 0xFFFEA3 -#define TIER5 0xFFFEA4 -#define TSR5 0xFFFEA5 -#define TCNT5 0xFFFEA6 -#define GRA5 0xFFFEA8 -#define GRB5 0xFFFEAA - -#define _8TCR0 0xFFFFB0 -#define _8TCR1 0xFFFFB1 -#define _8TCSR0 0xFFFFB2 -#define _8TCSR1 0xFFFFB3 -#define _8TCORA0 0xFFFFB4 -#define _8TCORA1 0xFFFFB5 -#define _8TCORB0 0xFFFFB6 -#define _8TCORB1 0xFFFFB7 -#define _8TCNT0 0xFFFFB8 -#define _8TCNT1 0xFFFFB9 - -#define TCSR 0xFFFFBC -#define TCNT 0xFFFFBD -#define RSTCSRW 0xFFFFBE -#define RSTCSRR 0xFFFFBF - -#endif /* __KERNEL__ */ -#endif /* __REGS_H8S267x__ */ diff --git a/include/asm-h8300/regs306x.h b/include/asm-h8300/regs306x.h deleted file mode 100644 index 027dd633fa2..00000000000 --- a/include/asm-h8300/regs306x.h +++ /dev/null @@ -1,212 +0,0 @@ -/* internal Peripherals Register address define */ -/* CPU: H8/306x */ - -#if !defined(__REGS_H8306x__) -#define __REGS_H8306x__ - -#if defined(__KERNEL__) - -#define DASTCR 0xFEE01A -#define DADR0 0xFEE09C -#define DADR1 0xFEE09D -#define DACR 0xFEE09E - -#define ADDRAH 0xFFFFE0 -#define ADDRAL 0xFFFFE1 -#define ADDRBH 0xFFFFE2 -#define ADDRBL 0xFFFFE3 -#define ADDRCH 0xFFFFE4 -#define ADDRCL 0xFFFFE5 -#define ADDRDH 0xFFFFE6 -#define ADDRDL 0xFFFFE7 -#define ADCSR 0xFFFFE8 -#define ADCR 0xFFFFE9 - -#define BRCR 0xFEE013 -#define ADRCR 0xFEE01E -#define CSCR 0xFEE01F -#define ABWCR 0xFEE020 -#define ASTCR 0xFEE021 -#define WCRH 0xFEE022 -#define WCRL 0xFEE023 -#define BCR 0xFEE024 -#define DRCRA 0xFEE026 -#define DRCRB 0xFEE027 -#define RTMCSR 0xFEE028 -#define RTCNT 0xFEE029 -#define RTCOR 0xFEE02A - -#define MAR0AR 0xFFFF20 -#define MAR0AE 0xFFFF21 -#define MAR0AH 0xFFFF22 -#define MAR0AL 0xFFFF23 -#define ETCR0AL 0xFFFF24 -#define ETCR0AH 0xFFFF25 -#define IOAR0A 0xFFFF26 -#define DTCR0A 0xFFFF27 -#define MAR0BR 0xFFFF28 -#define MAR0BE 0xFFFF29 -#define MAR0BH 0xFFFF2A -#define MAR0BL 0xFFFF2B -#define ETCR0BL 0xFFFF2C -#define ETCR0BH 0xFFFF2D -#define IOAR0B 0xFFFF2E -#define DTCR0B 0xFFFF2F -#define MAR1AR 0xFFFF30 -#define MAR1AE 0xFFFF31 -#define MAR1AH 0xFFFF32 -#define MAR1AL 0xFFFF33 -#define ETCR1AL 0xFFFF34 -#define ETCR1AH 0xFFFF35 -#define IOAR1A 0xFFFF36 -#define DTCR1A 0xFFFF37 -#define MAR1BR 0xFFFF38 -#define MAR1BE 0xFFFF39 -#define MAR1BH 0xFFFF3A -#define MAR1BL 0xFFFF3B -#define ETCR1BL 0xFFFF3C -#define ETCR1BH 0xFFFF3D -#define IOAR1B 0xFFFF3E -#define DTCR1B 0xFFFF3F - -#define ISCR 0xFEE014 -#define IER 0xFEE015 -#define ISR 0xFEE016 -#define IPRA 0xFEE018 
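For context on how code consumed these raw on-chip register addresses, here is a minimal sketch assuming the ctrl_inb()/ctrl_outb() accessors and the IER define from the headers deleted above; the bit number is purely illustrative, not taken from any datasheet, and a real caller would normally run such a read-modify-write with interrupts disabled:

static inline void h8300_set_ier_bit(unsigned int bit)
{
	/* read-modify-write of the on-chip interrupt enable register */
	unsigned char v = ctrl_inb(IER);

	ctrl_outb(v | (1u << bit), IER);
}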
-#define IPRB 0xFEE019 - -#define P1DDR 0xFEE000 -#define P2DDR 0xFEE001 -#define P3DDR 0xFEE002 -#define P4DDR 0xFEE003 -#define P5DDR 0xFEE004 -#define P6DDR 0xFEE005 -/*#define P7DDR 0xFEE006*/ -#define P8DDR 0xFEE007 -#define P9DDR 0xFEE008 -#define PADDR 0xFEE009 -#define PBDDR 0xFEE00A - -#define P1DR 0xFFFFD0 -#define P2DR 0xFFFFD1 -#define P3DR 0xFFFFD2 -#define P4DR 0xFFFFD3 -#define P5DR 0xFFFFD4 -#define P6DR 0xFFFFD5 -/*#define P7DR 0xFFFFD6*/ -#define P8DR 0xFFFFD7 -#define P9DR 0xFFFFD8 -#define PADR 0xFFFFD9 -#define PBDR 0xFFFFDA - -#define P2CR 0xFEE03C -#define P4CR 0xFEE03E -#define P5CR 0xFEE03F - -#define SMR0 0xFFFFB0 -#define BRR0 0xFFFFB1 -#define SCR0 0xFFFFB2 -#define TDR0 0xFFFFB3 -#define SSR0 0xFFFFB4 -#define RDR0 0xFFFFB5 -#define SCMR0 0xFFFFB6 -#define SMR1 0xFFFFB8 -#define BRR1 0xFFFFB9 -#define SCR1 0xFFFFBA -#define TDR1 0xFFFFBB -#define SSR1 0xFFFFBC -#define RDR1 0xFFFFBD -#define SCMR1 0xFFFFBE -#define SMR2 0xFFFFC0 -#define BRR2 0xFFFFC1 -#define SCR2 0xFFFFC2 -#define TDR2 0xFFFFC3 -#define SSR2 0xFFFFC4 -#define RDR2 0xFFFFC5 -#define SCMR2 0xFFFFC6 - -#define MDCR 0xFEE011 -#define SYSCR 0xFEE012 -#define DIVCR 0xFEE01B -#define MSTCRH 0xFEE01C -#define MSTCRL 0xFEE01D -#define FLMCR1 0xFEE030 -#define FLMCR2 0xFEE031 -#define EBR1 0xFEE032 -#define EBR2 0xFEE033 -#define RAMCR 0xFEE077 - -#define TSTR 0xFFFF60 -#define TSNC 0XFFFF61 -#define TMDR 0xFFFF62 -#define TOLR 0xFFFF63 -#define TISRA 0xFFFF64 -#define TISRB 0xFFFF65 -#define TISRC 0xFFFF66 -#define TCR0 0xFFFF68 -#define TIOR0 0xFFFF69 -#define TCNT0H 0xFFFF6A -#define TCNT0L 0xFFFF6B -#define GRA0H 0xFFFF6C -#define GRA0L 0xFFFF6D -#define GRB0H 0xFFFF6E -#define GRB0L 0xFFFF6F -#define TCR1 0xFFFF70 -#define TIOR1 0xFFFF71 -#define TCNT1H 0xFFFF72 -#define TCNT1L 0xFFFF73 -#define GRA1H 0xFFFF74 -#define GRA1L 0xFFFF75 -#define GRB1H 0xFFFF76 -#define GRB1L 0xFFFF77 -#define TCR3 0xFFFF78 -#define TIOR3 0xFFFF79 -#define TCNT3H 0xFFFF7A -#define TCNT3L 0xFFFF7B -#define GRA3H 0xFFFF7C -#define GRA3L 0xFFFF7D -#define GRB3H 0xFFFF7E -#define GRB3L 0xFFFF7F - -#define _8TCR0 0xFFFF80 -#define _8TCR1 0xFFFF81 -#define _8TCSR0 0xFFFF82 -#define _8TCSR1 0xFFFF83 -#define TCORA0 0xFFFF84 -#define TCORA1 0xFFFF85 -#define TCORB0 0xFFFF86 -#define TCORB1 0xFFFF87 -#define _8TCNT0 0xFFFF88 -#define _8TCNT1 0xFFFF89 - -#define _8TCR2 0xFFFF90 -#define _8TCR3 0xFFFF91 -#define _8TCSR2 0xFFFF92 -#define _8TCSR3 0xFFFF93 -#define TCORA2 0xFFFF94 -#define TCORA3 0xFFFF95 -#define TCORB2 0xFFFF96 -#define TCORB3 0xFFFF97 -#define _8TCNT2 0xFFFF98 -#define _8TCNT3 0xFFFF99 - -#define TCSR 0xFFFF8C -#define TCNT 0xFFFF8D -#define RSTCSR 0xFFFF8F - -#define TPMR 0xFFFFA0 -#define TPCR 0xFFFFA1 -#define NDERB 0xFFFFA2 -#define NDERA 0xFFFFA3 -#define NDRB1 0xFFFFA4 -#define NDRA1 0xFFFFA5 -#define NDRB2 0xFFFFA6 -#define NDRA2 0xFFFFA7 - -#define TCSR 0xFFFF8C -#define TCNT 0xFFFF8D -#define RSTCSRW 0xFFFF8E -#define RSTCSRR 0xFFFF8F - -#endif /* __KERNEL__ */ -#endif /* __REGS_H8306x__ */ diff --git a/include/asm-h8300/resource.h b/include/asm-h8300/resource.h deleted file mode 100644 index 46c5f439160..00000000000 --- a/include/asm-h8300/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_RESOURCE_H -#define _H8300_RESOURCE_H - -#include - -#endif /* _H8300_RESOURCE_H */ diff --git a/include/asm-h8300/scatterlist.h b/include/asm-h8300/scatterlist.h deleted file mode 100644 index d3ecdd87ac9..00000000000 --- a/include/asm-h8300/scatterlist.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef 
_H8300_SCATTERLIST_H -#define _H8300_SCATTERLIST_H - -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; - -#define ISA_DMA_THRESHOLD (0xffffffff) - -#endif /* !(_H8300_SCATTERLIST_H) */ diff --git a/include/asm-h8300/sections.h b/include/asm-h8300/sections.h deleted file mode 100644 index a81743e8b74..00000000000 --- a/include/asm-h8300/sections.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_SECTIONS_H_ -#define _H8300_SECTIONS_H_ - -#include - -#endif diff --git a/include/asm-h8300/segment.h b/include/asm-h8300/segment.h deleted file mode 100644 index b79a82d0f99..00000000000 --- a/include/asm-h8300/segment.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef _H8300_SEGMENT_H -#define _H8300_SEGMENT_H - -/* define constants */ -#define USER_DATA (1) -#ifndef __USER_DS -#define __USER_DS (USER_DATA) -#endif -#define USER_PROGRAM (2) -#define SUPER_DATA (3) -#ifndef __KERNEL_DS -#define __KERNEL_DS (SUPER_DATA) -#endif -#define SUPER_PROGRAM (4) - -#ifndef __ASSEMBLY__ - -typedef struct { - unsigned long seg; -} mm_segment_t; - -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) -#define USER_DS MAKE_MM_SEG(__USER_DS) -#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS) - -/* - * Get/set the SFC/DFC registers for MOVES instructions - */ - -static inline mm_segment_t get_fs(void) -{ - return USER_DS; -} - -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - -static inline void set_fs(mm_segment_t val) -{ -} - -#define segment_eq(a,b) ((a).seg == (b).seg) - -#endif /* __ASSEMBLY__ */ - -#endif /* _H8300_SEGMENT_H */ diff --git a/include/asm-h8300/sembuf.h b/include/asm-h8300/sembuf.h deleted file mode 100644 index e04a3ec0cb9..00000000000 --- a/include/asm-h8300/sembuf.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _H8300_SEMBUF_H -#define _H8300_SEMBUF_H - -/* - * The semid64_ds structure for m68k architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - unsigned long __unused1; - __kernel_time_t sem_ctime; /* last change time */ - unsigned long __unused2; - unsigned long sem_nsems; /* no. 
of semaphores in array */ - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _H8300_SEMBUF_H */ diff --git a/include/asm-h8300/setup.h b/include/asm-h8300/setup.h deleted file mode 100644 index e2c600e9673..00000000000 --- a/include/asm-h8300/setup.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __H8300_SETUP_H -#define __H8300_SETUP_H - -#define COMMAND_LINE_SIZE 512 - -#endif diff --git a/include/asm-h8300/sh_bios.h b/include/asm-h8300/sh_bios.h deleted file mode 100644 index b6bb6e58295..00000000000 --- a/include/asm-h8300/sh_bios.h +++ /dev/null @@ -1,29 +0,0 @@ -/* eCos HAL interface header */ - -#ifndef SH_BIOS_H -#define SH_BIOS_H - -#define HAL_IF_VECTOR_TABLE 0xfffe20 -#define CALL_IF_SET_CONSOLE_COMM 13 -#define QUERY_CURRENT -1 -#define MANGLER -3 - -/* Checking for GDB stub active */ -/* suggestion Jonathan Larmour */ -static int sh_bios_in_gdb_mode(void) -{ - static int gdb_active = -1; - if (gdb_active == -1) { - int (*set_console_comm)(int); - set_console_comm = ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM]; - gdb_active = (set_console_comm(QUERY_CURRENT) == MANGLER); - } - return gdb_active; -} - -static void sh_bios_gdb_detach(void) -{ - -} - -#endif diff --git a/include/asm-h8300/shm.h b/include/asm-h8300/shm.h deleted file mode 100644 index ed6623c0545..00000000000 --- a/include/asm-h8300/shm.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _H8300_SHM_H -#define _H8300_SHM_H - - -/* format of page table entries that correspond to shared memory pages - currently out in swap space (see also mm/swap.c): - bits 0-1 (PAGE_PRESENT) is = 0 - bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE - bits 31..9 are used like this: - bits 15..9 (SHM_ID) the id of the shared memory segment - bits 30..16 (SHM_IDX) the index of the page within the shared memory segment - (actually only bits 25..16 get used since SHMMAX is so low) - bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach -*/ -/* on the m68k both bits 0 and 1 must be zero */ -/* format on the sun3 is similar, but bits 30, 31 are set to zero and all - others are reduced by 2. --m */ - -#ifndef CONFIG_SUN3 -#define SHM_ID_SHIFT 9 -#else -#define SHM_ID_SHIFT 7 -#endif -#define _SHM_ID_BITS 7 -#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1) - -#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS) -#define _SHM_IDX_BITS 15 -#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1) - -#endif /* _H8300_SHM_H */ diff --git a/include/asm-h8300/shmbuf.h b/include/asm-h8300/shmbuf.h deleted file mode 100644 index 64e77993a7a..00000000000 --- a/include/asm-h8300/shmbuf.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _H8300_SHMBUF_H -#define _H8300_SHMBUF_H - -/* - * The shmid64_ds structure for m68k architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - unsigned long __unused1; - __kernel_time_t shm_dtime; /* last detach time */ - unsigned long __unused2; - __kernel_time_t shm_ctime; /* last change time */ - unsigned long __unused3; - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. 
of current attaches */ - unsigned long __unused4; - unsigned long __unused5; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _H8300_SHMBUF_H */ diff --git a/include/asm-h8300/shmparam.h b/include/asm-h8300/shmparam.h deleted file mode 100644 index d1863953ec6..00000000000 --- a/include/asm-h8300/shmparam.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_SHMPARAM_H -#define _H8300_SHMPARAM_H - -#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ - -#endif /* _H8300_SHMPARAM_H */ diff --git a/include/asm-h8300/sigcontext.h b/include/asm-h8300/sigcontext.h deleted file mode 100644 index e4b81505f8f..00000000000 --- a/include/asm-h8300/sigcontext.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _ASM_H8300_SIGCONTEXT_H -#define _ASM_H8300_SIGCONTEXT_H - -struct sigcontext { - unsigned long sc_mask; /* old sigmask */ - unsigned long sc_usp; /* old user stack pointer */ - unsigned long sc_er0; - unsigned long sc_er1; - unsigned long sc_er2; - unsigned long sc_er3; - unsigned long sc_er4; - unsigned long sc_er5; - unsigned long sc_er6; - unsigned short sc_ccr; - unsigned long sc_pc; -}; - -#endif diff --git a/include/asm-h8300/siginfo.h b/include/asm-h8300/siginfo.h deleted file mode 100644 index bc8fbea931a..00000000000 --- a/include/asm-h8300/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_SIGINFO_H -#define _H8300_SIGINFO_H - -#include - -#endif diff --git a/include/asm-h8300/signal.h b/include/asm-h8300/signal.h deleted file mode 100644 index 7bc15048a64..00000000000 --- a/include/asm-h8300/signal.h +++ /dev/null @@ -1,161 +0,0 @@ -#ifndef _H8300_SIGNAL_H -#define _H8300_SIGNAL_H - -#include - -/* Avoid too many header ordering problems. */ -struct siginfo; - -#ifdef __KERNEL__ -/* Most things should be clean enough to redefine this at will, if care - is taken to make libc match. */ - -#define _NSIG 64 -#define _NSIG_BPW 32 -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -#define NSIG 32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGIOT 6 -#define SIGBUS 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGUSR1 10 -#define SIGSEGV 11 -#define SIGUSR2 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGSTKFLT 16 -#define SIGCHLD 17 -#define SIGCONT 18 -#define SIGSTOP 19 -#define SIGTSTP 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGURG 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGIO 29 -#define SIGPOLL SIGIO -/* -#define SIGLOST 29 -*/ -#define SIGPWR 30 -#define SIGSYS 31 -#define SIGUNUSED 31 - -/* These should not be considered constants from userland. */ -#define SIGRTMIN 32 -#define SIGRTMAX _NSIG - -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. 
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - -#define SA_RESTORER 0x04000000 - -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - -#define MINSIGSTKSZ 2048 -#define SIGSTKSZ 8192 - -#include - -#ifdef __KERNEL__ -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - void (*sa_restorer)(void); - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - -#ifdef __KERNEL__ - -#include -#undef __HAVE_ARCH_SIG_BITOPS - -#define ptrace_signal_deliver(regs, cookie) do { } while (0) - -#endif /* __KERNEL__ */ - -#endif /* _H8300_SIGNAL_H */ diff --git a/include/asm-h8300/smp.h b/include/asm-h8300/smp.h deleted file mode 100644 index 9e9bd7e5892..00000000000 --- a/include/asm-h8300/smp.h +++ /dev/null @@ -1 +0,0 @@ -/* nothing required here yet */ diff --git a/include/asm-h8300/socket.h b/include/asm-h8300/socket.h deleted file mode 100644 index da2520dbf25..00000000000 --- a/include/asm-h8300/socket.h +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef _ASM_SOCKET_H -#define _ASM_SOCKET_H - -#include - -/* For setsockoptions(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 15 */ -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#endif /* _ASM_SOCKET_H */ diff --git a/include/asm-h8300/sockios.h b/include/asm-h8300/sockios.h deleted file mode 100644 index 
e9c7ec810c2..00000000000 --- a/include/asm-h8300/sockios.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ARCH_H8300_SOCKIOS__ -#define __ARCH_H8300_SOCKIOS__ - -/* Socket-level I/O control calls. */ -#define FIOSETOWN 0x8901 -#define SIOCSPGRP 0x8902 -#define FIOGETOWN 0x8903 -#define SIOCGPGRP 0x8904 -#define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ - -#endif /* __ARCH_H8300_SOCKIOS__ */ diff --git a/include/asm-h8300/spinlock.h b/include/asm-h8300/spinlock.h deleted file mode 100644 index d5407fa173e..00000000000 --- a/include/asm-h8300/spinlock.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __H8300_SPINLOCK_H -#define __H8300_SPINLOCK_H - -#error "H8/300 doesn't do SMP yet" - -#endif diff --git a/include/asm-h8300/stat.h b/include/asm-h8300/stat.h deleted file mode 100644 index 62c3cc24dfe..00000000000 --- a/include/asm-h8300/stat.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef _H8300_STAT_H -#define _H8300_STAT_H - -struct __old_kernel_stat { - unsigned short st_dev; - unsigned short st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned long st_size; - unsigned long st_atime; - unsigned long st_mtime; - unsigned long st_ctime; -}; - -struct stat { - unsigned short st_dev; - unsigned short __pad1; - unsigned long st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned short __pad2; - unsigned long st_size; - unsigned long st_blksize; - unsigned long st_blocks; - unsigned long st_atime; - unsigned long __unused1; - unsigned long st_mtime; - unsigned long __unused2; - unsigned long st_ctime; - unsigned long __unused3; - unsigned long __unused4; - unsigned long __unused5; -}; - -/* This matches struct stat64 in glibc2.1, hence the absolutely - * insane amounts of padding around dev_t's. - */ -struct stat64 { - unsigned long long st_dev; - unsigned char __pad1[2]; - -#define STAT64_HAS_BROKEN_ST_INO 1 - unsigned long __st_ino; - - unsigned int st_mode; - unsigned int st_nlink; - - unsigned long st_uid; - unsigned long st_gid; - - unsigned long long st_rdev; - unsigned char __pad3[2]; - - long long st_size; - unsigned long st_blksize; - - unsigned long __pad4; /* future possible st_blocks high bits */ - unsigned long st_blocks; /* Number 512-byte blocks allocated. 
*/ - - unsigned long st_atime; - unsigned long st_atime_nsec; - - unsigned long st_mtime; - unsigned long st_mtime_nsec; - - unsigned long st_ctime; - unsigned long st_ctime_nsec; - - unsigned long long st_ino; -}; - -#endif /* _H8300_STAT_H */ diff --git a/include/asm-h8300/statfs.h b/include/asm-h8300/statfs.h deleted file mode 100644 index b96efa712aa..00000000000 --- a/include/asm-h8300/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_STATFS_H -#define _H8300_STATFS_H - -#include - -#endif /* _H8300_STATFS_H */ diff --git a/include/asm-h8300/string.h b/include/asm-h8300/string.h deleted file mode 100644 index ca5034897d8..00000000000 --- a/include/asm-h8300/string.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _H8300_STRING_H_ -#define _H8300_STRING_H_ - -#ifdef __KERNEL__ /* only set these up for kernel code */ - -#include -#include - -#define __HAVE_ARCH_MEMSET -extern void * memset(void * s, int c, size_t count); - -#define __HAVE_ARCH_MEMCPY -extern void * memcpy(void *d, const void *s, size_t count); - -#else /* KERNEL */ - -/* - * let user libraries deal with these, - * IMHO the kernel has no place defining these functions for user apps - */ - -#define __HAVE_ARCH_STRCPY 1 -#define __HAVE_ARCH_STRNCPY 1 -#define __HAVE_ARCH_STRCAT 1 -#define __HAVE_ARCH_STRNCAT 1 -#define __HAVE_ARCH_STRCMP 1 -#define __HAVE_ARCH_STRNCMP 1 -#define __HAVE_ARCH_STRNICMP 1 -#define __HAVE_ARCH_STRCHR 1 -#define __HAVE_ARCH_STRRCHR 1 -#define __HAVE_ARCH_STRSTR 1 -#define __HAVE_ARCH_STRLEN 1 -#define __HAVE_ARCH_STRNLEN 1 -#define __HAVE_ARCH_MEMSET 1 -#define __HAVE_ARCH_MEMCPY 1 -#define __HAVE_ARCH_MEMMOVE 1 -#define __HAVE_ARCH_MEMSCAN 1 -#define __HAVE_ARCH_MEMCMP 1 -#define __HAVE_ARCH_MEMCHR 1 -#define __HAVE_ARCH_STRTOK 1 - -#endif /* KERNEL */ - -#endif /* _M68K_STRING_H_ */ diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h deleted file mode 100644 index 4b8e475908a..00000000000 --- a/include/asm-h8300/system.h +++ /dev/null @@ -1,158 +0,0 @@ -#ifndef _H8300_SYSTEM_H -#define _H8300_SYSTEM_H - -#include - -/* - * switch_to(n) should switch tasks to task ptr, first checking that - * ptr isn't the current task, in which case it does nothing. This - * also clears the TS-flag if the task we switched to has used the - * math co-processor latest. - */ -/* - * switch_to() saves the extra registers, that are not saved - * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and - * a0-a1. Some of these are used by schedule() and its predecessors - * and so we might get see unexpected behaviors when a task returns - * with unexpected register values. - * - * syscall stores these registers itself and none of them are used - * by syscall after the function in the syscall has been called. - * - * Beware that resume now expects *next to be in d1 and the offset of - * tss to be in a1. This saves a few instructions as we no longer have - * to push them onto the stack and read them back right after. - * - * 02/17/96 - Jes Sorensen (jds@kom.auc.dk) - * - * Changed 96/09/19 by Andreas Schwab - * pass prev in a0, next in a1, offset of tss in d1, and whether - * the mm structures are shared in d2 (to avoid atc flushing). 
- * - * H8/300 Porting 2002/09/04 Yoshinori Sato - */ - -asmlinkage void resume(void); -#define switch_to(prev,next,last) { \ - void *_last; \ - __asm__ __volatile__( \ - "mov.l %1, er0\n\t" \ - "mov.l %2, er1\n\t" \ - "mov.l %3, er2\n\t" \ - "jsr @_resume\n\t" \ - "mov.l er2,%0\n\t" \ - : "=r" (_last) \ - : "r" (&(prev->thread)), \ - "r" (&(next->thread)), \ - "g" (prev) \ - : "cc", "er0", "er1", "er2", "er3"); \ - (last) = _last; \ -} - -#define __sti() asm volatile ("andc #0x7f,ccr") -#define __cli() asm volatile ("orc #0x80,ccr") - -#define __save_flags(x) \ - asm volatile ("stc ccr,%w0":"=r" (x)) - -#define __restore_flags(x) \ - asm volatile ("ldc %w0,ccr": :"r" (x)) - -#define irqs_disabled() \ -({ \ - unsigned char flags; \ - __save_flags(flags); \ - ((flags & 0x80) == 0x80); \ -}) - -#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") - -/* For spinlocks etc */ -#define local_irq_disable() __cli() -#define local_irq_enable() __sti() -#define local_irq_save(x) ({ __save_flags(x); local_irq_disable(); }) -#define local_irq_restore(x) __restore_flags(x) -#define local_save_flags(x) __save_flags(x) - -/* - * Force strict CPU ordering. - * Not really required on H8... - */ -#define nop() asm volatile ("nop"::) -#define mb() asm volatile ("" : : :"memory") -#define rmb() asm volatile ("" : : :"memory") -#define wmb() asm volatile ("" : : :"memory") -#define set_mb(var, value) do { xchg(&var, value); } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -struct __xchg_dummy { unsigned long a[100]; }; -#define __xg(x) ((volatile struct __xchg_dummy *)(x)) - -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) -{ - unsigned long tmp, flags; - - local_irq_save(flags); - - switch (size) { - case 1: - __asm__ __volatile__ - ("mov.b %2,%0\n\t" - "mov.b %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - case 2: - __asm__ __volatile__ - ("mov.w %2,%0\n\t" - "mov.w %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - case 4: - __asm__ __volatile__ - ("mov.l %2,%0\n\t" - "mov.l %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - default: - tmp = 0; - } - local_irq_restore(flags); - return tmp; -} - -#define HARD_RESET_NOW() ({ \ - local_irq_disable(); \ - asm("jmp @@0"); \ -}) - -#include - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. 
- */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#ifndef CONFIG_SMP -#include -#endif - -#define arch_align_stack(x) (x) - -#endif /* _H8300_SYSTEM_H */ diff --git a/include/asm-h8300/target_time.h b/include/asm-h8300/target_time.h deleted file mode 100644 index 9f2a9aa1fe6..00000000000 --- a/include/asm-h8300/target_time.h +++ /dev/null @@ -1,4 +0,0 @@ -extern int platform_timer_setup(void (*timer_int)(int, void *, struct pt_regs *)); -extern void platform_timer_eoi(void); -extern void platform_gettod(unsigned int *year, unsigned int *mon, unsigned int *day, - unsigned int *hour, unsigned int *min, unsigned int *sec); diff --git a/include/asm-h8300/termbits.h b/include/asm-h8300/termbits.h deleted file mode 100644 index 31eca81db3f..00000000000 --- a/include/asm-h8300/termbits.h +++ /dev/null @@ -1,200 +0,0 @@ -#ifndef __ARCH_H8300_TERMBITS_H__ -#define __ARCH_H8300_TERMBITS_H__ - -#include - -typedef unsigned char cc_t; -typedef unsigned int speed_t; -typedef unsigned int tcflag_t; - -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ -}; - -struct termios2 { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -struct ktermios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -/* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - - -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IUCLC 0001000 -#define IXON 0002000 -#define IXANY 0004000 -#define IXOFF 0010000 -#define IMAXBEL 0020000 -#define IUTF8 0040000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define OLCUC 0000002 -#define ONLCR 0000004 -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 -#define OFILL 0000100 -#define OFDEL 0000200 -#define NLDLY 0000400 -#define NL0 0000000 -#define NL1 0000400 -#define CRDLY 0003000 -#define CR0 0000000 -#define CR1 0001000 -#define CR2 0002000 -#define CR3 0003000 -#define TABDLY 0014000 -#define TAB0 0000000 -#define TAB1 0004000 -#define TAB2 0010000 -#define TAB3 0014000 -#define XTABS 0014000 -#define BSDLY 0020000 -#define BS0 0000000 -#define BS1 0020000 -#define VTDLY 0040000 -#define VT0 0000000 -#define VT1 0040000 
-#define FFDLY 0100000 -#define FF0 0000000 -#define FF1 0100000 - -/* c_cflag bit meaning */ -#define CBAUD 0010017 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CSIZE 0000060 -#define CS5 0000000 -#define CS6 0000020 -#define CS7 0000040 -#define CS8 0000060 -#define CSTOPB 0000100 -#define CREAD 0000200 -#define PARENB 0000400 -#define PARODD 0001000 -#define HUPCL 0002000 -#define CLOCAL 0004000 -#define CBAUDEX 0010000 -#define BOTHER 0010000 -#define B57600 0010001 -#define B115200 0010002 -#define B230400 0010003 -#define B460800 0010004 -#define B500000 0010005 -#define B576000 0010006 -#define B921600 0010007 -#define B1000000 0010010 -#define B1152000 0010011 -#define B1500000 0010012 -#define B2000000 0010013 -#define B2500000 0010014 -#define B3000000 0010015 -#define B3500000 0010016 -#define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate */ -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -#define IBSHIFT 16 /* shift from CBAUD to CIBAUD */ - -/* c_lflag bits */ -#define ISIG 0000001 -#define ICANON 0000002 -#define XCASE 0000004 -#define ECHO 0000010 -#define ECHOE 0000020 -#define ECHOK 0000040 -#define ECHONL 0000100 -#define NOFLSH 0000200 -#define TOSTOP 0000400 -#define ECHOCTL 0001000 -#define ECHOPRT 0002000 -#define ECHOKE 0004000 -#define FLUSHO 0010000 -#define PENDIN 0040000 -#define IEXTEN 0100000 - - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* tcsetattr uses these */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 - -#endif /* __ARCH_H8300_TERMBITS_H__ */ diff --git a/include/asm-h8300/termios.h b/include/asm-h8300/termios.h deleted file mode 100644 index 70eea64b421..00000000000 --- a/include/asm-h8300/termios.h +++ /dev/null @@ -1,92 +0,0 @@ -#ifndef _H8300_TERMIOS_H -#define _H8300_TERMIOS_H - -#include -#include - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -#ifdef __KERNEL__ -/* intr=^C quit=^| erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" -#endif - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, 
TIOCSERGETLSR, &result) where result may be as below */ - -#ifdef __KERNEL__ - -/* - * Translate a "termio" structure into a "termios". Ugh. - */ -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - unsigned short tmp; \ - get_user(tmp, &(termio)->c_iflag); \ - (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ - get_user(tmp, &(termio)->c_oflag); \ - (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ - get_user(tmp, &(termio)->c_cflag); \ - (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ - get_user(tmp, &(termio)->c_lflag); \ - (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ - get_user((termios)->c_line, &(termio)->c_line); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif /* __KERNEL__ */ - -#endif /* _H8300_TERMIOS_H */ diff --git a/include/asm-h8300/thread_info.h b/include/asm-h8300/thread_info.h deleted file mode 100644 index aafd4d322ec..00000000000 --- a/include/asm-h8300/thread_info.h +++ /dev/null @@ -1,104 +0,0 @@ -/* thread_info.h: h8300 low-level thread information - * adapted from the i386 and PPC versions by Yoshinori Sato - * - * Copyright (C) 2002 David Howells (dhowells@redhat.com) - * - Incorporating suggestions made by Linus Torvalds and Dave Miller - */ - -#ifndef _ASM_THREAD_INFO_H -#define _ASM_THREAD_INFO_H - -#include - -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ - -/* - * low level task data. - * If you change this, change the TI_* offsets below to match. - */ -struct thread_info { - struct task_struct *task; /* main task structure */ - struct exec_domain *exec_domain; /* execution domain */ - unsigned long flags; /* low level flags */ - int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - struct restart_block restart_block; -}; - -/* - * macros/functions for gaining access to the thread information structure - */ -#define INIT_THREAD_INFO(tsk) \ -{ \ - .task = &tsk, \ - .exec_domain = &default_exec_domain, \ - .flags = 0, \ - .cpu = 0, \ - .preempt_count = 1, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ -} - -#define init_thread_info (init_thread_union.thread_info) -#define init_stack (init_thread_union.stack) - - -/* - * Size of kernel stack for each process. This must be a power of 2... 
- */ -#define THREAD_SIZE_ORDER 1 -#define THREAD_SIZE 8192 /* 2 pages */ - - -/* how to get the thread information struct from C */ -static inline struct thread_info *current_thread_info(void) -{ - struct thread_info *ti; - __asm__( - "mov.l sp, %0 \n\t" - "and.l %1, %0" - : "=&r"(ti) - : "i" (~(THREAD_SIZE-1)) - ); - return ti; -} - -#endif /* __ASSEMBLY__ */ - -/* - * Offsets in thread_info structure, used in assembly code - */ -#define TI_TASK 0 -#define TI_EXECDOMAIN 4 -#define TI_FLAGS 8 -#define TI_CPU 12 -#define TI_PRE_COUNT 16 - -#define PREEMPT_ACTIVE 0x4000000 - -/* - * thread information flag bit numbers - */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_SIGPENDING 1 /* signal pending */ -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling - TIF_NEED_RESCHED */ -#define TIF_MEMDIE 4 -#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ - -/* as above, but as bit values */ -#define _TIF_SYSCALL_TRACE (1< - -#endif - -#endif diff --git a/include/asm-h8300/tlbflush.h b/include/asm-h8300/tlbflush.h deleted file mode 100644 index 41c148a9208..00000000000 --- a/include/asm-h8300/tlbflush.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef _H8300_TLBFLUSH_H -#define _H8300_TLBFLUSH_H - -/* - * Copyright (C) 2000 Lineo, David McCullough - * Copyright (C) 2000-2002, Greg Ungerer - */ - -#include - -/* - * flush all user-space atc entries. - */ -static inline void __flush_tlb(void) -{ - BUG(); -} - -static inline void __flush_tlb_one(unsigned long addr) -{ - BUG(); -} - -#define flush_tlb() __flush_tlb() - -/* - * flush all atc entries (both kernel and user-space entries). - */ -static inline void flush_tlb_all(void) -{ - BUG(); -} - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - BUG(); -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) -{ - BUG(); -} - -static inline void flush_tlb_range(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - BUG(); -} - -static inline void flush_tlb_kernel_page(unsigned long addr) -{ - BUG(); -} - -#endif /* _H8300_TLBFLUSH_H */ diff --git a/include/asm-h8300/topology.h b/include/asm-h8300/topology.h deleted file mode 100644 index fdc121924d4..00000000000 --- a/include/asm-h8300/topology.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_H8300_TOPOLOGY_H -#define _ASM_H8300_TOPOLOGY_H - -#include - -#endif /* _ASM_H8300_TOPOLOGY_H */ diff --git a/include/asm-h8300/traps.h b/include/asm-h8300/traps.h deleted file mode 100644 index 41cf6be02f6..00000000000 --- a/include/asm-h8300/traps.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * linux/include/asm-h8300/traps.h - * - * Copyright (C) 2003 Yoshinori Sato - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. 
- */ - -#ifndef _H8300_TRAPS_H -#define _H8300_TRAPS_H - -extern void system_call(void); -extern void interrupt_entry(void); -extern void trace_break(void); - -#define JMP_OP 0x5a000000 -#define JSR_OP 0x5e000000 -#define VECTOR(address) ((JMP_OP)|((unsigned long)address)) -#define REDIRECT(address) ((JSR_OP)|((unsigned long)address)) - -#define TRACE_VEC 5 - -#define TRAP0_VEC 8 -#define TRAP1_VEC 9 -#define TRAP2_VEC 10 -#define TRAP3_VEC 11 - -#if defined(__H8300H__) -#define NR_TRAPS 12 -#endif -#if defined(__H8300S__) -#define NR_TRAPS 16 -#endif - -#endif /* _H8300_TRAPS_H */ diff --git a/include/asm-h8300/types.h b/include/asm-h8300/types.h deleted file mode 100644 index 12875190b15..00000000000 --- a/include/asm-h8300/types.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _H8300_TYPES_H -#define _H8300_TYPES_H - -#include - -#if !defined(__ASSEMBLY__) - -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ - -typedef unsigned short umode_t; - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -/* Dma addresses are 32-bits wide. */ - -typedef u32 dma_addr_t; - -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -#endif /* _H8300_TYPES_H */ diff --git a/include/asm-h8300/uaccess.h b/include/asm-h8300/uaccess.h deleted file mode 100644 index 356068cd087..00000000000 --- a/include/asm-h8300/uaccess.h +++ /dev/null @@ -1,162 +0,0 @@ -#ifndef __H8300_UACCESS_H -#define __H8300_UACCESS_H - -/* - * User space memory access functions - */ -#include -#include -#include - -#include - -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - -/* We let the MMU do all checking */ -#define access_ok(type, addr, size) __access_ok((unsigned long)addr,size) -static inline int __access_ok(unsigned long addr, unsigned long size) -{ -#define RANGE_CHECK_OK(addr, size, lower, upper) \ - (((addr) >= (lower)) && (((addr) + (size)) < (upper))) - - extern unsigned long _ramend; - return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend)); -} - -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -/* Returns 0 if exception not found and fixup otherwise. */ -extern unsigned long search_exception_table(unsigned long); - - -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. 
- */ - -#define put_user(x, ptr) \ -({ \ - int __pu_err = 0; \ - typeof(*(ptr)) __pu_val = (x); \ - switch (sizeof (*(ptr))) { \ - case 1: \ - case 2: \ - case 4: \ - *(ptr) = (__pu_val); \ - break; \ - case 8: \ - memcpy(ptr, &__pu_val, sizeof (*(ptr))); \ - break; \ - default: \ - __pu_err = __put_user_bad(); \ - break; \ - } \ - __pu_err; \ -}) -#define __put_user(x, ptr) put_user(x, ptr) - -extern int __put_user_bad(void); - -/* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. - */ - -#define __ptr(x) ((unsigned long *)(x)) - -/* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. - */ - -#define get_user(x, ptr) \ -({ \ - int __gu_err = 0; \ - typeof(*(ptr)) __gu_val = *ptr; \ - switch (sizeof(*(ptr))) { \ - case 1: \ - case 2: \ - case 4: \ - case 8: \ - break; \ - default: \ - __gu_err = __get_user_bad(); \ - __gu_val = 0; \ - break; \ - } \ - (x) = __gu_val; \ - __gu_err; \ -}) -#define __get_user(x, ptr) get_user(x, ptr) - -extern int __get_user_bad(void); - -#define copy_from_user(to, from, n) (memcpy(to, from, n), 0) -#define copy_to_user(to, from, n) (memcpy(to, from, n), 0) - -#define __copy_from_user(to, from, n) copy_from_user(to, from, n) -#define __copy_to_user(to, from, n) copy_to_user(to, from, n) -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - -#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; }) - -#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; }) - -/* - * Copy a null terminated string from userspace. - */ - -static inline long -strncpy_from_user(char *dst, const char *src, long count) -{ - char *tmp; - strncpy(dst, src, count); - for (tmp = dst; *tmp && count > 0; tmp++, count--) - ; - return(tmp - dst); /* DAVIDM should we count a NUL ? 
check getname */ -} - -/* - * Return the size of a string (including the ending 0) - * - * Return 0 on exception, a value greater than N if too long - */ -static inline long strnlen_user(const char *src, long n) -{ - return(strlen(src) + 1); /* DAVIDM make safer */ -} - -#define strlen_user(str) strnlen_user(str, 32767) - -/* - * Zero Userspace - */ - -static inline unsigned long -clear_user(void *to, unsigned long n) -{ - memset(to, 0, n); - return 0; -} - -#endif /* _H8300_UACCESS_H */ diff --git a/include/asm-h8300/ucontext.h b/include/asm-h8300/ucontext.h deleted file mode 100644 index 0bcf8f85fab..00000000000 --- a/include/asm-h8300/ucontext.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _H8300_UCONTEXT_H -#define _H8300_UCONTEXT_H - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct sigcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif diff --git a/include/asm-h8300/unaligned.h b/include/asm-h8300/unaligned.h deleted file mode 100644 index b8d06c70c2d..00000000000 --- a/include/asm-h8300/unaligned.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ASM_H8300_UNALIGNED_H -#define _ASM_H8300_UNALIGNED_H - -#include -#include -#include - -#define get_unaligned __get_unaligned_be -#define put_unaligned __put_unaligned_be - -#endif /* _ASM_H8300_UNALIGNED_H */ diff --git a/include/asm-h8300/unistd.h b/include/asm-h8300/unistd.h deleted file mode 100644 index 99f3c3561ec..00000000000 --- a/include/asm-h8300/unistd.h +++ /dev/null @@ -1,364 +0,0 @@ -#ifndef _ASM_H8300_UNISTD_H_ -#define _ASM_H8300_UNISTD_H_ - -/* - * This file contains the system call numbers. - */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_lchown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 -#define __NR_getrusage 77 -#define __NR_gettimeofday 
78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl 110 -#define __NR_vhangup 111 -#define __NR_idle 112 -#define __NR_vm86old 113 -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_modify_ldt 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_vm86 166 -#define __NR_query_module 167 -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_chown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 -#define __NR_getpmsg 188 /* some people actually want streams */ -#define __NR_putpmsg 189 /* some people actually want streams */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_lchown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define 
__NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_chown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 -#define __NR_pivot_root 217 -#define __NR_mincore 218 -#define __NR_madvise 219 -#define __NR_madvise1 219 -#define __NR_getdents64 220 -#define __NR_fcntl64 221 -/* 223 is unused */ -#define __NR_gettid 224 -#define __NR_readahead 225 -#define __NR_setxattr 226 -#define __NR_lsetxattr 227 -#define __NR_fsetxattr 228 -#define __NR_getxattr 229 -#define __NR_lgetxattr 230 -#define __NR_fgetxattr 231 -#define __NR_listxattr 232 -#define __NR_llistxattr 233 -#define __NR_flistxattr 234 -#define __NR_removexattr 235 -#define __NR_lremovexattr 236 -#define __NR_fremovexattr 237 -#define __NR_tkill 238 -#define __NR_sendfile64 239 -#define __NR_futex 240 -#define __NR_sched_setaffinity 241 -#define __NR_sched_getaffinity 242 -#define __NR_set_thread_area 243 -#define __NR_get_thread_area 244 -#define __NR_io_setup 245 -#define __NR_io_destroy 246 -#define __NR_io_getevents 247 -#define __NR_io_submit 248 -#define __NR_io_cancel 249 -#define __NR_fadvise64 250 -/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ -#define __NR_exit_group 252 -#define __NR_lookup_dcookie 253 -#define __NR_epoll_create 254 -#define __NR_epoll_ctl 255 -#define __NR_epoll_wait 256 -#define __NR_remap_file_pages 257 -#define __NR_set_tid_address 258 -#define __NR_timer_create 259 -#define __NR_timer_settime (__NR_timer_create+1) -#define __NR_timer_gettime (__NR_timer_create+2) -#define __NR_timer_getoverrun (__NR_timer_create+3) -#define __NR_timer_delete (__NR_timer_create+4) -#define __NR_clock_settime (__NR_timer_create+5) -#define __NR_clock_gettime (__NR_timer_create+6) -#define __NR_clock_getres (__NR_timer_create+7) -#define __NR_clock_nanosleep (__NR_timer_create+8) -#define __NR_statfs64 268 -#define __NR_fstatfs64 269 -#define __NR_tgkill 270 -#define __NR_utimes 271 -#define __NR_fadvise64_64 272 -#define __NR_vserver 273 -#define __NR_mbind 274 -#define __NR_get_mempolicy 275 -#define __NR_set_mempolicy 276 -#define __NR_mq_open 277 -#define __NR_mq_unlink (__NR_mq_open+1) -#define __NR_mq_timedsend (__NR_mq_open+2) -#define __NR_mq_timedreceive (__NR_mq_open+3) -#define __NR_mq_notify (__NR_mq_open+4) -#define __NR_mq_getsetattr (__NR_mq_open+5) -#define __NR_kexec_load 283 -#define __NR_waitid 284 -/* #define __NR_sys_setaltroot 285 */ -#define __NR_add_key 286 -#define __NR_request_key 287 -#define __NR_keyctl 288 -#define __NR_ioprio_set 289 -#define __NR_ioprio_get 290 -#define __NR_inotify_init 291 -#define __NR_inotify_add_watch 292 -#define __NR_inotify_rm_watch 293 -#define __NR_migrate_pages 294 -#define __NR_openat 295 -#define __NR_mkdirat 296 -#define __NR_mknodat 297 -#define __NR_fchownat 298 -#define __NR_futimesat 299 -#define __NR_fstatat64 300 -#define __NR_unlinkat 301 -#define __NR_renameat 302 -#define __NR_linkat 303 -#define __NR_symlinkat 304 -#define __NR_readlinkat 305 -#define __NR_fchmodat 306 -#define __NR_faccessat 307 -#define __NR_pselect6 308 -#define __NR_ppoll 309 -#define __NR_unshare 310 -#define __NR_set_robust_list 311 -#define __NR_get_robust_list 312 -#define __NR_splice 313 -#define __NR_sync_file_range 314 -#define __NR_tee 315 -#define __NR_vmsplice 316 -#define __NR_move_pages 317 
-#define __NR_getcpu 318 -#define __NR_epoll_pwait 319 - -#ifdef __KERNEL__ - -#define NR_syscalls 320 - -#define __ARCH_WANT_IPC_PARSE_VERSION -#define __ARCH_WANT_OLD_READDIR -#define __ARCH_WANT_OLD_STAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_GETHOSTNAME -#define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK -#define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME -#define __ARCH_WANT_SYS_WAITPID -#define __ARCH_WANT_SYS_SOCKETCALL -#define __ARCH_WANT_SYS_FADVISE64 -#define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK -#define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLD_GETRLIMIT -#define __ARCH_WANT_SYS_OLDUMOUNT -#define __ARCH_WANT_SYS_SIGPENDING -#define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION - -/* - * "Conditional" syscalls - */ -#define cond_syscall(name) \ - asm (".weak\t_" #name "\n" \ - ".set\t_" #name ",_sys_ni_syscall"); - -#endif /* __KERNEL__ */ -#endif /* _ASM_H8300_UNISTD_H_ */ diff --git a/include/asm-h8300/user.h b/include/asm-h8300/user.h deleted file mode 100644 index 14a9e18950f..00000000000 --- a/include/asm-h8300/user.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _H8300_USER_H -#define _H8300_USER_H - -#include - -/* Core file format: The core file is written in such a way that gdb - can understand it and provide useful information to the user (under - linux we use the 'trad-core' bfd). There are quite a number of - obstacles to being able to view the contents of the floating point - registers, and until these are solved you will not be able to view the - contents of them. Actually, you can read in the core file and look at - the contents of the user struct to find out what the floating point - registers contain. - The actual file contents are as follows: - UPAGE: 1 page consisting of a user struct that tells gdb what is present - in the file. Directly after this is a copy of the task_struct, which - is currently not used by gdb, but it may come in useful at some point. - All of the registers are stored as part of the upage. The upage should - always be only one page. - DATA: The data area is stored. We use current->end_text to - current->brk to pick up all of the user variables, plus any memory - that may have been malloced. No attempt is made to determine if a page - is demand-zero or if a page is totally unused, we just cover the entire - range. All of the addresses are rounded in such a way that an integral - number of pages is written. - STACK: We need the stack information in order to get a meaningful - backtrace. We need to write the data from (esp) to - current->start_stack, so we round each of these off in order to be able - to write an integer number of pages. - The minimum core file size is 3 pages, or 12288 bytes. -*/ - -/* This is the old layout of "struct pt_regs" as of Linux 1.x, and - is still the layout used by user (the new pt_regs doesn't have - all registers). */ -struct user_regs_struct { - long er1,er2,er3,er4,er5,er6; - long er0; - long usp; - long orig_er0; - short ccr; - long pc; -}; - - -/* When the kernel dumps core, it starts by dumping the user struct - - this will be used by gdb to figure out where the data and stack segments - are within the file, and what virtual addresses to use. */ -struct user{ -/* We start with the registers, to mimic the way that "memory" is returned - from the ptrace(3,...) function. 
*/ - struct user_regs_struct regs; /* Where the registers are actually stored */ -/* ptrace does not yet supply these. Someday.... */ -/* The rest of this junk is to help gdb figure out what goes where */ - unsigned long int u_tsize; /* Text segment size (pages). */ - unsigned long int u_dsize; /* Data segment size (pages). */ - unsigned long int u_ssize; /* Stack segment size (pages). */ - unsigned long start_code; /* Starting virtual address of text. */ - unsigned long start_stack; /* Starting virtual address of stack area. - This is actually the bottom of the stack, - the top of the stack is always found in the - esp register. */ - long int signal; /* Signal that caused the core dump. */ - int reserved; /* No longer used */ - unsigned long u_ar0; /* Used by gdb to help find the values for */ - /* the registers. */ - unsigned long magic; /* To uniquely identify a core file */ - char u_comm[32]; /* User command that was responsible */ -}; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif diff --git a/include/asm-h8300/virtconvert.h b/include/asm-h8300/virtconvert.h deleted file mode 100644 index 19cfd62b11c..00000000000 --- a/include/asm-h8300/virtconvert.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __H8300_VIRT_CONVERT__ -#define __H8300_VIRT_CONVERT__ - -/* - * Macros used for converting between virtual and physical mappings. - */ - -#ifdef __KERNEL__ - -#include -#include - -#define phys_to_virt(vaddr) ((void *) (vaddr)) -#define virt_to_phys(vaddr) ((unsigned long) (vaddr)) - -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt - -#endif -#endif -- cgit v1.2.3-70-g09d2 From 9e2b2dc4133f65272a6d3c5dcb2ce63f8a87cae9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 13 Aug 2008 16:20:04 +0100 Subject: CRED: Introduce credential access wrappers The patches that are intended to introduce copy-on-write credentials for 2.6.28 require abstraction of access to some fields of the task structure, particularly for the case of one task accessing another's credentials where RCU will have to be observed. Introduced here are trivial no-op versions of the desired accessors for current and other tasks so that other subsystems can start to be converted over more easily. Wrappers are introduced into a new header (linux/cred.h) for UID/GID, EUID/EGID, SUID/SGID, FSUID/FSGID, cap_effective and current's subscribed user_struct. These wrappers are macros because the ordering between header files mitigates against making them inline functions. linux/cred.h is #included from linux/sched.h. Further, XFS is modified such that it no longer defines and uses parameterised versions of current_fs[ug]id(), thus getting rid of the namespace collision otherwise incurred. 
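As a minimal sketch of how the new wrappers are meant to be used (illustrative only: example_stamp_owner() is a hypothetical helper, not part of this patch), a caller that previously dereferenced current->fsuid and current->fsgid directly would switch to the accessors defined in linux/cred.h below:

	#include <linux/fs.h>
	#include <linux/cred.h>

	/* Hypothetical helper: stamp ownership on a freshly allocated inode
	 * through the new wrappers instead of poking at the task struct. */
	static void example_stamp_owner(struct inode *inode)
	{
		inode->i_uid = current_fsuid();	/* was: current->fsuid */
		inode->i_gid = current_fsgid();	/* was: current->fsgid */
	}

When the copy-on-write credential patches land, only the wrapper definitions need to change; callers written this way stay untouched.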
Signed-off-by: David Howells Signed-off-by: James Morris --- fs/xfs/linux-2.6/xfs_linux.h | 2 -- fs/xfs/xfs_inode.c | 4 ++-- fs/xfs/xfs_vnodeops.c | 8 +++---- include/linux/cred.h | 50 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/sched.h | 1 + 5 files changed, 57 insertions(+), 8 deletions(-) create mode 100644 include/linux/cred.h (limited to 'include') diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 3b7c4ff48ba..cc0f7b3a979 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -126,8 +126,6 @@ #define current_cpu() (raw_smp_processor_id()) #define current_pid() (current->pid) -#define current_fsuid(cred) (current->fsuid) -#define current_fsgid(cred) (current->fsgid) #define current_test_flags(f) (current->flags & (f)) #define current_set_flags_nested(sp, f) \ (*(sp) = current->flags, current->flags |= (f)) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 358511b85ce..00e80df9dd9 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1081,8 +1081,8 @@ xfs_ialloc( ip->i_d.di_onlink = 0; ip->i_d.di_nlink = nlink; ASSERT(ip->i_d.di_nlink == nlink); - ip->i_d.di_uid = current_fsuid(cr); - ip->i_d.di_gid = current_fsgid(cr); + ip->i_d.di_uid = current_fsuid(); + ip->i_d.di_gid = current_fsgid(); ip->i_d.di_projid = prid; memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 588bb4aa215..aa238c8fbd7 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -182,7 +182,7 @@ xfs_setattr( xfs_ilock(ip, lock_flags); /* boolean: are we the file owner? */ - file_owner = (current_fsuid(credp) == ip->i_d.di_uid); + file_owner = (current_fsuid() == ip->i_d.di_uid); /* * Change various properties of a file. @@ -1533,7 +1533,7 @@ xfs_create( * Make sure that we have allocated dquot(s) on disk. */ error = XFS_QM_DQVOPALLOC(mp, dp, - current_fsuid(credp), current_fsgid(credp), prid, + current_fsuid(), current_fsgid(), prid, XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp); if (error) goto std_return; @@ -2269,7 +2269,7 @@ xfs_mkdir( * Make sure that we have allocated dquot(s) on disk. */ error = XFS_QM_DQVOPALLOC(mp, dp, - current_fsuid(credp), current_fsgid(credp), prid, + current_fsuid(), current_fsgid(), prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); if (error) goto std_return; @@ -2495,7 +2495,7 @@ xfs_symlink( * Make sure that we have allocated dquot(s) on disk. */ error = XFS_QM_DQVOPALLOC(mp, dp, - current_fsuid(credp), current_fsgid(credp), prid, + current_fsuid(), current_fsgid(), prid, XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); if (error) goto std_return; diff --git a/include/linux/cred.h b/include/linux/cred.h new file mode 100644 index 00000000000..b69222cc1fd --- /dev/null +++ b/include/linux/cred.h @@ -0,0 +1,50 @@ +/* Credentials management + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_CRED_H +#define _LINUX_CRED_H + +#define get_current_user() (get_uid(current->user)) + +#define task_uid(task) ((task)->uid) +#define task_gid(task) ((task)->gid) +#define task_euid(task) ((task)->euid) +#define task_egid(task) ((task)->egid) + +#define current_uid() (current->uid) +#define current_gid() (current->gid) +#define current_euid() (current->euid) +#define current_egid() (current->egid) +#define current_suid() (current->suid) +#define current_sgid() (current->sgid) +#define current_fsuid() (current->fsuid) +#define current_fsgid() (current->fsgid) +#define current_cap() (current->cap_effective) + +#define current_uid_gid(_uid, _gid) \ +do { \ + *(_uid) = current->uid; \ + *(_gid) = current->gid; \ +} while(0) + +#define current_euid_egid(_uid, _gid) \ +do { \ + *(_uid) = current->euid; \ + *(_gid) = current->egid; \ +} while(0) + +#define current_fsuid_fsgid(_uid, _gid) \ +do { \ + *(_uid) = current->fsuid; \ + *(_gid) = current->fsgid; \ +} while(0) + +#endif /* _LINUX_CRED_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 5850bfb968a..cfb0d87b99f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -87,6 +87,7 @@ struct sched_param { #include #include #include +#include #include -- cgit v1.2.3-70-g09d2 From f4f4d58734916e816d4b4a7cf61b3fc22ce02683 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 28 Jul 2008 10:39:28 -0400 Subject: USB: add missing kerneldoc line for "needs_binding" This patch (as1117) adds a kerneldoc line for the "needs_binding" field in struct usb_interface. It was accidentally omitted when the field was added. Signed-off-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- include/linux/usb.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/usb.h b/include/linux/usb.h index 5811c5da69f..0924cd9c30f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -110,6 +110,8 @@ enum usb_interface_condition { * @sysfs_files_created: sysfs attributes exist * @needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend. + * @needs_binding: flag set when the driver should be re-probed or unbound + * following a reset or suspend operation it doesn't support. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. -- cgit v1.2.3-70-g09d2 From 0282b7f2a874e72c18fcd5a112ccf67f71ba7f5c Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 29 Jul 2008 12:01:04 -0400 Subject: usb-serial: don't release unregistered minors This patch (as1121) fixes a bug in the USB serial core. When a device is unregistered, the core will give back its minors -- even if the device hasn't been assigned any! The patch reserves the highest minor value (255) to mean that no minor was assigned. It also removes some dead code and does a small style fixup. 
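The essential pattern, condensed here from the hunks below for readability (all identifiers are exactly those introduced by the patch), is a sentinel minor value set at allocation time and tested before the minor range is given back:

	/* include/linux/usb/serial.h */
	#define SERIAL_TTY_MINORS	254	/* loads of devices :) */
	#define SERIAL_TTY_NO_MINOR	255	/* No minor was assigned */

	/* create_serial(): the device starts out with no minor assigned */
	serial->minor = SERIAL_TTY_NO_MINOR;

	/* destroy_serial(): only release a minor range that was assigned */
	if (serial->minor != SERIAL_TTY_NO_MINOR)
		return_serial(serial);

Reserving 255 as the sentinel is what shrinks SERIAL_TTY_MINORS from 255 to 254.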
Signed-off-by: Alan Stern Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/usb-serial.c | 7 +++---- include/linux/usb/serial.h | 3 ++- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 8c2d531eede..b157c48e8b7 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial) dbg("%s", __func__); - if (serial == NULL) - return; - for (i = 0; i < serial->num_ports; ++i) serial_table[serial->minor + i] = NULL; } @@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref) serial->type->shutdown(serial); /* return the minor range that this device had */ - return_serial(serial); + if (serial->minor != SERIAL_TTY_NO_MINOR) + return_serial(serial); for (i = 0; i < serial->num_ports; ++i) serial->port[i]->port.count = 0; @@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, serial->interface = interface; kref_init(&serial->kref); mutex_init(&serial->disc_mutex); + serial->minor = SERIAL_TTY_NO_MINOR; return serial; } diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 09a3e6a7518..655341d0f53 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -17,7 +17,8 @@ #include #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ -#define SERIAL_TTY_MINORS 255 /* loads of devices :) */ +#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ +#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ /* The maximum number of ports one device can grab at once */ #define MAX_NUM_PORTS 8 -- cgit v1.2.3-70-g09d2 From 550a7375fe720924241f0eb76e4a5c1a3eb8c32f Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Thu, 24 Jul 2008 12:27:36 +0300 Subject: USB: Add MUSB and TUSB support This patch adds support for MUSB and TUSB controllers integrated into omap2430 and davinci. It also adds support for external tusb6010 controller. 
Cc: David Brownell Cc: Tony Lindgren Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 6 + drivers/Makefile | 1 + drivers/usb/Kconfig | 2 + drivers/usb/gadget/Kconfig | 10 + drivers/usb/musb/Kconfig | 176 +++ drivers/usb/musb/Makefile | 86 ++ drivers/usb/musb/cppi_dma.c | 1540 ++++++++++++++++++++++++ drivers/usb/musb/cppi_dma.h | 133 +++ drivers/usb/musb/davinci.c | 462 ++++++++ drivers/usb/musb/davinci.h | 100 ++ drivers/usb/musb/musb_core.c | 2266 ++++++++++++++++++++++++++++++++++++ drivers/usb/musb/musb_core.h | 517 ++++++++ drivers/usb/musb/musb_debug.h | 66 ++ drivers/usb/musb/musb_dma.h | 172 +++ drivers/usb/musb/musb_gadget.c | 2033 ++++++++++++++++++++++++++++++++ drivers/usb/musb/musb_gadget.h | 108 ++ drivers/usb/musb/musb_gadget_ep0.c | 981 ++++++++++++++++ drivers/usb/musb/musb_host.c | 2170 ++++++++++++++++++++++++++++++++++ drivers/usb/musb/musb_host.h | 110 ++ drivers/usb/musb/musb_io.h | 115 ++ drivers/usb/musb/musb_procfs.c | 830 +++++++++++++ drivers/usb/musb/musb_regs.h | 300 +++++ drivers/usb/musb/musb_virthub.c | 425 +++++++ drivers/usb/musb/musbhsdma.c | 433 +++++++ drivers/usb/musb/omap2430.c | 324 ++++++ drivers/usb/musb/omap2430.h | 56 + drivers/usb/musb/tusb6010.c | 1151 ++++++++++++++++++ drivers/usb/musb/tusb6010.h | 402 +++++++ drivers/usb/musb/tusb6010_omap.c | 719 ++++++++++++ include/linux/usb/musb.h | 70 ++ 30 files changed, 15764 insertions(+) create mode 100644 drivers/usb/musb/Kconfig create mode 100644 drivers/usb/musb/Makefile create mode 100644 drivers/usb/musb/cppi_dma.c create mode 100644 drivers/usb/musb/cppi_dma.h create mode 100644 drivers/usb/musb/davinci.c create mode 100644 drivers/usb/musb/davinci.h create mode 100644 drivers/usb/musb/musb_core.c create mode 100644 drivers/usb/musb/musb_core.h create mode 100644 drivers/usb/musb/musb_debug.h create mode 100644 drivers/usb/musb/musb_dma.h create mode 100644 drivers/usb/musb/musb_gadget.c create mode 100644 drivers/usb/musb/musb_gadget.h create mode 100644 drivers/usb/musb/musb_gadget_ep0.c create mode 100644 drivers/usb/musb/musb_host.c create mode 100644 drivers/usb/musb/musb_host.h create mode 100644 drivers/usb/musb/musb_io.h create mode 100644 drivers/usb/musb/musb_procfs.c create mode 100644 drivers/usb/musb/musb_regs.h create mode 100644 drivers/usb/musb/musb_virthub.c create mode 100644 drivers/usb/musb/musbhsdma.c create mode 100644 drivers/usb/musb/omap2430.c create mode 100644 drivers/usb/musb/omap2430.h create mode 100644 drivers/usb/musb/tusb6010.c create mode 100644 drivers/usb/musb/tusb6010.h create mode 100644 drivers/usb/musb/tusb6010_omap.c create mode 100644 include/linux/usb/musb.h (limited to 'include') diff --git a/MAINTAINERS b/MAINTAINERS index 0c42dc25e0e..773d6bc3a9a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2928,6 +2928,12 @@ M: jirislaby@gmail.com L: linux-kernel@vger.kernel.org S: Maintained +MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER +P: Felipe Balbi +M: felipe.balbi@nokia.com +L: linux-usb@vger.kernel.org +S: Maintained + MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) P: Andrew Gallatin M: gallatin@myri.com diff --git a/drivers/Makefile b/drivers/Makefile index a280ab3d083..2735bde7347 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/ obj-$(CONFIG_PARIDE) += block/paride/ obj-$(CONFIG_TC) += tc/ obj-$(CONFIG_USB) += usb/ +obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/ obj-$(CONFIG_PCI) += usb/ obj-$(CONFIG_USB_GADGET) += usb/gadget/ obj-$(CONFIG_SERIO) += input/serio/ diff 
--git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 4f9b5ecfb72..bcefbddeba5 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -99,6 +99,8 @@ source "drivers/usb/mon/Kconfig" source "drivers/usb/host/Kconfig" +source "drivers/usb/musb/Kconfig" + source "drivers/usb/class/Kconfig" source "drivers/usb/storage/Kconfig" diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index c6a8c6b1116..acc95b2ac6f 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -284,6 +284,16 @@ config USB_LH7A40X default USB_GADGET select USB_GADGET_SELECTED +# built in ../musb along with host support +config USB_GADGET_MUSB_HDRC + boolean "Inventra HDRC USB Peripheral (TI, ...)" + depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) + select USB_GADGET_DUALSPEED + select USB_GADGET_SELECTED + help + This OTG-capable silicon IP is used in dual designs including + the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010. + config USB_GADGET_OMAP boolean "OMAP USB Device Controller" depends on ARCH_OMAP diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig new file mode 100644 index 00000000000..faca4333f27 --- /dev/null +++ b/drivers/usb/musb/Kconfig @@ -0,0 +1,176 @@ +# +# USB Dual Role (OTG-ready) Controller Drivers +# for silicon based on Mentor Graphics INVENTRA designs +# + +comment "Enable Host or Gadget support to see Inventra options" + depends on !USB && USB_GADGET=n + +# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller +config USB_MUSB_HDRC + depends on (USB || USB_GADGET) && HAVE_CLK + select TWL4030_USB if MACH_OMAP_3430SDP + tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' + help + Say Y here if your system has a dual role high speed USB + controller based on the Mentor Graphics silicon IP. Then + configure options to match your silicon and the board + it's being used with, including the USB peripheral role, + or the USB host role, or both. + + Texas Instruments parts using this IP include DaVinci 644x, + OMAP 243x, OMAP 343x, and TUSB 6010. + + If you do not know what this is, please say N. + + To compile this driver as a module, choose M here; the + module will be called "musb_hdrc". + +config USB_MUSB_SOC + boolean + depends on USB_MUSB_HDRC + default y if ARCH_DAVINCI + default y if ARCH_OMAP2430 + default y if ARCH_OMAP34XX + help + Use a static file to describe how the + controller is configured (endpoints, mechanisms, etc) on the + current iteration of a given system-on-chip. + +comment "DaVinci 644x USB support" + depends on USB_MUSB_HDRC && ARCH_DAVINCI + +comment "OMAP 243x high speed USB support" + depends on USB_MUSB_HDRC && ARCH_OMAP2430 + +comment "OMAP 343x high speed USB support" + depends on USB_MUSB_HDRC && ARCH_OMAP34XX + +config USB_TUSB6010 + boolean "TUSB 6010 support" + depends on USB_MUSB_HDRC && !USB_MUSB_SOC + default y + help + The TUSB 6010 chip, from Texas Instruments, connects a discrete + HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ + (a high speed serial link). It can use system-specific external + DMA controllers. + +choice + prompt "Driver Mode" + depends on USB_MUSB_HDRC + help + Dual-Role devices can support both host and peripheral roles, + as well as a the special "OTG Device" role which can switch + between both roles as needed. + +# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support; +# OTG needs both roles, not just USB_MUSB_HOST. 
+config USB_MUSB_HOST + depends on USB + bool "USB Host" + help + Say Y here if your system supports the USB host role. + If it has a USB "A" (rectangular), "Mini-A" (uncommon), + or "Mini-AB" connector, it supports the host role. + (With a "Mini-AB" connector, you should enable USB OTG.) + +# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral +# side support ... OTG needs both roles +config USB_MUSB_PERIPHERAL + depends on USB_GADGET + bool "USB Peripheral (gadget stack)" + select USB_GADGET_MUSB_HDRC + help + Say Y here if your system supports the USB peripheral role. + If it has a USB "B" (squarish), "Mini-B", or "Mini-AB" + connector, it supports the peripheral role. + (With a "Mini-AB" connector, you should enable USB OTG.) + +config USB_MUSB_OTG + depends on USB && USB_GADGET && PM && EXPERIMENTAL + bool "Both host and peripheral: USB OTG (On The Go) Device" + select USB_GADGET_MUSB_HDRC + select USB_OTG + help + The most notable feature of USB OTG is support for a + "Dual-Role" device, which can act as either a device + or a host. The initial role choice can be changed + later, when two dual-role devices talk to each other. + + At this writing, the OTG support in this driver is incomplete, + omitting the mandatory HNP or SRP protocols. However, some + of the cable based role switching works. (That is, grounding + the ID pin switches the controller to host mode, while leaving + it floating leaves it in peripheral mode.) + + Select this if your system has a Mini-AB connector, or + to simplify certain kinds of configuration. + + To implement your OTG Targeted Peripherals List (TPL), enable + USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h" + to match your requirements. + +endchoice + +# enable peripheral support (including with OTG) +config USB_GADGET_MUSB_HDRC + bool + depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) +# default y +# select USB_GADGET_DUALSPEED +# select USB_GADGET_SELECTED + +# enables host support (including with OTG) +config USB_MUSB_HDRC_HCD + bool + depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG) + select USB_OTG if USB_GADGET_MUSB_HDRC + default y + + +config MUSB_PIO_ONLY + bool 'Disable DMA (always use PIO)' + depends on USB_MUSB_HDRC + default y if USB_TUSB6010 + help + All data is copied between memory and FIFO by the CPU. + DMA controllers are ignored. + + Do not select 'n' here unless DMA support for your SOC or board + is unavailable (or unstable). When DMA is enabled at compile time, + you can still disable it at run time using the "use_dma=n" module + parameter. + +config USB_INVENTRA_DMA + bool + depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY + default ARCH_OMAP2430 || ARCH_OMAP34XX + help + Enable DMA transfers using Mentor's engine. + +config USB_TI_CPPI_DMA + bool + depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY + default ARCH_DAVINCI + help + Enable DMA transfers when TI CPPI DMA is available. + +config USB_TUSB_OMAP_DMA + bool + depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY + depends on USB_TUSB6010 + depends on ARCH_OMAP + default y + help + Enable DMA transfers on TUSB 6010 when OMAP DMA is available. + +config USB_MUSB_LOGLEVEL + depends on USB_MUSB_HDRC + int 'Logging Level (0 - none / 3 - annoying / ... )' + default 0 + help + Set the logging level. 0 disables the debugging altogether, + although when USB_DEBUG is set the value is at least 1. + Starting at level 3, per-transfer (urb, usb_request, packet, + or dma transfer) tracing may kick in. 
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile new file mode 100644 index 00000000000..88eb67de08a --- /dev/null +++ b/drivers/usb/musb/Makefile @@ -0,0 +1,86 @@ +# +# for USB OTG silicon based on Mentor Graphics INVENTRA designs +# + +musb_hdrc-objs := musb_core.o + +obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o + +ifeq ($(CONFIG_ARCH_DAVINCI),y) + musb_hdrc-objs += davinci.o +endif + +ifeq ($(CONFIG_USB_TUSB6010),y) + musb_hdrc-objs += tusb6010.o +endif + +ifeq ($(CONFIG_ARCH_OMAP2430),y) + musb_hdrc-objs += omap2430.o +endif + +ifeq ($(CONFIG_ARCH_OMAP3430),y) + musb_hdrc-objs += omap2430.o +endif + +ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y) + musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o +endif + +ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y) + musb_hdrc-objs += musb_virthub.o musb_host.o +endif + +# the kconfig must guarantee that only one of the +# possible I/O schemes will be enabled at a time ... +# PIO only, or DMA (several potential schemes). +# though PIO is always there to back up DMA, and for ep0 + +ifneq ($(CONFIG_MUSB_PIO_ONLY),y) + + ifeq ($(CONFIG_USB_INVENTRA_DMA),y) + musb_hdrc-objs += musbhsdma.o + + else + ifeq ($(CONFIG_USB_TI_CPPI_DMA),y) + musb_hdrc-objs += cppi_dma.o + + else + ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y) + musb_hdrc-objs += tusb6010_omap.o + + endif + endif + endif +endif + + +################################################################################ + +# FIXME remove all these extra "-DMUSB_* things, stick to CONFIG_* + +ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y) + EXTRA_CFLAGS += -DMUSB_AHB_ID +endif + +# Debugging + +MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL) + +ifeq ("$(strip $(MUSB_DEBUG))","") + ifdef CONFIG_USB_DEBUG + MUSB_DEBUG:=1 + else + MUSB_DEBUG:=0 + endif +endif + +ifneq ($(MUSB_DEBUG),0) + EXTRA_CFLAGS += -DDEBUG + + ifeq ($(CONFIG_PROC_FS),y) + musb_hdrc-objs += musb_procfs.o + endif + +endif + +EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG) diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c new file mode 100644 index 00000000000..5ad6d0893cb --- /dev/null +++ b/drivers/usb/musb/cppi_dma.c @@ -0,0 +1,1540 @@ +/* + * Copyright (C) 2005-2006 by Texas Instruments + * + * This file implements a DMA interface using TI's CPPI DMA. + * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB. + * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci. + */ + +#include + +#include "musb_core.h" +#include "cppi_dma.h" + + +/* CPPI DMA status 7-mar-2006: + * + * - See musb_{host,gadget}.c for more info + * + * - Correct RX DMA generally forces the engine into irq-per-packet mode, + * which can easily saturate the CPU under non-mass-storage loads. + * + * NOTES 24-aug-2006 (2.6.18-rc4): + * + * - peripheral RXDMA wedged in a test with packets of length 512/512/1. + * evidently after the 1 byte packet was received and acked, the queue + * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003, + * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401 + * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx + * of its next (512 byte) packet. IRQ issues? + * + * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will + * evidently also directly update the RX and TX CSRs ... so audit all + * host and peripheral side DMA code to avoid CSR access after DMA has + * been started. + */ + +/* REVISIT now we can avoid preallocating these descriptors; or + * more simply, switch to a global freelist not per-channel ones. 
+ * Note: at full speed, 64 descriptors == 4K bulk data. + */ +#define NUM_TXCHAN_BD 64 +#define NUM_RXCHAN_BD 64 + +static inline void cpu_drain_writebuffer(void) +{ + wmb(); +#ifdef CONFIG_CPU_ARM926T + /* REVISIT this "should not be needed", + * but lack of it sure seemed to hurt ... + */ + asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n"); +#endif +} + +static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) +{ + struct cppi_descriptor *bd = c->freelist; + + if (bd) + c->freelist = bd->next; + return bd; +} + +static inline void +cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) +{ + if (!bd) + return; + bd->next = c->freelist; + c->freelist = bd; +} + +/* + * Start DMA controller + * + * Initialize the DMA controller as necessary. + */ + +/* zero out entire rx state RAM entry for the channel */ +static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx) +{ + musb_writel(&rx->rx_skipbytes, 0, 0); + musb_writel(&rx->rx_head, 0, 0); + musb_writel(&rx->rx_sop, 0, 0); + musb_writel(&rx->rx_current, 0, 0); + musb_writel(&rx->rx_buf_current, 0, 0); + musb_writel(&rx->rx_len_len, 0, 0); + musb_writel(&rx->rx_cnt_cnt, 0, 0); +} + +/* zero out entire tx state RAM entry for the channel */ +static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr) +{ + musb_writel(&tx->tx_head, 0, 0); + musb_writel(&tx->tx_buf, 0, 0); + musb_writel(&tx->tx_current, 0, 0); + musb_writel(&tx->tx_buf_current, 0, 0); + musb_writel(&tx->tx_info, 0, 0); + musb_writel(&tx->tx_rem_len, 0, 0); + /* musb_writel(&tx->tx_dummy, 0, 0); */ + musb_writel(&tx->tx_complete, 0, ptr); +} + +static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) +{ + int j; + + /* initialize channel fields */ + c->head = NULL; + c->tail = NULL; + c->last_processed = NULL; + c->channel.status = MUSB_DMA_STATUS_UNKNOWN; + c->controller = cppi; + c->is_rndis = 0; + c->freelist = NULL; + + /* build the BD Free list for the channel */ + for (j = 0; j < NUM_TXCHAN_BD + 1; j++) { + struct cppi_descriptor *bd; + dma_addr_t dma; + + bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma); + bd->dma = dma; + cppi_bd_free(c, bd); + } +} + +static int cppi_channel_abort(struct dma_channel *); + +static void cppi_pool_free(struct cppi_channel *c) +{ + struct cppi *cppi = c->controller; + struct cppi_descriptor *bd; + + (void) cppi_channel_abort(&c->channel); + c->channel.status = MUSB_DMA_STATUS_UNKNOWN; + c->controller = NULL; + + /* free all its bds */ + bd = c->last_processed; + do { + if (bd) + dma_pool_free(cppi->pool, bd, bd->dma); + bd = cppi_bd_alloc(c); + } while (bd); + c->last_processed = NULL; +} + +static int __init cppi_controller_start(struct dma_controller *c) +{ + struct cppi *controller; + void __iomem *tibase; + int i; + + controller = container_of(c, struct cppi, controller); + + /* do whatever is necessary to start controller */ + for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { + controller->tx[i].transmit = true; + controller->tx[i].index = i; + } + for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { + controller->rx[i].transmit = false; + controller->rx[i].index = i; + } + + /* setup BD list on a per channel basis */ + for (i = 0; i < ARRAY_SIZE(controller->tx); i++) + cppi_pool_init(controller, controller->tx + i); + for (i = 0; i < ARRAY_SIZE(controller->rx); i++) + cppi_pool_init(controller, controller->rx + i); + + tibase = controller->tibase; + INIT_LIST_HEAD(&controller->tx_complete); + + /* initialise tx/rx channel head pointers to zero */ + for (i = 0; i 
< ARRAY_SIZE(controller->tx); i++) { + struct cppi_channel *tx_ch = controller->tx + i; + struct cppi_tx_stateram __iomem *tx; + + INIT_LIST_HEAD(&tx_ch->tx_complete); + + tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); + tx_ch->state_ram = tx; + cppi_reset_tx(tx, 0); + } + for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { + struct cppi_channel *rx_ch = controller->rx + i; + struct cppi_rx_stateram __iomem *rx; + + INIT_LIST_HEAD(&rx_ch->tx_complete); + + rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i); + rx_ch->state_ram = rx; + cppi_reset_rx(rx); + } + + /* enable individual cppi channels */ + musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, + DAVINCI_DMA_ALL_CHANNELS_ENABLE); + musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG, + DAVINCI_DMA_ALL_CHANNELS_ENABLE); + + /* enable tx/rx CPPI control */ + musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); + musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE); + + /* disable RNDIS mode, also host rx RNDIS autorequest */ + musb_writel(tibase, DAVINCI_RNDIS_REG, 0); + musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0); + + return 0; +} + +/* + * Stop DMA controller + * + * De-Init the DMA controller as necessary. + */ + +static int cppi_controller_stop(struct dma_controller *c) +{ + struct cppi *controller; + void __iomem *tibase; + int i; + + controller = container_of(c, struct cppi, controller); + + tibase = controller->tibase; + /* DISABLE INDIVIDUAL CHANNEL Interrupts */ + musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, + DAVINCI_DMA_ALL_CHANNELS_ENABLE); + musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG, + DAVINCI_DMA_ALL_CHANNELS_ENABLE); + + DBG(1, "Tearing down RX and TX Channels\n"); + for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { + /* FIXME restructure of txdma to use bds like rxdma */ + controller->tx[i].last_processed = NULL; + cppi_pool_free(controller->tx + i); + } + for (i = 0; i < ARRAY_SIZE(controller->rx); i++) + cppi_pool_free(controller->rx + i); + + /* in Tx Case proper teardown is supported. We resort to disabling + * Tx/Rx CPPI after cleanup of Tx channels. Before TX teardown is + * complete TX CPPI cannot be disabled. + */ + /*disable tx/rx cppi */ + musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); + musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE); + + return 0; +} + +/* While dma channel is allocated, we only want the core irqs active + * for fault reports, otherwise we'd get irqs that we don't care about. + * Except for TX irqs, where dma done != fifo empty and reusable ... + * + * NOTE: docs don't say either way, but irq masking **enables** irqs. + * + * REVISIT same issue applies to pure PIO usage too, and non-cppi dma... + */ +static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum) +{ + musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8)); +} + +static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum) +{ + musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8)); +} + + +/* + * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to + * each transfer direction of a non-control endpoint, so allocating + * (and deallocating) is mostly a way to notice bad housekeeping on + * the software side. We assume the irqs are always active. 
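+ *
+ * (Editorial sketch, not part of the original comment: the call sites
+ * are in the core host/gadget code, outside this patch.  Under that
+ * assumption, the expected per-transfer flow is roughly:
+ *
+ *	ch = c->channel_alloc(c, hw_ep, is_tx);    once per ep direction
+ *	c->channel_program(ch, maxpacket, mode, dma_addr, len);
+ *	    ... cppi_completion() then reports progress from the ISR ...
+ *	c->channel_abort(ch);                      only on error/unlink
+ *	c->channel_release(ch);                    when the ep is done
+ *
+ * where c is the struct dma_controller set up by dma_controller_create()
+ * near the end of this file.)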
+ */ +static struct dma_channel * +cppi_channel_allocate(struct dma_controller *c, + struct musb_hw_ep *ep, u8 transmit) +{ + struct cppi *controller; + u8 index; + struct cppi_channel *cppi_ch; + void __iomem *tibase; + + controller = container_of(c, struct cppi, controller); + tibase = controller->tibase; + + /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */ + index = ep->epnum - 1; + + /* return the corresponding CPPI Channel Handle, and + * probably disable the non-CPPI irq until we need it. + */ + if (transmit) { + if (index >= ARRAY_SIZE(controller->tx)) { + DBG(1, "no %cX%d CPPI channel\n", 'T', index); + return NULL; + } + cppi_ch = controller->tx + index; + } else { + if (index >= ARRAY_SIZE(controller->rx)) { + DBG(1, "no %cX%d CPPI channel\n", 'R', index); + return NULL; + } + cppi_ch = controller->rx + index; + core_rxirq_disable(tibase, ep->epnum); + } + + /* REVISIT make this an error later once the same driver code works + * with the other DMA engine too + */ + if (cppi_ch->hw_ep) + DBG(1, "re-allocating DMA%d %cX channel %p\n", + index, transmit ? 'T' : 'R', cppi_ch); + cppi_ch->hw_ep = ep; + cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; + + DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); + return &cppi_ch->channel; +} + +/* Release a CPPI Channel. */ +static void cppi_channel_release(struct dma_channel *channel) +{ + struct cppi_channel *c; + void __iomem *tibase; + + /* REVISIT: for paranoia, check state and abort if needed... */ + + c = container_of(channel, struct cppi_channel, channel); + tibase = c->controller->tibase; + if (!c->hw_ep) + DBG(1, "releasing idle DMA channel %p\n", c); + else if (!c->transmit) + core_rxirq_enable(tibase, c->index + 1); + + /* for now, leave its cppi IRQ enabled (we won't trigger it) */ + c->hw_ep = NULL; + channel->status = MUSB_DMA_STATUS_UNKNOWN; +} + +/* Context: controller irqlocked */ +static void +cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) +{ + void __iomem *base = c->controller->mregs; + struct cppi_rx_stateram __iomem *rx = c->state_ram; + + musb_ep_select(base, c->index + 1); + + DBG(level, "RX DMA%d%s: %d left, csr %04x, " + "%08x H%08x S%08x C%08x, " + "B%08x L%08x %08x .. %08x" + "\n", + c->index, tag, + musb_readl(c->controller->tibase, + DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index), + musb_readw(c->hw_ep->regs, MUSB_RXCSR), + + musb_readl(&rx->rx_skipbytes, 0), + musb_readl(&rx->rx_head, 0), + musb_readl(&rx->rx_sop, 0), + musb_readl(&rx->rx_current, 0), + + musb_readl(&rx->rx_buf_current, 0), + musb_readl(&rx->rx_len_len, 0), + musb_readl(&rx->rx_cnt_cnt, 0), + musb_readl(&rx->rx_complete, 0) + ); +} + +/* Context: controller irqlocked */ +static void +cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) +{ + void __iomem *base = c->controller->mregs; + struct cppi_tx_stateram __iomem *tx = c->state_ram; + + musb_ep_select(base, c->index + 1); + + DBG(level, "TX DMA%d%s: csr %04x, " + "H%08x S%08x C%08x %08x, " + "F%08x L%08x .. 
%08x" + "\n", + c->index, tag, + musb_readw(c->hw_ep->regs, MUSB_TXCSR), + + musb_readl(&tx->tx_head, 0), + musb_readl(&tx->tx_buf, 0), + musb_readl(&tx->tx_current, 0), + musb_readl(&tx->tx_buf_current, 0), + + musb_readl(&tx->tx_info, 0), + musb_readl(&tx->tx_rem_len, 0), + /* dummy/unused word 6 */ + musb_readl(&tx->tx_complete, 0) + ); +} + +/* Context: controller irqlocked */ +static inline void +cppi_rndis_update(struct cppi_channel *c, int is_rx, + void __iomem *tibase, int is_rndis) +{ + /* we may need to change the rndis flag for this cppi channel */ + if (c->is_rndis != is_rndis) { + u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG); + u32 temp = 1 << (c->index); + + if (is_rx) + temp <<= 16; + if (is_rndis) + value |= temp; + else + value &= ~temp; + musb_writel(tibase, DAVINCI_RNDIS_REG, value); + c->is_rndis = is_rndis; + } +} + +static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd) +{ + pr_debug("RXBD/%s %08x: " + "nxt %08x buf %08x off.blen %08x opt.plen %08x\n", + tag, bd->dma, + bd->hw_next, bd->hw_bufp, bd->hw_off_len, + bd->hw_options); +} + +static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) +{ +#if MUSB_DEBUG > 0 + struct cppi_descriptor *bd; + + if (!_dbg_level(level)) + return; + cppi_dump_rx(level, rx, tag); + if (rx->last_processed) + cppi_dump_rxbd("last", rx->last_processed); + for (bd = rx->head; bd; bd = bd->next) + cppi_dump_rxbd("active", bd); +#endif +} + + +/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX; + * so we won't ever use it (see "CPPI RX Woes" below). + */ +static inline int cppi_autoreq_update(struct cppi_channel *rx, + void __iomem *tibase, int onepacket, unsigned n_bds) +{ + u32 val; + +#ifdef RNDIS_RX_IS_USABLE + u32 tmp; + /* assert(is_host_active(musb)) */ + + /* start from "AutoReq never" */ + tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); + val = tmp & ~((0x3) << (rx->index * 2)); + + /* HCD arranged reqpkt for packet #1. we arrange int + * for all but the last one, maybe in two segments. + */ + if (!onepacket) { +#if 0 + /* use two segments, autoreq "all" then the last "never" */ + val |= ((0x3) << (rx->index * 2)); + n_bds--; +#else + /* one segment, autoreq "all-but-last" */ + val |= ((0x1) << (rx->index * 2)); +#endif + } + + if (val != tmp) { + int n = 100; + + /* make sure that autoreq is updated before continuing */ + musb_writel(tibase, DAVINCI_AUTOREQ_REG, val); + do { + tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); + if (tmp == val) + break; + cpu_relax(); + } while (n-- > 0); + } +#endif + + /* REQPKT is turned off after each segment */ + if (n_bds && rx->channel.actual_len) { + void __iomem *regs = rx->hw_ep->regs; + + val = musb_readw(regs, MUSB_RXCSR); + if (!(val & MUSB_RXCSR_H_REQPKT)) { + val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS; + musb_writew(regs, MUSB_RXCSR, val); + /* flush writebufer */ + val = musb_readw(regs, MUSB_RXCSR); + } + } + return n_bds; +} + + +/* Buffer enqueuing Logic: + * + * - RX builds new queues each time, to help handle routine "early + * termination" cases (faults, including errors and short reads) + * more correctly. + * + * - for now, TX reuses the same queue of BDs every time + * + * REVISIT long term, we want a normal dynamic model. + * ... the goal will be to append to the + * existing queue, processing completed "dma buffers" (segments) on the fly. + * + * Otherwise we force an IRQ latency between requests, which slows us a lot + * (especially in "transparent" dma). 
Unfortunately that model seems to be + * inherent in the DMA model from the Mentor code, except in the rare case + * of transfers big enough (~128+ KB) that we could append "middle" segments + * in the TX paths. (RX can't do this, see below.) + * + * That's true even in the CPPI- friendly iso case, where most urbs have + * several small segments provided in a group and where the "packet at a time" + * "transparent" DMA model is always correct, even on the RX side. + */ + +/* + * CPPI TX: + * ======== + * TX is a lot more reasonable than RX; it doesn't need to run in + * irq-per-packet mode very often. RNDIS mode seems to behave too + * (except how it handles the exactly-N-packets case). Building a + * txdma queue with multiple requests (urb or usb_request) looks + * like it would work ... but fault handling would need much testing. + * + * The main issue with TX mode RNDIS relates to transfer lengths that + * are an exact multiple of the packet length. It appears that there's + * a hiccup in that case (maybe the DMA completes before the ZLP gets + * written?) boiling down to not being able to rely on CPPI writing any + * terminating zero length packet before the next transfer is written. + * So that's punted to PIO; better yet, gadget drivers can avoid it. + * + * Plus, there's allegedly an undocumented constraint that rndis transfer + * length be a multiple of 64 bytes ... but the chip doesn't act that + * way, and we really don't _want_ that behavior anyway. + * + * On TX, "transparent" mode works ... although experiments have shown + * problems trying to use the SOP/EOP bits in different USB packets. + * + * REVISIT try to handle terminating zero length packets using CPPI + * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet + * links avoid that issue by forcing them to avoid zlps.) + */ +static void +cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) +{ + unsigned maxpacket = tx->maxpacket; + dma_addr_t addr = tx->buf_dma + tx->offset; + size_t length = tx->buf_len - tx->offset; + struct cppi_descriptor *bd; + unsigned n_bds; + unsigned i; + struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram; + int rndis; + + /* TX can use the CPPI "rndis" mode, where we can probably fit this + * transfer in one BD and one IRQ. The only time we would NOT want + * to use it is when hardware constraints prevent it, or if we'd + * trigger the "send a ZLP?" confusion. + */ + rndis = (maxpacket & 0x3f) == 0 + && length < 0xffff + && (length % maxpacket) != 0; + + if (rndis) { + maxpacket = length; + n_bds = 1; + } else { + n_bds = length / maxpacket; + if (!length || (length % maxpacket)) + n_bds++; + n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD); + length = min(n_bds * maxpacket, length); + } + + DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", + tx->index, + maxpacket, + rndis ? "rndis" : "transparent", + n_bds, + addr, length); + + cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); + + /* assuming here that channel_program is called during + * transfer initiation ... current code maintains state + * for one outstanding request only (no queues, not even + * the implicit ones of an iso urb). + */ + + bd = tx->freelist; + tx->head = bd; + tx->last_processed = NULL; + + /* FIXME use BD pool like RX side does, and just queue + * the minimum number for this request. + */ + + /* Prepare queue of BDs first, then hand it to hardware. + * All BDs except maybe the last should be of full packet + * size; for RNDIS there _is_ only that last packet. 
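+ *
+ * (Editorial example, tracing the rndis/n_bds computation above rather
+ * than anything stated in the original source: with maxpacket = 512 and
+ * a 3000 byte request, 512 is a multiple of 64, 3000 < 0xffff, and
+ * 3000 % 512 = 440 != 0, so "rndis" mode is chosen -- maxpacket becomes
+ * 3000 and a single BD covers the whole transfer.  A 1024 byte request
+ * is an exact multiple of 512, so it stays "transparent" and the loop
+ * below builds n_bds = 2 full-size BDs.)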
+ */ + for (i = 0; i < n_bds; ) { + if (++i < n_bds && bd->next) + bd->hw_next = bd->next->dma; + else + bd->hw_next = 0; + + bd->hw_bufp = tx->buf_dma + tx->offset; + + /* FIXME set EOP only on the last packet, + * SOP only on the first ... avoid IRQs + */ + if ((tx->offset + maxpacket) <= tx->buf_len) { + tx->offset += maxpacket; + bd->hw_off_len = maxpacket; + bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET + | CPPI_OWN_SET | maxpacket; + } else { + /* only this one may be a partial USB Packet */ + u32 partial_len; + + partial_len = tx->buf_len - tx->offset; + tx->offset = tx->buf_len; + bd->hw_off_len = partial_len; + + bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET + | CPPI_OWN_SET | partial_len; + if (partial_len == 0) + bd->hw_options |= CPPI_ZERO_SET; + } + + DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n", + bd, bd->hw_next, bd->hw_bufp, + bd->hw_off_len, bd->hw_options); + + /* update the last BD enqueued to the list */ + tx->tail = bd; + bd = bd->next; + } + + /* BDs live in DMA-coherent memory, but writes might be pending */ + cpu_drain_writebuffer(); + + /* Write to the HeadPtr in state RAM to trigger */ + musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma); + + cppi_dump_tx(5, tx, "/S"); +} + +/* + * CPPI RX Woes: + * ============= + * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte + * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back. + * (Full speed transfers have similar scenarios.) + * + * The correct behavior for Linux is that (a) fills the buffer with 300 bytes, + * and the next packet goes into a buffer that's queued later; while (b) fills + * the buffer with 1024 bytes. How to do that with CPPI? + * + * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but + * (b) loses **BADLY** because nothing (!) happens when that second packet + * fills the buffer, much less when a third one arrives. (Which makes this + * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination + * is optional, and it's fine if peripherals -- not hosts! -- pad messages + * out to end-of-buffer. Standard PCI host controller DMA descriptors + * implement that mode by default ... which is no accident.) + * + * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have + * converse problems: (b) is handled right, but (a) loses badly. CPPI RX + * ignores SOP/EOP markings and processes both of those BDs; so both packets + * are loaded into the buffer (with a 212 byte gap between them), and the next + * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP + * are intended as outputs for RX queues, not inputs...) + * + * - A variant of "transparent" mode -- one BD at a time -- is the only way to + * reliably make both cases work, with software handling both cases correctly + * and at the significant penalty of needing an IRQ per packet. (The lack of + * I/O overlap can be slightly ameliorated by enabling double buffering.) + * + * So how to get rid of IRQ-per-packet? The transparent multi-BD case could + * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK + * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors + * with guaranteed driver level fault recovery and scrubbing out what's left + * of that garbaged datastream. + * + * But there seems to be no way to identify the cases where CPPI RNDIS mode + * is appropriate -- which do NOT include RNDIS host drivers, but do include + * the CDC Ethernet driver! 
-- and the documentation is incomplete/wrong. + * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic + * that applies best on the peripheral side (and which could fail rudely). + * + * Leaving only "transparent" mode; we avoid multi-bd modes in almost all + * cases other than mass storage class. Otherwise we're correct but slow, + * since CPPI penalizes our need for a "true RNDIS" default mode. + */ + + +/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY + * + * IFF + * (a) peripheral mode ... since rndis peripherals could pad their + * writes to hosts, causing i/o failure; or we'd have to cope with + * a largely unknowable variety of host side protocol variants + * (b) and short reads are NOT errors ... since full reads would + * cause those same i/o failures + * (c) and read length is + * - less than 64KB (max per cppi descriptor) + * - not a multiple of 4096 (g_zero default, full reads typical) + * - N (>1) packets long, ditto (full reads not EXPECTED) + * THEN + * try rx rndis mode + * + * Cost of heuristic failing: RXDMA wedges at the end of transfers that + * fill out the whole buffer. Buggy host side usb network drivers could + * trigger that, but "in the field" such bugs seem to be all but unknown. + * + * So this module parameter lets the heuristic be disabled. When using + * gadgetfs, the heuristic will probably need to be disabled. + */ +static int cppi_rx_rndis = 1; + +module_param(cppi_rx_rndis, bool, 0); +MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic"); + + +/** + * cppi_next_rx_segment - dma read for the next chunk of a buffer + * @musb: the controller + * @rx: dma channel + * @onepacket: true unless caller treats short reads as errors, and + * performs fault recovery above usbcore. + * Context: controller irqlocked + * + * See above notes about why we can't use multi-BD RX queues except in + * rare cases (mass storage class), and can never use the hardware "rndis" + * mode (since it's not a "true" RNDIS mode) with complete safety.. + * + * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in + * code to recover from corrupted datastreams after each short transfer. + */ +static void +cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) +{ + unsigned maxpacket = rx->maxpacket; + dma_addr_t addr = rx->buf_dma + rx->offset; + size_t length = rx->buf_len - rx->offset; + struct cppi_descriptor *bd, *tail; + unsigned n_bds; + unsigned i; + void __iomem *tibase = musb->ctrl_base; + int is_rndis = 0; + struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram; + + if (onepacket) { + /* almost every USB driver, host or peripheral side */ + n_bds = 1; + + /* maybe apply the heuristic above */ + if (cppi_rx_rndis + && is_peripheral_active(musb) + && length > maxpacket + && (length & ~0xffff) == 0 + && (length & 0x0fff) != 0 + && (length & (maxpacket - 1)) == 0) { + maxpacket = length; + is_rndis = 1; + } + } else { + /* virtually nothing except mass storage class */ + if (length > 0xffff) { + n_bds = 0xffff / maxpacket; + length = n_bds * maxpacket; + } else { + n_bds = length / maxpacket; + if (length % maxpacket) + n_bds++; + } + if (n_bds == 1) + onepacket = 1; + else + n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD); + } + + /* In host mode, autorequest logic can generate some IN tokens; it's + * tricky since we can't leave REQPKT set in RXCSR after the transfer + * finishes. So: multipacket transfers involve two or more segments. + * And always at least two IRQs ... 
RNDIS mode is not an option. + */ + if (is_host_active(musb)) + n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds); + + cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis); + + length = min(n_bds * maxpacket, length); + + DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " + "dma 0x%x len %u %u/%u\n", + rx->index, maxpacket, + onepacket + ? (is_rndis ? "rndis" : "onepacket") + : "multipacket", + n_bds, + musb_readl(tibase, + DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) + & 0xffff, + addr, length, rx->channel.actual_len, rx->buf_len); + + /* only queue one segment at a time, since the hardware prevents + * correct queue shutdown after unexpected short packets + */ + bd = cppi_bd_alloc(rx); + rx->head = bd; + + /* Build BDs for all packets in this segment */ + for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { + u32 bd_len; + + if (i) { + bd = cppi_bd_alloc(rx); + if (!bd) + break; + tail->next = bd; + tail->hw_next = bd->dma; + } + bd->hw_next = 0; + + /* all but the last packet will be maxpacket size */ + if (maxpacket < length) + bd_len = maxpacket; + else + bd_len = length; + + bd->hw_bufp = addr; + addr += bd_len; + rx->offset += bd_len; + + bd->hw_off_len = (0 /*offset*/ << 16) + bd_len; + bd->buflen = bd_len; + + bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0); + length -= bd_len; + } + + /* we always expect at least one reusable BD! */ + if (!tail) { + WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds); + return; + } else if (i < n_bds) + WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds); + + tail->next = NULL; + tail->hw_next = 0; + + bd = rx->head; + rx->tail = tail; + + /* short reads and other faults should terminate this entire + * dma segment. we want one "dma packet" per dma segment, not + * one per USB packet, terminating the whole queue at once... + * NOTE that current hardware seems to ignore SOP and EOP. + */ + bd->hw_options |= CPPI_SOP_SET; + tail->hw_options |= CPPI_EOP_SET; + + if (debug >= 5) { + struct cppi_descriptor *d; + + for (d = rx->head; d; d = d->next) + cppi_dump_rxbd("S", d); + } + + /* in case the preceding transfer left some state... */ + tail = rx->last_processed; + if (tail) { + tail->next = bd; + tail->hw_next = bd->dma; + } + + core_rxirq_enable(tibase, rx->index + 1); + + /* BDs live in DMA-coherent memory, but writes might be pending */ + cpu_drain_writebuffer(); + + /* REVISIT specs say to write this AFTER the BUFCNT register + * below ... but that loses badly. + */ + musb_writel(&rx_ram->rx_head, 0, bd->dma); + + /* bufferCount must be at least 3, and zeroes on completion + * unless it underflows below zero, or stops at two, or keeps + * growing ... grr. 
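+ *
+ * (Editorial note, simply tracing the code below rather than documenting
+ * the hardware: with n_bds = 4, a current count of 0 leads to writing
+ * n_bds + 2 = 6; a count of 3 satisfies n_bds > (i - 3), so
+ * n_bds - (i - 3) = 4 is written; a count of 8 triggers no write at all,
+ * since 4 <= 8 - 3.  The re-read and "underrun" check afterwards then
+ * papers over any case where the count still looks too small.)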
+ */ + i = musb_readl(tibase, + DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) + & 0xffff; + + if (!i) + musb_writel(tibase, + DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), + n_bds + 2); + else if (n_bds > (i - 3)) + musb_writel(tibase, + DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), + n_bds - (i - 3)); + + i = musb_readl(tibase, + DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) + & 0xffff; + if (i < (2 + n_bds)) { + DBG(2, "bufcnt%d underrun - %d (for %d)\n", + rx->index, i, n_bds); + musb_writel(tibase, + DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4), + n_bds + 2); + } + + cppi_dump_rx(4, rx, "/S"); +} + +/** + * cppi_channel_program - program channel for data transfer + * @ch: the channel + * @maxpacket: max packet size + * @mode: For RX, 1 unless the usb protocol driver promised to treat + * all short reads as errors and kick in high level fault recovery. + * For TX, ignored because of RNDIS mode races/glitches. + * @dma_addr: dma address of buffer + * @len: length of buffer + * Context: controller irqlocked + */ +static int cppi_channel_program(struct dma_channel *ch, + u16 maxpacket, u8 mode, + dma_addr_t dma_addr, u32 len) +{ + struct cppi_channel *cppi_ch; + struct cppi *controller; + struct musb *musb; + + cppi_ch = container_of(ch, struct cppi_channel, channel); + controller = cppi_ch->controller; + musb = controller->musb; + + switch (ch->status) { + case MUSB_DMA_STATUS_BUS_ABORT: + case MUSB_DMA_STATUS_CORE_ABORT: + /* fault irq handler should have handled cleanup */ + WARNING("%cX DMA%d not cleaned up after abort!\n", + cppi_ch->transmit ? 'T' : 'R', + cppi_ch->index); + /* WARN_ON(1); */ + break; + case MUSB_DMA_STATUS_BUSY: + WARNING("program active channel? %cX DMA%d\n", + cppi_ch->transmit ? 'T' : 'R', + cppi_ch->index); + /* WARN_ON(1); */ + break; + case MUSB_DMA_STATUS_UNKNOWN: + DBG(1, "%cX DMA%d not allocated!\n", + cppi_ch->transmit ? 'T' : 'R', + cppi_ch->index); + /* FALLTHROUGH */ + case MUSB_DMA_STATUS_FREE: + break; + } + + ch->status = MUSB_DMA_STATUS_BUSY; + + /* set transfer parameters, then queue up its first segment */ + cppi_ch->buf_dma = dma_addr; + cppi_ch->offset = 0; + cppi_ch->maxpacket = maxpacket; + cppi_ch->buf_len = len; + + /* TX channel? or RX? */ + if (cppi_ch->transmit) + cppi_next_tx_segment(musb, cppi_ch); + else + cppi_next_rx_segment(musb, cppi_ch, mode); + + return true; +} + +static bool cppi_rx_scan(struct cppi *cppi, unsigned ch) +{ + struct cppi_channel *rx = &cppi->rx[ch]; + struct cppi_rx_stateram __iomem *state = rx->state_ram; + struct cppi_descriptor *bd; + struct cppi_descriptor *last = rx->last_processed; + bool completed = false; + bool acked = false; + int i; + dma_addr_t safe2ack; + void __iomem *regs = rx->hw_ep->regs; + + cppi_dump_rx(6, rx, "/K"); + + bd = last ? 
last->next : rx->head; + if (!bd) + return false; + + /* run through all completed BDs */ + for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0); + (safe2ack || completed) && bd && i < NUM_RXCHAN_BD; + i++, bd = bd->next) { + u16 len; + + /* catch latest BD writes from CPPI */ + rmb(); + if (!completed && (bd->hw_options & CPPI_OWN_SET)) + break; + + DBG(5, "C/RXBD %08x: nxt %08x buf %08x " + "off.len %08x opt.len %08x (%d)\n", + bd->dma, bd->hw_next, bd->hw_bufp, + bd->hw_off_len, bd->hw_options, + rx->channel.actual_len); + + /* actual packet received length */ + if ((bd->hw_options & CPPI_SOP_SET) && !completed) + len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK; + else + len = 0; + + if (bd->hw_options & CPPI_EOQ_MASK) + completed = true; + + if (!completed && len < bd->buflen) { + /* NOTE: when we get a short packet, RXCSR_H_REQPKT + * must have been cleared, and no more DMA packets may + * active be in the queue... TI docs didn't say, but + * CPPI ignores those BDs even though OWN is still set. + */ + completed = true; + DBG(3, "rx short %d/%d (%d)\n", + len, bd->buflen, + rx->channel.actual_len); + } + + /* If we got here, we expect to ack at least one BD; meanwhile + * CPPI may completing other BDs while we scan this list... + * + * RACE: we can notice OWN cleared before CPPI raises the + * matching irq by writing that BD as the completion pointer. + * In such cases, stop scanning and wait for the irq, avoiding + * lost acks and states where BD ownership is unclear. + */ + if (bd->dma == safe2ack) { + musb_writel(&state->rx_complete, 0, safe2ack); + safe2ack = musb_readl(&state->rx_complete, 0); + acked = true; + if (bd->dma == safe2ack) + safe2ack = 0; + } + + rx->channel.actual_len += len; + + cppi_bd_free(rx, last); + last = bd; + + /* stop scanning on end-of-segment */ + if (bd->hw_next == 0) + completed = true; + } + rx->last_processed = last; + + /* dma abort, lost ack, or ... */ + if (!acked && last) { + int csr; + + if (safe2ack == 0 || safe2ack == rx->last_processed->dma) + musb_writel(&state->rx_complete, 0, safe2ack); + if (safe2ack == 0) { + cppi_bd_free(rx, last); + rx->last_processed = NULL; + + /* if we land here on the host side, H_REQPKT will + * be clear and we need to restart the queue... + */ + WARN_ON(rx->head); + } + musb_ep_select(cppi->mregs, rx->index + 1); + csr = musb_readw(regs, MUSB_RXCSR); + if (csr & MUSB_RXCSR_DMAENAB) { + DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", + rx->index, + rx->head, rx->tail, + rx->last_processed + ? rx->last_processed->dma + : 0, + completed ? ", completed" : "", + csr); + cppi_dump_rxq(4, "/what?", rx); + } + } + if (!completed) { + int csr; + + rx->head = bd; + + /* REVISIT seems like "autoreq all but EOP" doesn't... + * setting it here "should" be racey, but seems to work + */ + csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); + if (is_host_active(cppi->musb) + && bd + && !(csr & MUSB_RXCSR_H_REQPKT)) { + csr |= MUSB_RXCSR_H_REQPKT; + musb_writew(regs, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS | csr); + csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); + } + } else { + rx->head = NULL; + rx->tail = NULL; + } + + cppi_dump_rx(6, rx, completed ? 
"/completed" : "/cleaned"); + return completed; +} + +void cppi_completion(struct musb *musb, u32 rx, u32 tx) +{ + void __iomem *tibase; + int i, index; + struct cppi *cppi; + struct musb_hw_ep *hw_ep = NULL; + + cppi = container_of(musb->dma_controller, struct cppi, controller); + + tibase = musb->ctrl_base; + + /* process TX channels */ + for (index = 0; tx; tx = tx >> 1, index++) { + struct cppi_channel *tx_ch; + struct cppi_tx_stateram __iomem *tx_ram; + bool completed = false; + struct cppi_descriptor *bd; + + if (!(tx & 1)) + continue; + + tx_ch = cppi->tx + index; + tx_ram = tx_ch->state_ram; + + /* FIXME need a cppi_tx_scan() routine, which + * can also be called from abort code + */ + + cppi_dump_tx(5, tx_ch, "/E"); + + bd = tx_ch->head; + + if (NULL == bd) { + DBG(1, "null BD\n"); + continue; + } + + /* run through all completed BDs */ + for (i = 0; !completed && bd && i < NUM_TXCHAN_BD; + i++, bd = bd->next) { + u16 len; + + /* catch latest BD writes from CPPI */ + rmb(); + if (bd->hw_options & CPPI_OWN_SET) + break; + + DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n", + bd, bd->hw_next, bd->hw_bufp, + bd->hw_off_len, bd->hw_options); + + len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK; + tx_ch->channel.actual_len += len; + + tx_ch->last_processed = bd; + + /* write completion register to acknowledge + * processing of completed BDs, and possibly + * release the IRQ; EOQ might not be set ... + * + * REVISIT use the same ack strategy as rx + * + * REVISIT have observed bit 18 set; huh?? + */ + /* if ((bd->hw_options & CPPI_EOQ_MASK)) */ + musb_writel(&tx_ram->tx_complete, 0, bd->dma); + + /* stop scanning on end-of-segment */ + if (bd->hw_next == 0) + completed = true; + } + + /* on end of segment, maybe go to next one */ + if (completed) { + /* cppi_dump_tx(4, tx_ch, "/complete"); */ + + /* transfer more, or report completion */ + if (tx_ch->offset >= tx_ch->buf_len) { + tx_ch->head = NULL; + tx_ch->tail = NULL; + tx_ch->channel.status = MUSB_DMA_STATUS_FREE; + + hw_ep = tx_ch->hw_ep; + + /* Peripheral role never repurposes the + * endpoint, so immediate completion is + * safe. Host role waits for the fifo + * to empty (TXPKTRDY irq) before going + * to the next queued bulk transfer. + */ + if (is_host_active(cppi->musb)) { +#if 0 + /* WORKAROUND because we may + * not always get TXKPTRDY ... + */ + int csr; + + csr = musb_readw(hw_ep->regs, + MUSB_TXCSR); + if (csr & MUSB_TXCSR_TXPKTRDY) +#endif + completed = false; + } + if (completed) + musb_dma_completion(musb, index + 1, 1); + + } else { + /* Bigger transfer than we could fit in + * that first batch of descriptors... + */ + cppi_next_tx_segment(musb, tx_ch); + } + } else + tx_ch->head = bd; + } + + /* Start processing the RX block */ + for (index = 0; rx; rx = rx >> 1, index++) { + + if (rx & 1) { + struct cppi_channel *rx_ch; + + rx_ch = cppi->rx + index; + + /* let incomplete dma segments finish */ + if (!cppi_rx_scan(cppi, index)) + continue; + + /* start another dma segment if needed */ + if (rx_ch->channel.actual_len != rx_ch->buf_len + && rx_ch->channel.actual_len + == rx_ch->offset) { + cppi_next_rx_segment(musb, rx_ch, 1); + continue; + } + + /* all segments completed! */ + rx_ch->channel.status = MUSB_DMA_STATUS_FREE; + + hw_ep = rx_ch->hw_ep; + + core_rxirq_disable(tibase, index + 1); + musb_dma_completion(musb, index + 1, 0); + } + } + + /* write to CPPI EOI register to re-enable interrupts */ + musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); +} + +/* Instantiate a software object representing a DMA controller. 
*/ +struct dma_controller *__init +dma_controller_create(struct musb *musb, void __iomem *mregs) +{ + struct cppi *controller; + + controller = kzalloc(sizeof *controller, GFP_KERNEL); + if (!controller) + return NULL; + + controller->mregs = mregs; + controller->tibase = mregs - DAVINCI_BASE_OFFSET; + + controller->musb = musb; + controller->controller.start = cppi_controller_start; + controller->controller.stop = cppi_controller_stop; + controller->controller.channel_alloc = cppi_channel_allocate; + controller->controller.channel_release = cppi_channel_release; + controller->controller.channel_program = cppi_channel_program; + controller->controller.channel_abort = cppi_channel_abort; + + /* NOTE: allocating from on-chip SRAM would give the least + * contention for memory access, if that ever matters here. + */ + + /* setup BufferPool */ + controller->pool = dma_pool_create("cppi", + controller->musb->controller, + sizeof(struct cppi_descriptor), + CPPI_DESCRIPTOR_ALIGN, 0); + if (!controller->pool) { + kfree(controller); + return NULL; + } + + return &controller->controller; +} + +/* + * Destroy a previously-instantiated DMA controller. + */ +void dma_controller_destroy(struct dma_controller *c) +{ + struct cppi *cppi; + + cppi = container_of(c, struct cppi, controller); + + /* assert: caller stopped the controller first */ + dma_pool_destroy(cppi->pool); + + kfree(cppi); +} + +/* + * Context: controller irqlocked, endpoint selected + */ +static int cppi_channel_abort(struct dma_channel *channel) +{ + struct cppi_channel *cppi_ch; + struct cppi *controller; + void __iomem *mbase; + void __iomem *tibase; + void __iomem *regs; + u32 value; + struct cppi_descriptor *queue; + + cppi_ch = container_of(channel, struct cppi_channel, channel); + + controller = cppi_ch->controller; + + switch (channel->status) { + case MUSB_DMA_STATUS_BUS_ABORT: + case MUSB_DMA_STATUS_CORE_ABORT: + /* from RX or TX fault irq handler */ + case MUSB_DMA_STATUS_BUSY: + /* the hardware needs shutting down */ + regs = cppi_ch->hw_ep->regs; + break; + case MUSB_DMA_STATUS_UNKNOWN: + case MUSB_DMA_STATUS_FREE: + return 0; + default: + return -EINVAL; + } + + if (!cppi_ch->transmit && cppi_ch->head) + cppi_dump_rxq(3, "/abort", cppi_ch); + + mbase = controller->mregs; + tibase = controller->tibase; + + queue = cppi_ch->head; + cppi_ch->head = NULL; + cppi_ch->tail = NULL; + + /* REVISIT should rely on caller having done this, + * and caller should rely on us not changing it. + * peripheral code is safe ... check host too. + */ + musb_ep_select(mbase, cppi_ch->index + 1); + + if (cppi_ch->transmit) { + struct cppi_tx_stateram __iomem *tx_ram; + int enabled; + + /* mask interrupts raised to signal teardown complete. */ + enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG) + & (1 << cppi_ch->index); + if (enabled) + musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, + (1 << cppi_ch->index)); + + /* REVISIT put timeouts on these controller handshakes */ + + cppi_dump_tx(6, cppi_ch, " (teardown)"); + + /* teardown DMA engine then usb core */ + do { + value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG); + } while (!(value & CPPI_TEAR_READY)); + musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index); + + tx_ram = cppi_ch->state_ram; + do { + value = musb_readl(&tx_ram->tx_complete, 0); + } while (0xFFFFFFFC != value); + musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC); + + /* FIXME clean up the transfer state ... here? + * the completion routine should get called with + * an appropriate status code. 
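+ *
+ * (Editorial aside: TXCSR is written twice below with FLUSHFIFO set and
+ * DMAENAB cleared; presumably each flush empties at most one buffered
+ * packet, so the second write covers double-buffered FIFOs.  That is an
+ * inference from the code, not something the original comment states.)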
+ */ + + value = musb_readw(regs, MUSB_TXCSR); + value &= ~MUSB_TXCSR_DMAENAB; + value |= MUSB_TXCSR_FLUSHFIFO; + musb_writew(regs, MUSB_TXCSR, value); + musb_writew(regs, MUSB_TXCSR, value); + + /* re-enable interrupt */ + if (enabled) + musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, + (1 << cppi_ch->index)); + + /* While we scrub the TX state RAM, ensure that we clean + * up any interrupt that's currently asserted: + * 1. Write to completion Ptr value 0x1(bit 0 set) + * (write back mode) + * 2. Write to completion Ptr value 0x0(bit 0 cleared) + * (compare mode) + * Value written is compared(for bits 31:2) and when + * equal, interrupt is deasserted. + */ + cppi_reset_tx(tx_ram, 1); + musb_writel(&tx_ram->tx_complete, 0, 0); + + cppi_dump_tx(5, cppi_ch, " (done teardown)"); + + /* REVISIT tx side _should_ clean up the same way + * as the RX side ... this does no cleanup at all! + */ + + } else /* RX */ { + u16 csr; + + /* NOTE: docs don't guarantee any of this works ... we + * expect that if the usb core stops telling the cppi core + * to pull more data from it, then it'll be safe to flush + * current RX DMA state iff any pending fifo transfer is done. + */ + + core_rxirq_disable(tibase, cppi_ch->index + 1); + + /* for host, ensure ReqPkt is never set again */ + if (is_host_active(cppi_ch->controller->musb)) { + value = musb_readl(tibase, DAVINCI_AUTOREQ_REG); + value &= ~((0x3) << (cppi_ch->index * 2)); + musb_writel(tibase, DAVINCI_AUTOREQ_REG, value); + } + + csr = musb_readw(regs, MUSB_RXCSR); + + /* for host, clear (just) ReqPkt at end of current packet(s) */ + if (is_host_active(cppi_ch->controller->musb)) { + csr |= MUSB_RXCSR_H_WZC_BITS; + csr &= ~MUSB_RXCSR_H_REQPKT; + } else + csr |= MUSB_RXCSR_P_WZC_BITS; + + /* clear dma enable */ + csr &= ~(MUSB_RXCSR_DMAENAB); + musb_writew(regs, MUSB_RXCSR, csr); + csr = musb_readw(regs, MUSB_RXCSR); + + /* Quiesce: wait for current dma to finish (if not cleanup). + * We can't use bit zero of stateram->rx_sop, since that + * refers to an entire "DMA packet" not just emptying the + * current fifo. Most segments need multiple usb packets. + */ + if (channel->status == MUSB_DMA_STATUS_BUSY) + udelay(50); + + /* scan the current list, reporting any data that was + * transferred and acking any IRQ + */ + cppi_rx_scan(controller, cppi_ch->index); + + /* clobber the existing state once it's idle + * + * NOTE: arguably, we should also wait for all the other + * RX channels to quiesce (how??) and then temporarily + * disable RXCPPI_CTRL_REG ... but it seems that we can + * rely on the controller restarting from state ram, with + * only RXCPPI_BUFCNT state being bogus. BUFCNT will + * correct itself after the next DMA transfer though. + * + * REVISIT does using rndis mode change that? + */ + cppi_reset_rx(cppi_ch->state_ram); + + /* next DMA request _should_ load cppi head ptr */ + + /* ... we don't "free" that list, only mutate it in place. */ + cppi_dump_rx(5, cppi_ch, " (done abort)"); + + /* clean up previously pending bds */ + cppi_bd_free(cppi_ch, cppi_ch->last_processed); + cppi_ch->last_processed = NULL; + + while (queue) { + struct cppi_descriptor *tmp = queue->next; + + cppi_bd_free(cppi_ch, queue); + queue = tmp; + } + } + + channel->status = MUSB_DMA_STATUS_FREE; + cppi_ch->buf_dma = 0; + cppi_ch->offset = 0; + cppi_ch->buf_len = 0; + cppi_ch->maxpacket = 0; + return 0; +} + +/* TBD Queries: + * + * Power Management ... probably turn off cppi during suspend, restart; + * check state ram? Clocking is presumably shared with usb core. 
+ */ diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h new file mode 100644 index 00000000000..fc5216b5d2c --- /dev/null +++ b/drivers/usb/musb/cppi_dma.h @@ -0,0 +1,133 @@ +/* Copyright (C) 2005-2006 by Texas Instruments */ + +#ifndef _CPPI_DMA_H_ +#define _CPPI_DMA_H_ + +#include +#include +#include +#include +#include + +#include "musb_dma.h" +#include "musb_core.h" + + +/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers + * would seem to be shared with the TUSB6020 (over VLYNQ). + */ + +#include "davinci.h" + + +/* CPPI RX/TX state RAM */ + +struct cppi_tx_stateram { + u32 tx_head; /* "DMA packet" head descriptor */ + u32 tx_buf; + u32 tx_current; /* current descriptor */ + u32 tx_buf_current; + u32 tx_info; /* flags, remaining buflen */ + u32 tx_rem_len; + u32 tx_dummy; /* unused */ + u32 tx_complete; +}; + +struct cppi_rx_stateram { + u32 rx_skipbytes; + u32 rx_head; + u32 rx_sop; /* "DMA packet" head descriptor */ + u32 rx_current; /* current descriptor */ + u32 rx_buf_current; + u32 rx_len_len; + u32 rx_cnt_cnt; + u32 rx_complete; +}; + +/* hw_options bits in CPPI buffer descriptors */ +#define CPPI_SOP_SET ((u32)(1 << 31)) +#define CPPI_EOP_SET ((u32)(1 << 30)) +#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */ +#define CPPI_EOQ_MASK ((u32)(1 << 28)) +#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */ +#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */ + +#define CPPI_RECV_PKTLEN_MASK 0xFFFF +#define CPPI_BUFFER_LEN_MASK 0xFFFF + +#define CPPI_TEAR_READY ((u32)(1 << 31)) + +/* CPPI data structure definitions */ + +#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */ + +struct cppi_descriptor { + /* hardware overlay */ + u32 hw_next; /* next buffer descriptor Pointer */ + u32 hw_bufp; /* i/o buffer pointer */ + u32 hw_off_len; /* buffer_offset16, buffer_length16 */ + u32 hw_options; /* flags: SOP, EOP etc*/ + + struct cppi_descriptor *next; + dma_addr_t dma; /* address of this descriptor */ + u32 buflen; /* for RX: original buffer length */ +} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN))); + + +struct cppi; + +/* CPPI Channel Control structure */ +struct cppi_channel { + struct dma_channel channel; + + /* back pointer to the DMA controller structure */ + struct cppi *controller; + + /* which direction of which endpoint? */ + struct musb_hw_ep *hw_ep; + bool transmit; + u8 index; + + /* DMA modes: RNDIS or "transparent" */ + u8 is_rndis; + + /* book keeping for current transfer request */ + dma_addr_t buf_dma; + u32 buf_len; + u32 maxpacket; + u32 offset; /* dma requested */ + + void __iomem *state_ram; /* CPPI state */ + + struct cppi_descriptor *freelist; + + /* BD management fields */ + struct cppi_descriptor *head; + struct cppi_descriptor *tail; + struct cppi_descriptor *last_processed; + + /* use tx_complete in host role to track endpoints waiting for + * FIFONOTEMPTY to clear. 
+ */ + struct list_head tx_complete; +}; + +/* CPPI DMA controller object */ +struct cppi { + struct dma_controller controller; + struct musb *musb; + void __iomem *mregs; /* Mentor regs */ + void __iomem *tibase; /* TI/CPPI regs */ + + struct cppi_channel tx[MUSB_C_NUM_EPT - 1]; + struct cppi_channel rx[MUSB_C_NUM_EPR - 1]; + + struct dma_pool *pool; + + struct list_head tx_complete; +}; + +/* irq handling hook */ +extern void cppi_completion(struct musb *, u32 rx, u32 tx); + +#endif /* end of ifndef _CPPI_DMA_H_ */ diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c new file mode 100644 index 00000000000..75baf181a8c --- /dev/null +++ b/drivers/usb/musb/davinci.c @@ -0,0 +1,462 @@ +/* + * Copyright (C) 2005-2006 by Texas Instruments + * + * This file is part of the Inventra Controller Driver for Linux. + * + * The Inventra Controller Driver for Linux is free software; you + * can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. + * + * The Inventra Controller Driver for Linux is distributed in + * the hope that it will be useful, but WITHOUT ANY WARRANTY; + * without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + * License for more details. + * + * You should have received a copy of the GNU General Public License + * along with The Inventra Controller Driver for Linux ; if not, + * write to the Free Software Foundation, Inc., 59 Temple Place, + * Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "musb_core.h" + +#ifdef CONFIG_MACH_DAVINCI_EVM +#include +#endif + +#include "davinci.h" +#include "cppi_dma.h" + + +/* REVISIT (PM) we should be able to keep the PHY in low power mode most + * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 + * and, when in host mode, autosuspending idle root ports... PHYPLLON + * (overriding SUSPENDM?) then likely needs to stay off. 
+ */ + +static inline void phy_on(void) +{ + /* start the on-chip PHY and its PLL */ + __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON, + (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR)); + while ((__raw_readl((void __force __iomem *) + IO_ADDRESS(USBPHY_CTL_PADDR)) + & USBPHY_PHYCLKGD) == 0) + cpu_relax(); +} + +static inline void phy_off(void) +{ + /* powerdown the on-chip PHY and its oscillator */ + __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *) + IO_ADDRESS(USBPHY_CTL_PADDR)); +} + +static int dma_off = 1; + +void musb_platform_enable(struct musb *musb) +{ + u32 tmp, old, val; + + /* workaround: setup irqs through both register sets */ + tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK) + << DAVINCI_USB_TXINT_SHIFT; + musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); + old = tmp; + tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)) + << DAVINCI_USB_RXINT_SHIFT; + musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); + tmp |= old; + + val = ~MUSB_INTR_SOF; + tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT); + musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); + + if (is_dma_capable() && !dma_off) + printk(KERN_WARNING "%s %s: dma not reactivated\n", + __FILE__, __func__); + else + dma_off = 0; + + /* force a DRVVBUS irq so we can start polling for ID change */ + if (is_otg_enabled(musb)) + musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, + DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); +} + +/* + * Disable the HDRC and flush interrupts + */ +void musb_platform_disable(struct musb *musb) +{ + /* because we don't set CTRLR.UINT, "important" to: + * - not read/write INTRUSB/INTRUSBE + * - (except during initial setup, as workaround) + * - use INTSETR/INTCLRR instead + */ + musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG, + DAVINCI_USB_USBINT_MASK + | DAVINCI_USB_TXINT_MASK + | DAVINCI_USB_RXINT_MASK); + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0); + + if (is_dma_capable() && !dma_off) + WARNING("dma still active\n"); +} + + +/* REVISIT it's not clear whether DaVinci can support full OTG. */ + +static int vbus_state = -1; + +#ifdef CONFIG_USB_MUSB_HDRC_HCD +#define portstate(stmt) stmt +#else +#define portstate(stmt) +#endif + + +/* VBUS SWITCHING IS BOARD-SPECIFIC */ + +#ifdef CONFIG_MACH_DAVINCI_EVM +#ifndef CONFIG_MACH_DAVINCI_EVM_OTG + +/* I2C operations are always synchronous, and require a task context. + * With unloaded systems, using the shared workqueue seems to suffice + * to satisfy the 100msec A_WAIT_VRISE timeout... 
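+ *
+ * (Editorial note, tracing the code rather than adding new behaviour:
+ * on the stock EVM, davinci_source_power() below first sets
+ * vbus_state = !is_on and then either calls the I2C expander directly
+ * with !is_on, or schedules this work, whose handler passes the
+ * already-inverted vbus_state and flips it back.  Either way the
+ * expander sees !is_on, and vbus_state ends up equal to is_on once the
+ * switch has been carried out.)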
+ */ +static void evm_deferred_drvvbus(struct work_struct *ignored) +{ + davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state); + vbus_state = !vbus_state; +} +static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); + +#endif /* modified board */ +#endif /* EVM */ + +static void davinci_source_power(struct musb *musb, int is_on, int immediate) +{ + if (is_on) + is_on = 1; + + if (vbus_state == is_on) + return; + vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ + +#ifdef CONFIG_MACH_DAVINCI_EVM + if (machine_is_davinci_evm()) { +#ifdef CONFIG_MACH_DAVINCI_EVM_OTG + /* modified EVM board switching VBUS with GPIO(6) not I2C + * NOTE: PINMUX0.RGB888 (bit23) must be clear + */ + if (is_on) + gpio_set(GPIO(6)); + else + gpio_clear(GPIO(6)); + immediate = 1; +#else + if (immediate) + davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on); + else + schedule_work(&evm_vbus_work); +#endif + } +#endif + if (immediate) + vbus_state = is_on; +} + +static void davinci_set_vbus(struct musb *musb, int is_on) +{ + WARN_ON(is_on && is_peripheral_active(musb)); + davinci_source_power(musb, is_on, 0); +} + + +#define POLL_SECONDS 2 + +static struct timer_list otg_workaround; + +static void otg_timer(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + void __iomem *mregs = musb->mregs; + u8 devctl; + unsigned long flags; + + /* We poll because DaVinci's won't expose several OTG-critical + * status change events (from the transceiver) otherwise. + */ + devctl = musb_readb(mregs, MUSB_DEVCTL); + DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); + + spin_lock_irqsave(&musb->lock, flags); + switch (musb->xceiv.state) { + case OTG_STATE_A_WAIT_VFALL: + /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL + * seems to mis-handle session "start" otherwise (or in our + * case "recover"), in routine "VBUS was valid by the time + * VBUSERR got reported during enumeration" cases. + */ + if (devctl & MUSB_DEVCTL_VBUS) { + mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); + break; + } + musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; + musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, + MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); + break; + case OTG_STATE_B_IDLE: + if (!is_peripheral_enabled(musb)) + break; + + /* There's no ID-changed IRQ, so we have no good way to tell + * when to switch to the A-Default state machine (by setting + * the DEVCTL.SESSION flag). + * + * Workaround: whenever we're in B_IDLE, try setting the + * session flag every few seconds. If it works, ID was + * grounded and we're now in the A-Default state machine. + * + * NOTE setting the session flag is _supposed_ to trigger + * SRP, but clearly it doesn't. + */ + musb_writeb(mregs, MUSB_DEVCTL, + devctl | MUSB_DEVCTL_SESSION); + devctl = musb_readb(mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) + mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); + else + musb->xceiv.state = OTG_STATE_A_IDLE; + break; + default: + break; + } + spin_unlock_irqrestore(&musb->lock, flags); +} + +static irqreturn_t davinci_interrupt(int irq, void *__hci) +{ + unsigned long flags; + irqreturn_t retval = IRQ_NONE; + struct musb *musb = __hci; + void __iomem *tibase = musb->ctrl_base; + u32 tmp; + + spin_lock_irqsave(&musb->lock, flags); + + /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through + * the Mentor registers (except for setup), use the TI ones and EOI. + * + * Docs describe irq "vector" registers asociated with the CPPI and + * USB EOI registers. 
These hold a bitmask corresponding to the + * current IRQ, not an irq handler address. Would using those bits + * resolve some of the races observed in this dispatch code?? + */ + + /* CPPI interrupts share the same IRQ line, but have their own + * mask, state, "vector", and EOI registers. + */ + if (is_cppi_enabled()) { + u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); + u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); + + if (cppi_tx || cppi_rx) { + DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx); + cppi_completion(musb, cppi_rx, cppi_tx); + retval = IRQ_HANDLED; + } + } + + /* ack and handle non-CPPI interrupts */ + tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); + musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); + DBG(4, "IRQ %08x\n", tmp); + + musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) + >> DAVINCI_USB_RXINT_SHIFT; + musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK) + >> DAVINCI_USB_TXINT_SHIFT; + musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK) + >> DAVINCI_USB_USBINT_SHIFT; + + /* DRVVBUS irqs are the only proxy we have (a very poor one!) for + * DaVinci's missing ID change IRQ. We need an ID change IRQ to + * switch appropriately between halves of the OTG state machine. + * Managing DEVCTL.SESSION per Mentor docs requires we know its + * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. + * Also, DRVVBUS pulses for SRP (but not at 5V) ... + */ + if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) { + int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG); + void __iomem *mregs = musb->mregs; + u8 devctl = musb_readb(mregs, MUSB_DEVCTL); + int err = musb->int_usb & MUSB_INTR_VBUSERROR; + + err = is_host_enabled(musb) + && (musb->int_usb & MUSB_INTR_VBUSERROR); + if (err) { + /* The Mentor core doesn't debounce VBUS as needed + * to cope with device connect current spikes. This + * means it's not uncommon for bus-powered devices + * to get VBUS errors during enumeration. + * + * This is a workaround, but newer RTL from Mentor + * seems to allow a better one: "re"starting sessions + * without waiting (on EVM, a **long** time) for VBUS + * to stop registering in devctl. + */ + musb->int_usb &= ~MUSB_INTR_VBUSERROR; + musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; + mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); + WARNING("VBUS error workaround (delay coming)\n"); + } else if (is_host_enabled(musb) && drvvbus) { + musb->is_active = 1; + MUSB_HST_MODE(musb); + musb->xceiv.default_a = 1; + musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; + portstate(musb->port1_status |= USB_PORT_STAT_POWER); + del_timer(&otg_workaround); + } else { + musb->is_active = 0; + MUSB_DEV_MODE(musb); + musb->xceiv.default_a = 0; + musb->xceiv.state = OTG_STATE_B_IDLE; + portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); + } + + /* NOTE: this must complete poweron within 100 msec */ + davinci_source_power(musb, drvvbus, 0); + DBG(2, "VBUS %s (%s)%s, devctl %02x\n", + drvvbus ? "on" : "off", + otg_state_string(musb), + err ? " ERROR" : "", + devctl); + retval = IRQ_HANDLED; + } + + if (musb->int_tx || musb->int_rx || musb->int_usb) + retval |= musb_interrupt(musb); + + /* irq stays asserted until EOI is written */ + musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); + + /* poll for ID change */ + if (is_otg_enabled(musb) + && musb->xceiv.state == OTG_STATE_B_IDLE) + mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); + + spin_unlock_irqrestore(&musb->lock, flags); + + /* REVISIT we sometimes get unhandled IRQs + * (e.g. ep0). not clear why... 
+ */ + if (retval != IRQ_HANDLED) + DBG(5, "unhandled? %08x\n", tmp); + return IRQ_HANDLED; +} + +int __init musb_platform_init(struct musb *musb) +{ + void __iomem *tibase = musb->ctrl_base; + u32 revision; + + musb->mregs += DAVINCI_BASE_OFFSET; +#if 0 + /* REVISIT there's something odd about clocking, this + * didn't appear do the job ... + */ + musb->clock = clk_get(pDevice, "usb"); + if (IS_ERR(musb->clock)) + return PTR_ERR(musb->clock); + + status = clk_enable(musb->clock); + if (status < 0) + return -ENODEV; +#endif + + /* returns zero if e.g. not clocked */ + revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); + if (revision == 0) + return -ENODEV; + + if (is_host_enabled(musb)) + setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); + + musb->board_set_vbus = davinci_set_vbus; + davinci_source_power(musb, 0, 1); + + /* reset the controller */ + musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); + + /* start the on-chip PHY and its PLL */ + phy_on(); + + msleep(5); + + /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ + pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", + revision, __raw_readl((void __force __iomem *) + IO_ADDRESS(USBPHY_CTL_PADDR)), + musb_readb(tibase, DAVINCI_USB_CTRL_REG)); + + musb->isr = davinci_interrupt; + return 0; +} + +int musb_platform_exit(struct musb *musb) +{ + if (is_host_enabled(musb)) + del_timer_sync(&otg_workaround); + + davinci_source_power(musb, 0 /*off*/, 1); + + /* delay, to avoid problems with module reload */ + if (is_host_enabled(musb) && musb->xceiv.default_a) { + int maxdelay = 30; + u8 devctl, warn = 0; + + /* if there's no peripheral connected, this can take a + * long time to fall, especially on EVM with huge C133. + */ + do { + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (!(devctl & MUSB_DEVCTL_VBUS)) + break; + if ((devctl & MUSB_DEVCTL_VBUS) != warn) { + warn = devctl & MUSB_DEVCTL_VBUS; + DBG(1, "VBUS %d\n", + warn >> MUSB_DEVCTL_VBUS_SHIFT); + } + msleep(1000); + maxdelay--; + } while (maxdelay > 0); + + /* in OTG mode, another host might be connected */ + if (devctl & MUSB_DEVCTL_VBUS) + DBG(1, "VBUS off timeout (devctl %02x)\n", devctl); + } + + phy_off(); + return 0; +} diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h new file mode 100644 index 00000000000..7fb6238e270 --- /dev/null +++ b/drivers/usb/musb/davinci.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2005-2006 by Texas Instruments + * + * The Inventra Controller Driver for Linux is free software; you + * can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. 
+ */ + +#ifndef __MUSB_HDRDF_H__ +#define __MUSB_HDRDF_H__ + +/* + * DaVinci-specific definitions + */ + +/* Integrated highspeed/otg PHY */ +#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) +#define USBPHY_PHYCLKGD (1 << 8) +#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */ +#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */ +#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */ +#define USBPHY_CLKO1SEL (1 << 3) +#define USBPHY_OSCPDWN (1 << 2) +#define USBPHY_PHYPDWN (1 << 0) + +/* For now include usb OTG module registers here */ +#define DAVINCI_USB_VERSION_REG 0x00 +#define DAVINCI_USB_CTRL_REG 0x04 +#define DAVINCI_USB_STAT_REG 0x08 +#define DAVINCI_RNDIS_REG 0x10 +#define DAVINCI_AUTOREQ_REG 0x14 +#define DAVINCI_USB_INT_SOURCE_REG 0x20 +#define DAVINCI_USB_INT_SET_REG 0x24 +#define DAVINCI_USB_INT_SRC_CLR_REG 0x28 +#define DAVINCI_USB_INT_MASK_REG 0x2c +#define DAVINCI_USB_INT_MASK_SET_REG 0x30 +#define DAVINCI_USB_INT_MASK_CLR_REG 0x34 +#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38 +#define DAVINCI_USB_EOI_REG 0x3c +#define DAVINCI_USB_EOI_INTVEC 0x40 + +/* BEGIN CPPI-generic (?) */ + +/* CPPI related registers */ +#define DAVINCI_TXCPPI_CTRL_REG 0x80 +#define DAVINCI_TXCPPI_TEAR_REG 0x84 +#define DAVINCI_CPPI_EOI_REG 0x88 +#define DAVINCI_CPPI_INTVEC_REG 0x8c +#define DAVINCI_TXCPPI_MASKED_REG 0x90 +#define DAVINCI_TXCPPI_RAW_REG 0x94 +#define DAVINCI_TXCPPI_INTENAB_REG 0x98 +#define DAVINCI_TXCPPI_INTCLR_REG 0x9c + +#define DAVINCI_RXCPPI_CTRL_REG 0xC0 +#define DAVINCI_RXCPPI_MASKED_REG 0xD0 +#define DAVINCI_RXCPPI_RAW_REG 0xD4 +#define DAVINCI_RXCPPI_INTENAB_REG 0xD8 +#define DAVINCI_RXCPPI_INTCLR_REG 0xDC + +#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0 +#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4 +#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8 +#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC + +/* CPPI state RAM entries */ +#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100 + +#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \ + (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40)) +#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \ + (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40)) + +/* CPPI masks */ +#define DAVINCI_DMA_CTRL_ENABLE 1 +#define DAVINCI_DMA_CTRL_DISABLE 0 + +#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF +#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF + +/* END CPPI-generic (?) */ + +#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */ +#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */ + +#define DAVINCI_USB_USBINT_SHIFT 16 +#define DAVINCI_USB_TXINT_SHIFT 0 +#define DAVINCI_USB_RXINT_SHIFT 8 + +#define DAVINCI_INTR_DRVVBUS 0x0100 + +#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */ +#define DAVINCI_USB_TXINT_MASK \ + (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT) +#define DAVINCI_USB_RXINT_MASK \ + (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT) + +#define DAVINCI_BASE_OFFSET 0x400 + +#endif /* __MUSB_HDRDF_H__ */ diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c new file mode 100644 index 00000000000..462586d06da --- /dev/null +++ b/drivers/usb/musb/musb_core.c @@ -0,0 +1,2266 @@ +/* + * MUSB OTG driver core code + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * Inventra (Multipoint) Dual-Role Controller Driver for Linux. + * + * This consists of a Host Controller Driver (HCD) and a peripheral + * controller driver implementing the "Gadget" API; OTG support is + * in the works. These are normal Linux-USB controller drivers which + * use IRQs and have no dedicated thread. + * + * This version of the driver has only been used with products from + * Texas Instruments. Those products integrate the Inventra logic + * with other DMA, IRQ, and bus modules, as well as other logic that + * needs to be reflected in this driver. + * + * + * NOTE: the original Mentor code here was pretty much a collection + * of mechanisms that don't seem to have been fully integrated/working + * for *any* Linux kernel version. This version aims at Linux 2.6.now, + * Key open issues include: + * + * - Lack of host-side transaction scheduling, for all transfer types. + * The hardware doesn't do it; instead, software must. + * + * This is not an issue for OTG devices that don't support external + * hubs, but for more "normal" USB hosts it's a user issue that the + * "multipoint" support doesn't scale in the expected ways. That + * includes DaVinci EVM in a common non-OTG mode. + * + * * Control and bulk use dedicated endpoints, and there's as + * yet no mechanism to either (a) reclaim the hardware when + * peripherals are NAKing, which gets complicated with bulk + * endpoints, or (b) use more than a single bulk endpoint in + * each direction. + * + * RESULT: one device may be perceived as blocking another one. + * + * * Interrupt and isochronous will dynamically allocate endpoint + * hardware, but (a) there's no record keeping for bandwidth; + * (b) in the common case that few endpoints are available, there + * is no mechanism to reuse endpoints to talk to multiple devices. + * + * RESULT: At one extreme, bandwidth can be overcommitted in + * some hardware configurations, no faults will be reported. + * At the other extreme, the bandwidth capabilities which do + * exist tend to be severely undercommitted. You can't yet hook + * up both a keyboard and a mouse to an external USB hub. 
+ */ + +/* + * This gets many kinds of configuration information: + * - Kconfig for everything user-configurable + * - for SOC or family details + * - platform_device for addressing, irq, and platform_data + * - platform_data is mostly for board-specific informarion + * + * Most of the conditional compilation will (someday) vanish. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_ARM +#include +#include +#include +#endif + +#include "musb_core.h" + + +#ifdef CONFIG_ARCH_DAVINCI +#include "davinci.h" +#endif + + + +#if MUSB_DEBUG > 0 +unsigned debug = MUSB_DEBUG; +module_param(debug, uint, 0); +MODULE_PARM_DESC(debug, "initial debug message level"); + +#define MUSB_VERSION_SUFFIX "/dbg" +#endif + +#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" +#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" + +#define MUSB_VERSION_BASE "6.0" + +#ifndef MUSB_VERSION_SUFFIX +#define MUSB_VERSION_SUFFIX "" +#endif +#define MUSB_VERSION MUSB_VERSION_BASE MUSB_VERSION_SUFFIX + +#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION + +#define MUSB_DRIVER_NAME "musb_hdrc" +const char musb_driver_name[] = MUSB_DRIVER_NAME; + +MODULE_DESCRIPTION(DRIVER_INFO); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); + + +/*-------------------------------------------------------------------------*/ + +static inline struct musb *dev_to_musb(struct device *dev) +{ +#ifdef CONFIG_USB_MUSB_HDRC_HCD + /* usbcore insists dev->driver_data is a "struct hcd *" */ + return hcd_to_musb(dev_get_drvdata(dev)); +#else + return dev_get_drvdata(dev); +#endif +} + +/*-------------------------------------------------------------------------*/ + +#ifndef CONFIG_USB_TUSB6010 +/* + * Load an endpoint's FIFO + */ +void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) +{ + void __iomem *fifo = hw_ep->fifo; + + prefetch((u8 *)src); + + DBG(4, "%cX ep%d fifo %p count %d buf %p\n", + 'T', hw_ep->epnum, fifo, len, src); + + /* we can't assume unaligned reads work */ + if (likely((0x01 & (unsigned long) src) == 0)) { + u16 index = 0; + + /* best case is 32bit-aligned source address */ + if ((0x02 & (unsigned long) src) == 0) { + if (len >= 4) { + writesl(fifo, src + index, len >> 2); + index += len & ~0x03; + } + if (len & 0x02) { + musb_writew(fifo, 0, *(u16 *)&src[index]); + index += 2; + } + } else { + if (len >= 2) { + writesw(fifo, src + index, len >> 1); + index += len & ~0x01; + } + } + if (len & 0x01) + musb_writeb(fifo, 0, src[index]); + } else { + /* byte aligned */ + writesb(fifo, src, len); + } +} + +/* + * Unload an endpoint's FIFO + */ +void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) +{ + void __iomem *fifo = hw_ep->fifo; + + DBG(4, "%cX ep%d fifo %p count %d buf %p\n", + 'R', hw_ep->epnum, fifo, len, dst); + + /* we can't assume unaligned writes work */ + if (likely((0x01 & (unsigned long) dst) == 0)) { + u16 index = 0; + + /* best case is 32bit-aligned destination address */ + if ((0x02 & (unsigned long) dst) == 0) { + if (len >= 4) { + readsl(fifo, dst, len >> 2); + index = len & ~0x03; + } + if (len & 0x02) { + *(u16 *)&dst[index] = musb_readw(fifo, 0); + index += 2; + } + } else { + if (len >= 2) { + readsw(fifo, dst, len >> 1); + index = len & ~0x01; + } + } + if (len & 0x01) + dst[index] = musb_readb(fifo, 0); + } else { + /* byte aligned */ + readsb(fifo, dst, len); + } +} + +#endif /* normal PIO */ + + 
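The two PIO helpers above pick the widest FIFO access the buffer address allows: 32-bit word bursts for 4-byte-aligned buffers, 16-bit accesses for 2-byte alignment, and single bytes otherwise, with any sub-word tail mopped up after the burst. The standalone sketch below mirrors that dispatch outside the kernel; fake_fifo and fifo_push() are made-up stand-ins for the FIFO register window and the writesl()/writesw()/writesb() accessors, so this is an illustration of the copy-width logic only, not driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the endpoint FIFO window; a real FIFO is a single register */
static uint8_t fake_fifo[64];
static unsigned int fifo_fill;

static void fifo_push(const void *data, unsigned int bytes)
{
	memcpy(fake_fifo + fifo_fill, data, bytes);
	fifo_fill += bytes;
}

/* same shape as musb_write_fifo(): widest access first, tail afterwards */
static void sketch_write_fifo(const uint8_t *src, uint16_t len)
{
	if (((unsigned long) src & 0x01) == 0) {
		uint16_t index = 0;

		if (((unsigned long) src & 0x02) == 0) {
			if (len >= 4) {			/* 32-bit bursts */
				fifo_push(src, len & ~0x03);
				index += len & ~0x03;
			}
			if (len & 0x02) {		/* trailing halfword */
				fifo_push(src + index, 2);
				index += 2;
			}
		} else {
			if (len >= 2) {			/* 16-bit accesses */
				fifo_push(src, len & ~0x01);
				index += len & ~0x01;
			}
		}
		if (len & 0x01)				/* odd final byte */
			fifo_push(src + index, 1);
	} else {
		fifo_push(src, len);			/* byte-at-a-time */
	}
}

int main(void)
{
	static const uint8_t msg[] = "0123456789";

	/* whatever the alignment, the same bytes end up in the fifo */
	sketch_write_fifo(msg, 7);		/* word-aligned source */
	sketch_write_fifo(msg + 1, 5);		/* odd source address */
	printf("fifo holds %u bytes: %.12s\n", fifo_fill,
			(const char *) fake_fifo);
	return 0;
}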
+/*-------------------------------------------------------------------------*/ + +/* for high speed test mode; see USB 2.0 spec 7.1.20 */ +static const u8 musb_test_packet[53] = { + /* implicit SYNC then DATA0 to start */ + + /* JKJKJKJK x9 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* JJKKJJKK x8 */ + 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, + /* JJJJKKKK x8 */ + 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, + /* JJJJJJJKKKKKKK x8 */ + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + /* JJJJJJJK x8 */ + 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, + /* JKKKKKKK x10, JK */ + 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e + + /* implicit CRC16 then EOP to end */ +}; + +void musb_load_testpacket(struct musb *musb) +{ + void __iomem *regs = musb->endpoints[0].regs; + + musb_ep_select(musb->mregs, 0); + musb_write_fifo(musb->control_ep, + sizeof(musb_test_packet), musb_test_packet); + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY); +} + +/*-------------------------------------------------------------------------*/ + +const char *otg_state_string(struct musb *musb) +{ + switch (musb->xceiv.state) { + case OTG_STATE_A_IDLE: return "a_idle"; + case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; + case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; + case OTG_STATE_A_HOST: return "a_host"; + case OTG_STATE_A_SUSPEND: return "a_suspend"; + case OTG_STATE_A_PERIPHERAL: return "a_peripheral"; + case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall"; + case OTG_STATE_A_VBUS_ERR: return "a_vbus_err"; + case OTG_STATE_B_IDLE: return "b_idle"; + case OTG_STATE_B_SRP_INIT: return "b_srp_init"; + case OTG_STATE_B_PERIPHERAL: return "b_peripheral"; + case OTG_STATE_B_WAIT_ACON: return "b_wait_acon"; + case OTG_STATE_B_HOST: return "b_host"; + default: return "UNDEFINED"; + } +} + +#ifdef CONFIG_USB_MUSB_OTG + +/* + * See also USB_OTG_1-3.pdf 6.6.5 Timers + * REVISIT: Are the other timers done in the hardware? + */ +#define TB_ASE0_BRST 100 /* Min 3.125 ms */ + +/* + * Handles OTG hnp timeouts, such as b_ase0_brst + */ +void musb_otg_timer_func(unsigned long data) +{ + struct musb *musb = (struct musb *)data; + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + switch (musb->xceiv.state) { + case OTG_STATE_B_WAIT_ACON: + DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n"); + musb_g_disconnect(musb); + musb->xceiv.state = OTG_STATE_B_PERIPHERAL; + musb->is_active = 0; + break; + case OTG_STATE_A_WAIT_BCON: + DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n"); + musb_hnp_stop(musb); + break; + default: + DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb)); + } + musb->ignore_disconnect = 0; + spin_unlock_irqrestore(&musb->lock, flags); +} + +static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0); + +/* + * Stops the B-device HNP state. Caller must take care of locking. 
+ */ +void musb_hnp_stop(struct musb *musb) +{ + struct usb_hcd *hcd = musb_to_hcd(musb); + void __iomem *mbase = musb->mregs; + u8 reg; + + switch (musb->xceiv.state) { + case OTG_STATE_A_PERIPHERAL: + case OTG_STATE_A_WAIT_VFALL: + case OTG_STATE_A_WAIT_BCON: + DBG(1, "HNP: Switching back to A-host\n"); + musb_g_disconnect(musb); + musb->xceiv.state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + musb->is_active = 0; + break; + case OTG_STATE_B_HOST: + DBG(1, "HNP: Disabling HR\n"); + hcd->self.is_b_host = 0; + musb->xceiv.state = OTG_STATE_B_PERIPHERAL; + MUSB_DEV_MODE(musb); + reg = musb_readb(mbase, MUSB_POWER); + reg |= MUSB_POWER_SUSPENDM; + musb_writeb(mbase, MUSB_POWER, reg); + /* REVISIT: Start SESSION_REQUEST here? */ + break; + default: + DBG(1, "HNP: Stopping in unknown state %s\n", + otg_state_string(musb)); + } + + /* + * When returning to A state after HNP, avoid hub_port_rebounce(), + * which cause occasional OPT A "Did not receive reset after connect" + * errors. + */ + musb->port1_status &= + ~(1 << USB_PORT_FEAT_C_CONNECTION); +} + +#endif + +/* + * Interrupt Service Routine to record USB "global" interrupts. + * Since these do not happen often and signify things of + * paramount importance, it seems OK to check them individually; + * the order of the tests is specified in the manual + * + * @param musb instance pointer + * @param int_usb register contents + * @param devctl + * @param power + */ + +#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \ + | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \ + | MUSB_INTR_RESET) + +static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, + u8 devctl, u8 power) +{ + irqreturn_t handled = IRQ_NONE; + void __iomem *mbase = musb->mregs; + + DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, + int_usb); + + /* in host mode, the peripheral may issue remote wakeup. + * in peripheral mode, the host may resume the link. + * spurious RESUME irqs happen too, paired with SUSPEND. + */ + if (int_usb & MUSB_INTR_RESUME) { + handled = IRQ_HANDLED; + DBG(3, "RESUME (%s)\n", otg_state_string(musb)); + + if (devctl & MUSB_DEVCTL_HM) { +#ifdef CONFIG_USB_MUSB_HDRC_HCD + switch (musb->xceiv.state) { + case OTG_STATE_A_SUSPEND: + /* remote wakeup? later, GetPortStatus + * will stop RESUME signaling + */ + + if (power & MUSB_POWER_SUSPENDM) { + /* spurious */ + musb->int_usb &= ~MUSB_INTR_SUSPEND; + DBG(2, "Spurious SUSPENDM\n"); + break; + } + + power &= ~MUSB_POWER_SUSPENDM; + musb_writeb(mbase, MUSB_POWER, + power | MUSB_POWER_RESUME); + + musb->port1_status |= + (USB_PORT_STAT_C_SUSPEND << 16) + | MUSB_PORT_STAT_RESUME; + musb->rh_timer = jiffies + + msecs_to_jiffies(20); + + musb->xceiv.state = OTG_STATE_A_HOST; + musb->is_active = 1; + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + break; + case OTG_STATE_B_WAIT_ACON: + musb->xceiv.state = OTG_STATE_B_PERIPHERAL; + musb->is_active = 1; + MUSB_DEV_MODE(musb); + break; + default: + WARNING("bogus %s RESUME (%s)\n", + "host", + otg_state_string(musb)); + } +#endif + } else { + switch (musb->xceiv.state) { +#ifdef CONFIG_USB_MUSB_HDRC_HCD + case OTG_STATE_A_SUSPEND: + /* possibly DISCONNECT is upcoming */ + musb->xceiv.state = OTG_STATE_A_HOST; + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + break; +#endif +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + case OTG_STATE_B_WAIT_ACON: + case OTG_STATE_B_PERIPHERAL: + /* disconnect while suspended? we may + * not get a disconnect irq... 
+ */ + if ((devctl & MUSB_DEVCTL_VBUS) + != (3 << MUSB_DEVCTL_VBUS_SHIFT) + ) { + musb->int_usb |= MUSB_INTR_DISCONNECT; + musb->int_usb &= ~MUSB_INTR_SUSPEND; + break; + } + musb_g_resume(musb); + break; + case OTG_STATE_B_IDLE: + musb->int_usb &= ~MUSB_INTR_SUSPEND; + break; +#endif + default: + WARNING("bogus %s RESUME (%s)\n", + "peripheral", + otg_state_string(musb)); + } + } + } + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + /* see manual for the order of the tests */ + if (int_usb & MUSB_INTR_SESSREQ) { + DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); + + /* IRQ arrives from ID pin sense or (later, if VBUS power + * is removed) SRP. responses are time critical: + * - turn on VBUS (with silicon-specific mechanism) + * - go through A_WAIT_VRISE + * - ... to A_WAIT_BCON. + * a_wait_vrise_tmout triggers VBUS_ERROR transitions + */ + musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); + musb->ep0_stage = MUSB_EP0_START; + musb->xceiv.state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + musb_set_vbus(musb, 1); + + handled = IRQ_HANDLED; + } + + if (int_usb & MUSB_INTR_VBUSERROR) { + int ignore = 0; + + /* During connection as an A-Device, we may see a short + * current spikes causing voltage drop, because of cable + * and peripheral capacitance combined with vbus draw. + * (So: less common with truly self-powered devices, where + * vbus doesn't act like a power supply.) + * + * Such spikes are short; usually less than ~500 usec, max + * of ~2 msec. That is, they're not sustained overcurrent + * errors, though they're reported using VBUSERROR irqs. + * + * Workarounds: (a) hardware: use self powered devices. + * (b) software: ignore non-repeated VBUS errors. + * + * REVISIT: do delays from lots of DEBUG_KERNEL checks + * make trouble here, keeping VBUS < 4.4V ? + */ + switch (musb->xceiv.state) { + case OTG_STATE_A_HOST: + /* recovery is dicey once we've gotten past the + * initial stages of enumeration, but if VBUS + * stayed ok at the other end of the link, and + * another reset is due (at least for high speed, + * to redo the chirp etc), it might work OK... 
+			 */
+		case OTG_STATE_A_WAIT_BCON:
+		case OTG_STATE_A_WAIT_VRISE:
+			if (musb->vbuserr_retry) {
+				musb->vbuserr_retry--;
+				ignore = 1;
+				devctl |= MUSB_DEVCTL_SESSION;
+				musb_writeb(mbase, MUSB_DEVCTL, devctl);
+			} else {
+				musb->port1_status |=
+					  (1 << USB_PORT_FEAT_OVER_CURRENT)
+					| (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+			}
+			break;
+		default:
+			break;
+		}
+
+		DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+				otg_state_string(musb),
+				devctl,
+				({ char *s;
+				switch (devctl & MUSB_DEVCTL_VBUS) {
+				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
+					s = "<SessEnd"; break;
+				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
+					s = "<AValid"; break;
+				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
+					s = "<VBusValid"; break;
+				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
+				default:
+					s = "VALID"; break;
+				}; s; }),
+				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
+				musb->port1_status);
+
+		/* go through A_WAIT_VFALL then start a new session */
+		if (!ignore)
+			musb_set_vbus(musb, 0);
+		handled = IRQ_HANDLED;
+	}
+
+	if (int_usb & MUSB_INTR_CONNECT) {
+		struct usb_hcd *hcd = musb_to_hcd(musb);
+
+		handled = IRQ_HANDLED;
+		musb->is_active = 1;
+		set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+		musb->ep0_stage = MUSB_EP0_START;
+
+#ifdef CONFIG_USB_MUSB_OTG
+		/* flush endpoints when transitioning from Device Mode */
+		if (is_peripheral_active(musb)) {
+			/* REVISIT HNP; just force disconnect */
+		}
+		musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
+		musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
+		musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
+#endif
+		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+					|USB_PORT_STAT_HIGH_SPEED
+					|USB_PORT_STAT_ENABLE
+					);
+		musb->port1_status |= USB_PORT_STAT_CONNECTION
+					|(USB_PORT_STAT_C_CONNECTION << 16);
+
+		/* high vs full speed is just a guess until after reset */
+		if (devctl & MUSB_DEVCTL_LSDEV)
+			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+		if (hcd->status_urb)
+			usb_hcd_poll_rh_status(hcd);
+		else
+			usb_hcd_resume_root_hub(hcd);
+
+		MUSB_HST_MODE(musb);
+
+		/* indicate new connection to OTG machine */
+		switch (musb->xceiv.state) {
+		case OTG_STATE_B_PERIPHERAL:
+			if (int_usb & MUSB_INTR_SUSPEND) {
+				DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
+				musb->xceiv.state = OTG_STATE_B_HOST;
+				hcd->self.is_b_host = 1;
+				int_usb &= ~MUSB_INTR_SUSPEND;
+			} else
+				DBG(1, "CONNECT as b_peripheral???\n");
+			break;
+		case OTG_STATE_B_WAIT_ACON:
+			DBG(1, "HNP: Waiting to switch to b_host state\n");
+			musb->xceiv.state = OTG_STATE_B_HOST;
+			hcd->self.is_b_host = 1;
+			break;
+		default:
+			if ((devctl & MUSB_DEVCTL_VBUS)
+					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+				musb->xceiv.state = OTG_STATE_A_HOST;
+				hcd->self.is_b_host = 0;
+			}
+			break;
+		}
+		DBG(1, "CONNECT (%s) devctl %02x\n",
+				otg_state_string(musb), devctl);
+	}
+#endif	/* CONFIG_USB_MUSB_HDRC_HCD */
+
+	/* mentor saves a bit: bus reset and babble share the same irq.
+	 * only host sees babble; only peripheral sees bus reset.
+	 */
+	if (int_usb & MUSB_INTR_RESET) {
+		if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+			/*
+			 * Looks like non-HS BABBLE can be ignored, but
+			 * HS BABBLE is an error condition. For HS the solution
+			 * is to avoid babble in the first place and fix what
+			 * caused BABBLE. When HS BABBLE happens we can only
+			 * stop the session.
+			 */
+			if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
+				DBG(1, "BABBLE devctl: %02x\n", devctl);
+			else {
+				ERR("Stopping host session -- babble\n");
+				musb_writeb(mbase, MUSB_DEVCTL, 0);
+			}
+		} else if (is_peripheral_capable()) {
+			DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
+			switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_OTG
+			case OTG_STATE_A_SUSPEND:
+				/* We need to ignore disconnect on suspend
+				 * otherwise tusb 2.0 won't reconnect after a
+				 * power cycle, which breaks otg compliance.
+ */ + musb->ignore_disconnect = 1; + musb_g_reset(musb); + /* FALLTHROUGH */ + case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ + DBG(1, "HNP: Setting timer as %s\n", + otg_state_string(musb)); + musb_otg_timer.data = (unsigned long)musb; + mod_timer(&musb_otg_timer, jiffies + + msecs_to_jiffies(100)); + break; + case OTG_STATE_A_PERIPHERAL: + musb_hnp_stop(musb); + break; + case OTG_STATE_B_WAIT_ACON: + DBG(1, "HNP: RESET (%s), to b_peripheral\n", + otg_state_string(musb)); + musb->xceiv.state = OTG_STATE_B_PERIPHERAL; + musb_g_reset(musb); + break; +#endif + case OTG_STATE_B_IDLE: + musb->xceiv.state = OTG_STATE_B_PERIPHERAL; + /* FALLTHROUGH */ + case OTG_STATE_B_PERIPHERAL: + musb_g_reset(musb); + break; + default: + DBG(1, "Unhandled BUS RESET as %s\n", + otg_state_string(musb)); + } + } + + handled = IRQ_HANDLED; + } + schedule_work(&musb->irq_work); + + return handled; +} + +/* + * Interrupt Service Routine to record USB "global" interrupts. + * Since these do not happen often and signify things of + * paramount importance, it seems OK to check them individually; + * the order of the tests is specified in the manual + * + * @param musb instance pointer + * @param int_usb register contents + * @param devctl + * @param power + */ +static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, + u8 devctl, u8 power) +{ + irqreturn_t handled = IRQ_NONE; + +#if 0 +/* REVISIT ... this would be for multiplexing periodic endpoints, or + * supporting transfer phasing to prevent exceeding ISO bandwidth + * limits of a given frame or microframe. + * + * It's not needed for peripheral side, which dedicates endpoints; + * though it _might_ use SOF irqs for other purposes. + * + * And it's not currently needed for host side, which also dedicates + * endpoints, relies on TX/RX interval registers, and isn't claimed + * to support ISO transfers yet. + */ + if (int_usb & MUSB_INTR_SOF) { + void __iomem *mbase = musb->mregs; + struct musb_hw_ep *ep; + u8 epnum; + u16 frame; + + DBG(6, "START_OF_FRAME\n"); + handled = IRQ_HANDLED; + + /* start any periodic Tx transfers waiting for current frame */ + frame = musb_readw(mbase, MUSB_FRAME); + ep = musb->endpoints; + for (epnum = 1; (epnum < musb->nr_endpoints) + && (musb->epmask >= (1 << epnum)); + epnum++, ep++) { + /* + * FIXME handle framecounter wraps (12 bits) + * eliminate duplicated StartUrb logic + */ + if (ep->dwWaitFrame >= frame) { + ep->dwWaitFrame = 0; + pr_debug("SOF --> periodic TX%s on %d\n", + ep->tx_channel ? 
" DMA" : "", + epnum); + if (!ep->tx_channel) + musb_h_tx_start(musb, epnum); + else + cppi_hostdma_start(musb, epnum); + } + } /* end of for loop */ + } +#endif + + if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { + DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", + otg_state_string(musb), + MUSB_MODE(musb), devctl); + handled = IRQ_HANDLED; + + switch (musb->xceiv.state) { +#ifdef CONFIG_USB_MUSB_HDRC_HCD + case OTG_STATE_A_HOST: + case OTG_STATE_A_SUSPEND: + musb_root_disconnect(musb); + if (musb->a_wait_bcon != 0) + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon)); + break; +#endif /* HOST */ +#ifdef CONFIG_USB_MUSB_OTG + case OTG_STATE_B_HOST: + musb_hnp_stop(musb); + break; + case OTG_STATE_A_PERIPHERAL: + musb_hnp_stop(musb); + musb_root_disconnect(musb); + /* FALLTHROUGH */ + case OTG_STATE_B_WAIT_ACON: + /* FALLTHROUGH */ +#endif /* OTG */ +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_IDLE: + musb_g_disconnect(musb); + break; +#endif /* GADGET */ + default: + WARNING("unhandled DISCONNECT transition (%s)\n", + otg_state_string(musb)); + break; + } + + schedule_work(&musb->irq_work); + } + + if (int_usb & MUSB_INTR_SUSPEND) { + DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", + otg_state_string(musb), devctl, power); + handled = IRQ_HANDLED; + + switch (musb->xceiv.state) { +#ifdef CONFIG_USB_MUSB_OTG + case OTG_STATE_A_PERIPHERAL: + /* + * We cannot stop HNP here, devctl BDEVICE might be + * still set. + */ + break; +#endif + case OTG_STATE_B_PERIPHERAL: + musb_g_suspend(musb); + musb->is_active = is_otg_enabled(musb) + && musb->xceiv.gadget->b_hnp_enable; + if (musb->is_active) { +#ifdef CONFIG_USB_MUSB_OTG + musb->xceiv.state = OTG_STATE_B_WAIT_ACON; + DBG(1, "HNP: Setting timer for b_ase0_brst\n"); + musb_otg_timer.data = (unsigned long)musb; + mod_timer(&musb_otg_timer, jiffies + + msecs_to_jiffies(TB_ASE0_BRST)); +#endif + } + break; + case OTG_STATE_A_WAIT_BCON: + if (musb->a_wait_bcon != 0) + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon)); + break; + case OTG_STATE_A_HOST: + musb->xceiv.state = OTG_STATE_A_SUSPEND; + musb->is_active = is_otg_enabled(musb) + && musb->xceiv.host->b_hnp_enable; + break; + case OTG_STATE_B_HOST: + /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ + DBG(1, "REVISIT: SUSPEND as B_HOST\n"); + break; + default: + /* "should not happen" */ + musb->is_active = 0; + break; + } + schedule_work(&musb->irq_work); + } + + + return handled; +} + +/*-------------------------------------------------------------------------*/ + +/* +* Program the HDRC to start (enable interrupts, dma, etc.). 
+*/ +void musb_start(struct musb *musb) +{ + void __iomem *regs = musb->mregs; + u8 devctl = musb_readb(regs, MUSB_DEVCTL); + + DBG(2, "<== devctl %02x\n", devctl); + + /* Set INT enable registers, enable interrupts */ + musb_writew(regs, MUSB_INTRTXE, musb->epmask); + musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe); + musb_writeb(regs, MUSB_INTRUSBE, 0xf7); + + musb_writeb(regs, MUSB_TESTMODE, 0); + + /* put into basic highspeed mode and start session */ + musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE + | MUSB_POWER_SOFTCONN + | MUSB_POWER_HSENAB + /* ENSUSPEND wedges tusb */ + /* | MUSB_POWER_ENSUSPEND */ + ); + + musb->is_active = 0; + devctl = musb_readb(regs, MUSB_DEVCTL); + devctl &= ~MUSB_DEVCTL_SESSION; + + if (is_otg_enabled(musb)) { + /* session started after: + * (a) ID-grounded irq, host mode; + * (b) vbus present/connect IRQ, peripheral mode; + * (c) peripheral initiates, using SRP + */ + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->is_active = 1; + else + devctl |= MUSB_DEVCTL_SESSION; + + } else if (is_host_enabled(musb)) { + /* assume ID pin is hard-wired to ground */ + devctl |= MUSB_DEVCTL_SESSION; + + } else /* peripheral is enabled */ { + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->is_active = 1; + } + musb_platform_enable(musb); + musb_writeb(regs, MUSB_DEVCTL, devctl); +} + + +static void musb_generic_disable(struct musb *musb) +{ + void __iomem *mbase = musb->mregs; + u16 temp; + + /* disable interrupts */ + musb_writeb(mbase, MUSB_INTRUSBE, 0); + musb_writew(mbase, MUSB_INTRTXE, 0); + musb_writew(mbase, MUSB_INTRRXE, 0); + + /* off */ + musb_writeb(mbase, MUSB_DEVCTL, 0); + + /* flush pending interrupts */ + temp = musb_readb(mbase, MUSB_INTRUSB); + temp = musb_readw(mbase, MUSB_INTRTX); + temp = musb_readw(mbase, MUSB_INTRRX); + +} + +/* + * Make the HDRC stop (disable interrupts, etc.); + * reversible by musb_start + * called on gadget driver unregister + * with controller locked, irqs blocked + * acts as a NOP unless some role activated the hardware + */ +void musb_stop(struct musb *musb) +{ + /* stop IRQs, timers, ... */ + musb_platform_disable(musb); + musb_generic_disable(musb); + DBG(3, "HDRC disabled\n"); + + /* FIXME + * - mark host and/or peripheral drivers unusable/inactive + * - disable DMA (and enable it in HdrcStart) + * - make sure we can musb_start() after musb_stop(); with + * OTG mode, gadget driver module rmmod/modprobe cycles that + * - ... + */ + musb_platform_try_idle(musb, 0); +} + +static void musb_shutdown(struct platform_device *pdev) +{ + struct musb *musb = dev_to_musb(&pdev->dev); + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + musb_platform_disable(musb); + musb_generic_disable(musb); + if (musb->clock) { + clk_put(musb->clock); + musb->clock = NULL; + } + spin_unlock_irqrestore(&musb->lock, flags); + + /* FIXME power down */ +} + + +/*-------------------------------------------------------------------------*/ + +/* + * The silicon either has hard-wired endpoint configurations, or else + * "dynamic fifo" sizing. The driver has support for both, though at this + * writing only the dynamic sizing is very well tested. We use normal + * idioms to so both modes are compile-tested, but dead code elimination + * leaves only the relevant one in the object file. + * + * We don't currently use dynamic fifo setup capability to do anything + * more than selecting one of a bunch of predefined configurations. 
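 *
 * Worked example of the dynamic sizing arithmetic in fifo_setup() below:
 * a 512-byte double-buffered fifo is programmed as
 * c_size = (ffs(512) - 1) - 3 = 6 with the MUSB_FIFOSZ_DPB bit set, it
 * consumes 2 * 512 bytes of the DYN_FIFO_SIZE pool, and its start address
 * is written in 8-byte units (MUSB_TXFIFOADD = offset >> 3).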
+ */ +#ifdef MUSB_C_DYNFIFO_DEF +#define can_dynfifo() 1 +#else +#define can_dynfifo() 0 +#endif + +#if defined(CONFIG_USB_TUSB6010) || \ + defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) +static ushort __initdata fifo_mode = 4; +#else +static ushort __initdata fifo_mode = 2; +#endif + +/* "modprobe ... fifo_mode=1" etc */ +module_param(fifo_mode, ushort, 0); +MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration"); + + +#define DYN_FIFO_SIZE (1<<(MUSB_C_RAM_BITS+2)) + +enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed)); +enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed)); + +struct fifo_cfg { + u8 hw_ep_num; + enum fifo_style style; + enum buf_mode mode; + u16 maxpacket; +}; + +/* + * tables defining fifo_mode values. define more if you like. + * for host side, make sure both halves of ep1 are set up. + */ + +/* mode 0 - fits in 2KB */ +static struct fifo_cfg __initdata mode_0_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 1 - fits in 4KB */ +static struct fifo_cfg __initdata mode_1_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 2 - fits in 4KB */ +static struct fifo_cfg __initdata mode_2_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 3 - fits in 4KB */ +static struct fifo_cfg __initdata mode_3_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, +{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, +}; + +/* mode 4 - fits in 16KB */ +static struct fifo_cfg __initdata mode_4_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 8, .style = FIFO_TX, 
.maxpacket = 512, }, +{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, +{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, +}; + + +/* + * configure a fifo; for non-shared endpoints, this may be called + * once for a tx fifo and once for an rx fifo. + * + * returns negative errno or offset for next fifo. + */ +static int __init +fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep, + const struct fifo_cfg *cfg, u16 offset) +{ + void __iomem *mbase = musb->mregs; + int size = 0; + u16 maxpacket = cfg->maxpacket; + u16 c_off = offset >> 3; + u8 c_size; + + /* expect hw_ep has already been zero-initialized */ + + size = ffs(max(maxpacket, (u16) 8)) - 1; + maxpacket = 1 << size; + + c_size = size - 3; + if (cfg->mode == BUF_DOUBLE) { + if ((offset + (maxpacket << 1)) > DYN_FIFO_SIZE) + return -EMSGSIZE; + c_size |= MUSB_FIFOSZ_DPB; + } else { + if ((offset + maxpacket) > DYN_FIFO_SIZE) + return -EMSGSIZE; + } + + /* configure the FIFO */ + musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum); + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + /* EP0 reserved endpoint for control, bidirectional; + * EP1 reserved for bulk, two unidirection halves. + */ + if (hw_ep->epnum == 1) + musb->bulk_ep = hw_ep; + /* REVISIT error check: be sure ep0 can both rx and tx ... */ +#endif + switch (cfg->style) { + case FIFO_TX: + musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); + musb_writew(mbase, MUSB_TXFIFOADD, c_off); + hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); + hw_ep->max_packet_sz_tx = maxpacket; + break; + case FIFO_RX: + musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); + musb_writew(mbase, MUSB_RXFIFOADD, c_off); + hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); + hw_ep->max_packet_sz_rx = maxpacket; + break; + case FIFO_RXTX: + musb_writeb(mbase, MUSB_TXFIFOSZ, c_size); + musb_writew(mbase, MUSB_TXFIFOADD, c_off); + hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); + hw_ep->max_packet_sz_rx = maxpacket; + + musb_writeb(mbase, MUSB_RXFIFOSZ, c_size); + musb_writew(mbase, MUSB_RXFIFOADD, c_off); + hw_ep->tx_double_buffered = hw_ep->rx_double_buffered; + hw_ep->max_packet_sz_tx = maxpacket; + + hw_ep->is_shared_fifo = true; + break; + } + + /* NOTE rx and tx endpoint irqs aren't managed separately, + * which happens to be ok + */ + musb->epmask |= (1 << hw_ep->epnum); + + return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 
1 : 0)); +} + +static struct fifo_cfg __initdata ep0_cfg = { + .style = FIFO_RXTX, .maxpacket = 64, +}; + +static int __init ep_config_from_table(struct musb *musb) +{ + const struct fifo_cfg *cfg; + unsigned i, n; + int offset; + struct musb_hw_ep *hw_ep = musb->endpoints; + + switch (fifo_mode) { + default: + fifo_mode = 0; + /* FALLTHROUGH */ + case 0: + cfg = mode_0_cfg; + n = ARRAY_SIZE(mode_0_cfg); + break; + case 1: + cfg = mode_1_cfg; + n = ARRAY_SIZE(mode_1_cfg); + break; + case 2: + cfg = mode_2_cfg; + n = ARRAY_SIZE(mode_2_cfg); + break; + case 3: + cfg = mode_3_cfg; + n = ARRAY_SIZE(mode_3_cfg); + break; + case 4: + cfg = mode_4_cfg; + n = ARRAY_SIZE(mode_4_cfg); + break; + } + + printk(KERN_DEBUG "%s: setup fifo_mode %d\n", + musb_driver_name, fifo_mode); + + + offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0); + /* assert(offset > 0) */ + + /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would + * be better than static MUSB_C_NUM_EPS and DYN_FIFO_SIZE... + */ + + for (i = 0; i < n; i++) { + u8 epn = cfg->hw_ep_num; + + if (epn >= MUSB_C_NUM_EPS) { + pr_debug("%s: invalid ep %d\n", + musb_driver_name, epn); + continue; + } + offset = fifo_setup(musb, hw_ep + epn, cfg++, offset); + if (offset < 0) { + pr_debug("%s: mem overrun, ep %d\n", + musb_driver_name, epn); + return -EINVAL; + } + epn++; + musb->nr_endpoints = max(epn, musb->nr_endpoints); + } + + printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", + musb_driver_name, + n + 1, MUSB_C_NUM_EPS * 2 - 1, + offset, DYN_FIFO_SIZE); + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + if (!musb->bulk_ep) { + pr_debug("%s: missing bulk\n", musb_driver_name); + return -EINVAL; + } +#endif + + return 0; +} + + +/* + * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false + * @param musb the controller + */ +static int __init ep_config_from_hw(struct musb *musb) +{ + u8 epnum = 0, reg; + struct musb_hw_ep *hw_ep; + void *mbase = musb->mregs; + + DBG(2, "<== static silicon ep config\n"); + + /* FIXME pick up ep0 maxpacket size */ + + for (epnum = 1; epnum < MUSB_C_NUM_EPS; epnum++) { + musb_ep_select(mbase, epnum); + hw_ep = musb->endpoints + epnum; + + /* read from core using indexed model */ + reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE); + if (!reg) { + /* 0's returned when no more endpoints */ + break; + } + musb->nr_endpoints++; + musb->epmask |= (1 << epnum); + + hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f); + + /* shared TX/RX FIFO? */ + if ((reg & 0xf0) == 0xf0) { + hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx; + hw_ep->is_shared_fifo = true; + continue; + } else { + hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4); + hw_ep->is_shared_fifo = false; + } + + /* FIXME set up hw_ep->{rx,tx}_double_buffered */ + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + /* pick an RX/TX endpoint for bulk */ + if (hw_ep->max_packet_sz_tx < 512 + || hw_ep->max_packet_sz_rx < 512) + continue; + + /* REVISIT: this algorithm is lazy, we should at least + * try to pick a double buffered endpoint. 
+ */ + if (musb->bulk_ep) + continue; + musb->bulk_ep = hw_ep; +#endif + } + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + if (!musb->bulk_ep) { + pr_debug("%s: missing bulk\n", musb_driver_name); + return -EINVAL; + } +#endif + + return 0; +} + +enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, }; + +/* Initialize MUSB (M)HDRC part of the USB hardware subsystem; + * configure endpoints, or take their config from silicon + */ +static int __init musb_core_init(u16 musb_type, struct musb *musb) +{ +#ifdef MUSB_AHB_ID + u32 data; +#endif + u8 reg; + char *type; + u16 hwvers, rev_major, rev_minor; + char aInfo[78], aRevision[32], aDate[12]; + void __iomem *mbase = musb->mregs; + int status = 0; + int i; + + /* log core options (read using indexed model) */ + musb_ep_select(mbase, 0); + reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA); + + strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); + if (reg & MUSB_CONFIGDATA_DYNFIFO) + strcat(aInfo, ", dyn FIFOs"); + if (reg & MUSB_CONFIGDATA_MPRXE) { + strcat(aInfo, ", bulk combine"); +#ifdef C_MP_RX + musb->bulk_combine = true; +#else + strcat(aInfo, " (X)"); /* no driver support */ +#endif + } + if (reg & MUSB_CONFIGDATA_MPTXE) { + strcat(aInfo, ", bulk split"); +#ifdef C_MP_TX + musb->bulk_split = true; +#else + strcat(aInfo, " (X)"); /* no driver support */ +#endif + } + if (reg & MUSB_CONFIGDATA_HBRXE) { + strcat(aInfo, ", HB-ISO Rx"); + strcat(aInfo, " (X)"); /* no driver support */ + } + if (reg & MUSB_CONFIGDATA_HBTXE) { + strcat(aInfo, ", HB-ISO Tx"); + strcat(aInfo, " (X)"); /* no driver support */ + } + if (reg & MUSB_CONFIGDATA_SOFTCONE) + strcat(aInfo, ", SoftConn"); + + printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", + musb_driver_name, reg, aInfo); + +#ifdef MUSB_AHB_ID + data = musb_readl(mbase, 0x404); + sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff), + (data >> 16) & 0xff, (data >> 24) & 0xff); + /* FIXME ID2 and ID3 are unused */ + data = musb_readl(mbase, 0x408); + printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data); + data = musb_readl(mbase, 0x40c); + printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data); + reg = musb_readb(mbase, 0x400); + musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC; +#else + aDate[0] = 0; +#endif + if (MUSB_CONTROLLER_MHDRC == musb_type) { + musb->is_multipoint = 1; + type = "M"; + } else { + musb->is_multipoint = 0; + type = ""; +#ifdef CONFIG_USB_MUSB_HDRC_HCD +#ifndef CONFIG_USB_OTG_BLACKLIST_HUB + printk(KERN_ERR + "%s: kernel must blacklist external hubs\n", + musb_driver_name); +#endif +#endif + } + + /* log release info */ + hwvers = musb_readw(mbase, MUSB_HWVERS); + rev_major = (hwvers >> 10) & 0x1f; + rev_minor = hwvers & 0x3ff; + snprintf(aRevision, 32, "%d.%d%s", rev_major, + rev_minor, (hwvers & 0x8000) ? 
"RC" : ""); + printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", + musb_driver_name, type, aRevision, aDate); + + /* configure ep0 */ + musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; + musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; + + /* discover endpoint configuration */ + musb->nr_endpoints = 1; + musb->epmask = 1; + + if (reg & MUSB_CONFIGDATA_DYNFIFO) { + if (can_dynfifo()) + status = ep_config_from_table(musb); + else { + ERR("reconfigure software for Dynamic FIFOs\n"); + status = -ENODEV; + } + } else { + if (!can_dynfifo()) + status = ep_config_from_hw(musb); + else { + ERR("reconfigure software for static FIFOs\n"); + return -ENODEV; + } + } + + if (status < 0) + return status; + + /* finish init, and print endpoint config */ + for (i = 0; i < musb->nr_endpoints; i++) { + struct musb_hw_ep *hw_ep = musb->endpoints + i; + + hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; +#ifdef CONFIG_USB_TUSB6010 + hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); + hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); + hw_ep->fifo_sync_va = + musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i); + + if (i == 0) + hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF; + else + hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2); +#endif + + hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase; +#ifdef CONFIG_USB_MUSB_HDRC_HCD + hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase; + hw_ep->rx_reinit = 1; + hw_ep->tx_reinit = 1; +#endif + + if (hw_ep->max_packet_sz_tx) { + printk(KERN_DEBUG + "%s: hw_ep %d%s, %smax %d\n", + musb_driver_name, i, + hw_ep->is_shared_fifo ? "shared" : "tx", + hw_ep->tx_double_buffered + ? "doublebuffer, " : "", + hw_ep->max_packet_sz_tx); + } + if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { + printk(KERN_DEBUG + "%s: hw_ep %d%s, %smax %d\n", + musb_driver_name, i, + "rx", + hw_ep->rx_double_buffered + ? "doublebuffer, " : "", + hw_ep->max_packet_sz_rx); + } + if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) + DBG(1, "hw_ep %d not configured\n", i); + } + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) + +static irqreturn_t generic_interrupt(int irq, void *__hci) +{ + unsigned long flags; + irqreturn_t retval = IRQ_NONE; + struct musb *musb = __hci; + + spin_lock_irqsave(&musb->lock, flags); + + musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); + musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); + musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); + + if (musb->int_usb || musb->int_tx || musb->int_rx) + retval = musb_interrupt(musb); + + spin_unlock_irqrestore(&musb->lock, flags); + + /* REVISIT we sometimes get spurious IRQs on g_ep0 + * not clear why... + */ + if (retval != IRQ_HANDLED) + DBG(5, "spurious?\n"); + + return IRQ_HANDLED; +} + +#else +#define generic_interrupt NULL +#endif + +/* + * handle all the irqs defined by the HDRC core. for now we expect: other + * irq sources (phy, dma, etc) will be handled first, musb->int_* values + * will be assigned, and the irq will already have been acked. + * + * called in irq context with spinlock held, irqs blocked + */ +irqreturn_t musb_interrupt(struct musb *musb) +{ + irqreturn_t retval = IRQ_NONE; + u8 devctl, power; + int ep_num; + u32 reg; + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + power = musb_readb(musb->mregs, MUSB_POWER); + + DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n", + (devctl & MUSB_DEVCTL_HM) ? 
"host" : "peripheral", + musb->int_usb, musb->int_tx, musb->int_rx); + + /* the core can interrupt us for multiple reasons; docs have + * a generic interrupt flowchart to follow + */ + if (musb->int_usb & STAGE0_MASK) + retval |= musb_stage0_irq(musb, musb->int_usb, + devctl, power); + + /* "stage 1" is handling endpoint irqs */ + + /* handle endpoint 0 first */ + if (musb->int_tx & 1) { + if (devctl & MUSB_DEVCTL_HM) + retval |= musb_h_ep0_irq(musb); + else + retval |= musb_g_ep0_irq(musb); + } + + /* RX on endpoints 1-15 */ + reg = musb->int_rx >> 1; + ep_num = 1; + while (reg) { + if (reg & 1) { + /* musb_ep_select(musb->mregs, ep_num); */ + /* REVISIT just retval = ep->rx_irq(...) */ + retval = IRQ_HANDLED; + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_rx(musb, ep_num); + } else { + if (is_peripheral_capable()) + musb_g_rx(musb, ep_num); + } + } + + reg >>= 1; + ep_num++; + } + + /* TX on endpoints 1-15 */ + reg = musb->int_tx >> 1; + ep_num = 1; + while (reg) { + if (reg & 1) { + /* musb_ep_select(musb->mregs, ep_num); */ + /* REVISIT just retval |= ep->tx_irq(...) */ + retval = IRQ_HANDLED; + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_tx(musb, ep_num); + } else { + if (is_peripheral_capable()) + musb_g_tx(musb, ep_num); + } + } + reg >>= 1; + ep_num++; + } + + /* finish handling "global" interrupts after handling fifos */ + if (musb->int_usb) + retval |= musb_stage2_irq(musb, + musb->int_usb, devctl, power); + + return retval; +} + + +#ifndef CONFIG_MUSB_PIO_ONLY +static int __initdata use_dma = 1; + +/* "modprobe ... use_dma=0" etc */ +module_param(use_dma, bool, 0); +MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); + +void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) +{ + u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + /* called with controller lock already held */ + + if (!epnum) { +#ifndef CONFIG_USB_TUSB_OMAP_DMA + if (!is_cppi_enabled()) { + /* endpoint 0 */ + if (devctl & MUSB_DEVCTL_HM) + musb_h_ep0_irq(musb); + else + musb_g_ep0_irq(musb); + } +#endif + } else { + /* endpoints 1..15 */ + if (transmit) { + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_tx(musb, epnum); + } else { + if (is_peripheral_capable()) + musb_g_tx(musb, epnum); + } + } else { + /* receive */ + if (devctl & MUSB_DEVCTL_HM) { + if (is_host_capable()) + musb_host_rx(musb, epnum); + } else { + if (is_peripheral_capable()) + musb_g_rx(musb, epnum); + } + } + } +} + +#else +#define use_dma 0 +#endif + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_SYSFS + +static ssize_t +musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&musb->lock, flags); + ret = sprintf(buf, "%s\n", otg_state_string(musb)); + spin_unlock_irqrestore(&musb->lock, flags); + + return ret; +} + +static ssize_t +musb_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + if (!strncmp(buf, "host", 4)) + musb_platform_set_mode(musb, MUSB_HOST); + if (!strncmp(buf, "peripheral", 10)) + musb_platform_set_mode(musb, MUSB_PERIPHERAL); + if (!strncmp(buf, "otg", 3)) + musb_platform_set_mode(musb, MUSB_OTG); + spin_unlock_irqrestore(&musb->lock, flags); + + return n; +} +static DEVICE_ATTR(mode, 0644, musb_mode_show, 
musb_mode_store); + +static ssize_t +musb_vbus_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + unsigned long val; + + if (sscanf(buf, "%lu", &val) < 1) { + printk(KERN_ERR "Invalid VBUS timeout ms value\n"); + return -EINVAL; + } + + spin_lock_irqsave(&musb->lock, flags); + musb->a_wait_bcon = val; + if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON) + musb->is_active = 0; + musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); + spin_unlock_irqrestore(&musb->lock, flags); + + return n; +} + +static ssize_t +musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + unsigned long val; + int vbus; + + spin_lock_irqsave(&musb->lock, flags); + val = musb->a_wait_bcon; + vbus = musb_platform_get_vbus_status(musb); + spin_unlock_irqrestore(&musb->lock, flags); + + return sprintf(buf, "Vbus %s, timeout %lu\n", + vbus ? "on" : "off", val); +} +static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store); + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + +/* Gadget drivers can't know that a host is connected so they might want + * to start SRP, but users can. This allows userspace to trigger SRP. + */ +static ssize_t +musb_srp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + struct musb *musb = dev_to_musb(dev); + unsigned short srp; + + if (sscanf(buf, "%hu", &srp) != 1 + || (srp != 1)) { + printk(KERN_ERR "SRP: Value must be 1\n"); + return -EINVAL; + } + + if (srp == 1) + musb_g_wakeup(musb); + + return n; +} +static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); + +#endif /* CONFIG_USB_GADGET_MUSB_HDRC */ + +#endif /* sysfs */ + +/* Only used to provide driver mode change events */ +static void musb_irq_work(struct work_struct *data) +{ + struct musb *musb = container_of(data, struct musb, irq_work); + static int old_state; + + if (musb->xceiv.state != old_state) { + old_state = musb->xceiv.state; + sysfs_notify(&musb->controller->kobj, NULL, "mode"); + } +} + +/* -------------------------------------------------------------------------- + * Init support + */ + +static struct musb *__init +allocate_instance(struct device *dev, void __iomem *mbase) +{ + struct musb *musb; + struct musb_hw_ep *ep; + int epnum; +#ifdef CONFIG_USB_MUSB_HDRC_HCD + struct usb_hcd *hcd; + + hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id); + if (!hcd) + return NULL; + /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ + + musb = hcd_to_musb(hcd); + INIT_LIST_HEAD(&musb->control); + INIT_LIST_HEAD(&musb->in_bulk); + INIT_LIST_HEAD(&musb->out_bulk); + + hcd->uses_new_polling = 1; + + musb->vbuserr_retry = VBUSERR_RETRY_COUNT; +#else + musb = kzalloc(sizeof *musb, GFP_KERNEL); + if (!musb) + return NULL; + dev_set_drvdata(dev, musb); + +#endif + + musb->mregs = mbase; + musb->ctrl_base = mbase; + musb->nIrq = -ENODEV; + for (epnum = 0, ep = musb->endpoints; + epnum < MUSB_C_NUM_EPS; + epnum++, ep++) { + + ep->musb = musb; + ep->epnum = epnum; + } + + musb->controller = dev; + return musb; +} + +static void musb_free(struct musb *musb) +{ + /* this has multiple entry modes. it handles fault cleanup after + * probe(), where things may be partially set up, as well as rmmod + * cleanup after everything's been de-activated. 
+ */ + +#ifdef CONFIG_SYSFS + device_remove_file(musb->controller, &dev_attr_mode); + device_remove_file(musb->controller, &dev_attr_vbus); +#ifdef CONFIG_USB_MUSB_OTG + device_remove_file(musb->controller, &dev_attr_srp); +#endif +#endif + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + musb_gadget_cleanup(musb); +#endif + + if (musb->nIrq >= 0) { + disable_irq_wake(musb->nIrq); + free_irq(musb->nIrq, musb); + } + if (is_dma_capable() && musb->dma_controller) { + struct dma_controller *c = musb->dma_controller; + + (void) c->stop(c); + dma_controller_destroy(c); + } + + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + musb_platform_exit(musb); + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + + if (musb->clock) { + clk_disable(musb->clock); + clk_put(musb->clock); + } + +#ifdef CONFIG_USB_MUSB_OTG + put_device(musb->xceiv.dev); +#endif + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + usb_put_hcd(musb_to_hcd(musb)); +#else + kfree(musb); +#endif +} + +/* + * Perform generic per-controller initialization. + * + * @pDevice: the controller (already clocked, etc) + * @nIrq: irq + * @mregs: virtual address of controller registers, + * not yet corrected for platform-specific offsets + */ +static int __init +musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) +{ + int status; + struct musb *musb; + struct musb_hdrc_platform_data *plat = dev->platform_data; + + /* The driver might handle more features than the board; OK. + * Fail when the board needs a feature that's not enabled. + */ + if (!plat) { + dev_dbg(dev, "no platform_data?\n"); + return -ENODEV; + } + switch (plat->mode) { + case MUSB_HOST: +#ifdef CONFIG_USB_MUSB_HDRC_HCD + break; +#else + goto bad_config; +#endif + case MUSB_PERIPHERAL: +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + break; +#else + goto bad_config; +#endif + case MUSB_OTG: +#ifdef CONFIG_USB_MUSB_OTG + break; +#else +bad_config: +#endif + default: + dev_err(dev, "incompatible Kconfig role setting\n"); + return -EINVAL; + } + + /* allocate */ + musb = allocate_instance(dev, ctrl); + if (!musb) + return -ENOMEM; + + spin_lock_init(&musb->lock); + musb->board_mode = plat->mode; + musb->board_set_power = plat->set_power; + musb->set_clock = plat->set_clock; + musb->min_power = plat->min_power; + + /* Clock usage is chip-specific ... functional clock (DaVinci, + * OMAP2430), or PHY ref (some TUSB6010 boards). All this core + * code does is make sure a clock handle is available; platform + * code manages it during start/stop and suspend/resume. + */ + if (plat->clock) { + musb->clock = clk_get(dev, plat->clock); + if (IS_ERR(musb->clock)) { + status = PTR_ERR(musb->clock); + musb->clock = NULL; + goto fail; + } + } + + /* assume vbus is off */ + + /* platform adjusts musb->mregs and musb->isr if needed, + * and activates clocks + */ + musb->isr = generic_interrupt; + status = musb_platform_init(musb); + + if (status < 0) + goto fail; + if (!musb->isr) { + status = -ENODEV; + goto fail2; + } + +#ifndef CONFIG_MUSB_PIO_ONLY + if (use_dma && dev->dma_mask) { + struct dma_controller *c; + + c = dma_controller_create(musb, musb->mregs); + musb->dma_controller = c; + if (c) + (void) c->start(c); + } +#endif + /* ideally this would be abstracted in platform setup */ + if (!is_dma_capable() || !musb->dma_controller) + dev->dma_mask = NULL; + + /* be sure interrupts are disabled before connecting ISR */ + musb_platform_disable(musb); + musb_generic_disable(musb); + + /* setup musb parts of the core (especially endpoints) */ + status = musb_core_init(plat->multipoint + ? 
MUSB_CONTROLLER_MHDRC + : MUSB_CONTROLLER_HDRC, musb); + if (status < 0) + goto fail2; + + /* Init IRQ workqueue before request_irq */ + INIT_WORK(&musb->irq_work, musb_irq_work); + + /* attach to the IRQ */ + if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) { + dev_err(dev, "request_irq %d failed!\n", nIrq); + status = -ENODEV; + goto fail2; + } + musb->nIrq = nIrq; +/* FIXME this handles wakeup irqs wrong */ + if (enable_irq_wake(nIrq) == 0) + device_init_wakeup(dev, 1); + + pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n", + musb_driver_name, + ({char *s; + switch (musb->board_mode) { + case MUSB_HOST: s = "Host"; break; + case MUSB_PERIPHERAL: s = "Peripheral"; break; + default: s = "OTG"; break; + }; s; }), + ctrl, + (is_dma_capable() && musb->dma_controller) + ? "DMA" : "PIO", + musb->nIrq); + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + /* host side needs more setup, except for no-host modes */ + if (musb->board_mode != MUSB_PERIPHERAL) { + struct usb_hcd *hcd = musb_to_hcd(musb); + + if (musb->board_mode == MUSB_OTG) + hcd->self.otg_port = 1; + musb->xceiv.host = &hcd->self; + hcd->power_budget = 2 * (plat->power ? : 250); + } +#endif /* CONFIG_USB_MUSB_HDRC_HCD */ + + /* For the host-only role, we can activate right away. + * (We expect the ID pin to be forcibly grounded!!) + * Otherwise, wait till the gadget driver hooks up. + */ + if (!is_otg_enabled(musb) && is_host_enabled(musb)) { + MUSB_HST_MODE(musb); + musb->xceiv.default_a = 1; + musb->xceiv.state = OTG_STATE_A_IDLE; + + status = usb_add_hcd(musb_to_hcd(musb), -1, 0); + + DBG(1, "%s mode, status %d, devctl %02x %c\n", + "HOST", status, + musb_readb(musb->mregs, MUSB_DEVCTL), + (musb_readb(musb->mregs, MUSB_DEVCTL) + & MUSB_DEVCTL_BDEVICE + ? 'B' : 'A')); + + } else /* peripheral is enabled */ { + MUSB_DEV_MODE(musb); + musb->xceiv.default_a = 0; + musb->xceiv.state = OTG_STATE_B_IDLE; + + status = musb_gadget_setup(musb); + + DBG(1, "%s mode, status %d, dev%02x\n", + is_otg_enabled(musb) ? "OTG" : "PERIPHERAL", + status, + musb_readb(musb->mregs, MUSB_DEVCTL)); + + } + + if (status == 0) + musb_debug_create("driver/musb_hdrc", musb); + else { +fail: + if (musb->clock) + clk_put(musb->clock); + device_init_wakeup(dev, 0); + musb_free(musb); + return status; + } + +#ifdef CONFIG_SYSFS + status = device_create_file(dev, &dev_attr_mode); + status = device_create_file(dev, &dev_attr_vbus); +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + status = device_create_file(dev, &dev_attr_srp); +#endif /* CONFIG_USB_GADGET_MUSB_HDRC */ + status = 0; +#endif + + return status; + +fail2: + musb_platform_exit(musb); + goto fail; +} + +/*-------------------------------------------------------------------------*/ + +/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just + * bridge to a platform device; this driver then suffices. 
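+ *
+ * A minimal glue sketch (resource values illustrative only): register a
+ * platform device carrying the register window and IRQ that musb_probe()
+ * looks up with platform_get_resource() and platform_get_irq():
+ *
+ *	static struct resource musb_resources[] = {
+ *		{ .flags = IORESOURCE_MEM, .start = ..., .end = ... },
+ *		{ .flags = IORESOURCE_IRQ, .start = ... },
+ *	};
+ *
+ *	platform_device_register_simple(musb_driver_name, -1,
+ *			musb_resources, ARRAY_SIZE(musb_resources));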
+ */ + +#ifndef CONFIG_MUSB_PIO_ONLY +static u64 *orig_dma_mask; +#endif + +static int __init musb_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int irq = platform_get_irq(pdev, 0); + struct resource *iomem; + void __iomem *base; + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iomem || irq == 0) + return -ENODEV; + + base = ioremap(iomem->start, iomem->end - iomem->start + 1); + if (!base) { + dev_err(dev, "ioremap failed\n"); + return -ENOMEM; + } + +#ifndef CONFIG_MUSB_PIO_ONLY + /* clobbered by use_dma=n */ + orig_dma_mask = dev->dma_mask; +#endif + return musb_init_controller(dev, irq, base); +} + +static int __devexit musb_remove(struct platform_device *pdev) +{ + struct musb *musb = dev_to_musb(&pdev->dev); + void __iomem *ctrl_base = musb->ctrl_base; + + /* this gets called on rmmod. + * - Host mode: host may still be active + * - Peripheral mode: peripheral is deactivated (or never-activated) + * - OTG mode: both roles are deactivated (or never-activated) + */ + musb_shutdown(pdev); + musb_debug_delete("driver/musb_hdrc", musb); +#ifdef CONFIG_USB_MUSB_HDRC_HCD + if (musb->board_mode == MUSB_HOST) + usb_remove_hcd(musb_to_hcd(musb)); +#endif + musb_free(musb); + iounmap(ctrl_base); + device_init_wakeup(&pdev->dev, 0); +#ifndef CONFIG_MUSB_PIO_ONLY + pdev->dev.dma_mask = orig_dma_mask; +#endif + return 0; +} + +#ifdef CONFIG_PM + +static int musb_suspend(struct platform_device *pdev, pm_message_t message) +{ + unsigned long flags; + struct musb *musb = dev_to_musb(&pdev->dev); + + if (!musb->clock) + return 0; + + spin_lock_irqsave(&musb->lock, flags); + + if (is_peripheral_active(musb)) { + /* FIXME force disconnect unless we know USB will wake + * the system up quickly enough to respond ... + */ + } else if (is_host_active(musb)) { + /* we know all the children are suspended; sometimes + * they will even be wakeup-enabled. + */ + } + + if (musb->set_clock) + musb->set_clock(musb->clock, 0); + else + clk_disable(musb->clock); + spin_unlock_irqrestore(&musb->lock, flags); + return 0; +} + +static int musb_resume(struct platform_device *pdev) +{ + unsigned long flags; + struct musb *musb = dev_to_musb(&pdev->dev); + + if (!musb->clock) + return 0; + + spin_lock_irqsave(&musb->lock, flags); + + if (musb->set_clock) + musb->set_clock(musb->clock, 1); + else + clk_enable(musb->clock); + + /* for static cmos like DaVinci, register values were preserved + * unless for some reason the whole soc powered down and we're + * not treating that as a whole-system restart (e.g. swsusp) + */ + spin_unlock_irqrestore(&musb->lock, flags); + return 0; +} + +#else +#define musb_suspend NULL +#define musb_resume NULL +#endif + +static struct platform_driver musb_driver = { + .driver = { + .name = (char *)musb_driver_name, + .bus = &platform_bus_type, + .owner = THIS_MODULE, + }, + .remove = __devexit_p(musb_remove), + .shutdown = musb_shutdown, + .suspend = musb_suspend, + .resume = musb_resume, +}; + +/*-------------------------------------------------------------------------*/ + +static int __init musb_init(void) +{ +#ifdef CONFIG_USB_MUSB_HDRC_HCD + if (usb_disabled()) + return 0; +#endif + + pr_info("%s: version " MUSB_VERSION ", " +#ifdef CONFIG_MUSB_PIO_ONLY + "pio" +#elif defined(CONFIG_USB_TI_CPPI_DMA) + "cppi-dma" +#elif defined(CONFIG_USB_INVENTRA_DMA) + "musb-dma" +#elif defined(CONFIG_USB_TUSB_OMAP_DMA) + "tusb-omap-dma" +#else + "?dma?" 
+#endif + ", " +#ifdef CONFIG_USB_MUSB_OTG + "otg (peripheral+host)" +#elif defined(CONFIG_USB_GADGET_MUSB_HDRC) + "peripheral" +#elif defined(CONFIG_USB_MUSB_HDRC_HCD) + "host" +#endif + ", debug=%d\n", + musb_driver_name, debug); + return platform_driver_probe(&musb_driver, musb_probe); +} + +/* make us init after usbcore and before usb + * gadget and host-side drivers start to register + */ +subsys_initcall(musb_init); + +static void __exit musb_cleanup(void) +{ + platform_driver_unregister(&musb_driver); +} +module_exit(musb_cleanup); diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h new file mode 100644 index 00000000000..90035c12ab5 --- /dev/null +++ b/drivers/usb/musb/musb_core.h @@ -0,0 +1,517 @@ +/* + * MUSB OTG driver defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_CORE_H__ +#define __MUSB_CORE_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct musb; +struct musb_hw_ep; +struct musb_ep; + + +#include "musb_debug.h" +#include "musb_dma.h" + +#ifdef CONFIG_USB_MUSB_SOC +/* + * Get core configuration from a header converted (by cfg_conv) + * from the Verilog config file generated by the core config utility + * + * For now we assume that header is provided along with other + * arch-specific files. Discrete chips will need a build tweak. + * So will using AHB IDs from silicon that provides them. + */ +#include +#endif + +#include "musb_io.h" +#include "musb_regs.h" + +#include "musb_gadget.h" +#include "../core/hcd.h" +#include "musb_host.h" + + + +#ifdef CONFIG_USB_MUSB_OTG + +#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST) +#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL) +#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG) + +/* NOTE: otg and peripheral-only state machines start at B_IDLE. + * OTG or host-only go to A_IDLE when ID is sensed. 
+ */ +#define is_peripheral_active(m) (!(m)->is_host) +#define is_host_active(m) ((m)->is_host) + +#else +#define is_peripheral_enabled(musb) is_peripheral_capable() +#define is_host_enabled(musb) is_host_capable() +#define is_otg_enabled(musb) 0 + +#define is_peripheral_active(musb) is_peripheral_capable() +#define is_host_active(musb) is_host_capable() +#endif + +#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL) +/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always + * override that choice selection (often USB_GADGET_DUMMY_HCD). + */ +#ifndef CONFIG_USB_GADGET_MUSB_HDRC +#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC +#endif +#endif /* need MUSB gadget selection */ + + +#ifdef CONFIG_PROC_FS +#include +#define MUSB_CONFIG_PROC_FS +#endif + +/****************************** PERIPHERAL ROLE *****************************/ + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + +#define is_peripheral_capable() (1) + +extern irqreturn_t musb_g_ep0_irq(struct musb *); +extern void musb_g_tx(struct musb *, u8); +extern void musb_g_rx(struct musb *, u8); +extern void musb_g_reset(struct musb *); +extern void musb_g_suspend(struct musb *); +extern void musb_g_resume(struct musb *); +extern void musb_g_wakeup(struct musb *); +extern void musb_g_disconnect(struct musb *); + +#else + +#define is_peripheral_capable() (0) + +static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; } +static inline void musb_g_reset(struct musb *m) {} +static inline void musb_g_suspend(struct musb *m) {} +static inline void musb_g_resume(struct musb *m) {} +static inline void musb_g_wakeup(struct musb *m) {} +static inline void musb_g_disconnect(struct musb *m) {} + +#endif + +/****************************** HOST ROLE ***********************************/ + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + +#define is_host_capable() (1) + +extern irqreturn_t musb_h_ep0_irq(struct musb *); +extern void musb_host_tx(struct musb *, u8); +extern void musb_host_rx(struct musb *, u8); + +#else + +#define is_host_capable() (0) + +static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; } +static inline void musb_host_tx(struct musb *m, u8 e) {} +static inline void musb_host_rx(struct musb *m, u8 e) {} + +#endif + + +/****************************** CONSTANTS ********************************/ + +#ifndef MUSB_C_NUM_EPS +#define MUSB_C_NUM_EPS ((u8)16) +#endif + +#ifndef MUSB_MAX_END0_PACKET +#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE) +#endif + +/* host side ep0 states */ +enum musb_h_ep0_state { + MUSB_EP0_IDLE, + MUSB_EP0_START, /* expect ack of setup */ + MUSB_EP0_IN, /* expect IN DATA */ + MUSB_EP0_OUT, /* expect ack of OUT DATA */ + MUSB_EP0_STATUS, /* expect ack of STATUS */ +} __attribute__ ((packed)); + +/* peripheral side ep0 states */ +enum musb_g_ep0_state { + MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */ + MUSB_EP0_STAGE_TX, /* IN data */ + MUSB_EP0_STAGE_RX, /* OUT data */ + MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */ + MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */ + MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */ +} __attribute__ ((packed)); + +/* OTG protocol constants */ +#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */ +#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */ +#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */ + +/*************************** REGISTER ACCESS ********************************/ + +/* Endpoint registers (other than dynfifo setup) can be accessed either + * directly with the "flat" 
model, or after setting up an index register. + */ + +#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \ + || defined(CONFIG_ARCH_OMAP3430) +/* REVISIT indexed access seemed to + * misbehave (on DaVinci) for at least peripheral IN ... + */ +#define MUSB_FLAT_REG +#endif + +/* TUSB mapping: "flat" plus ep0 special cases */ +#if defined(CONFIG_USB_TUSB6010) +#define musb_ep_select(_mbase, _epnum) \ + musb_writeb((_mbase), MUSB_INDEX, (_epnum)) +#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET + +/* "flat" mapping: each endpoint has its own i/o address */ +#elif defined(MUSB_FLAT_REG) +#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum))) +#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET + +/* "indexed" mapping: INDEX register controls register bank select */ +#else +#define musb_ep_select(_mbase, _epnum) \ + musb_writeb((_mbase), MUSB_INDEX, (_epnum)) +#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET +#endif + +/****************************** FUNCTIONS ********************************/ + +#define MUSB_HST_MODE(_musb)\ + { (_musb)->is_host = true; } +#define MUSB_DEV_MODE(_musb) \ + { (_musb)->is_host = false; } + +#define test_devctl_hst_mode(_x) \ + (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM) + +#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral") + +/******************************** TYPES *************************************/ + +/* + * struct musb_hw_ep - endpoint hardware (bidirectional) + * + * Ordered slightly for better cacheline locality. + */ +struct musb_hw_ep { + struct musb *musb; + void __iomem *fifo; + void __iomem *regs; + +#ifdef CONFIG_USB_TUSB6010 + void __iomem *conf; +#endif + + /* index in musb->endpoints[] */ + u8 epnum; + + /* hardware configuration, possibly dynamic */ + bool is_shared_fifo; + bool tx_double_buffered; + bool rx_double_buffered; + u16 max_packet_sz_tx; + u16 max_packet_sz_rx; + + struct dma_channel *tx_channel; + struct dma_channel *rx_channel; + +#ifdef CONFIG_USB_TUSB6010 + /* TUSB has "asynchronous" and "synchronous" dma modes */ + dma_addr_t fifo_async; + dma_addr_t fifo_sync; + void __iomem *fifo_sync_va; +#endif + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + void __iomem *target_regs; + + /* currently scheduled peripheral endpoint */ + struct musb_qh *in_qh; + struct musb_qh *out_qh; + + u8 rx_reinit; + u8 tx_reinit; +#endif + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + /* peripheral side */ + struct musb_ep ep_in; /* TX */ + struct musb_ep ep_out; /* RX */ +#endif +}; + +static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep) +{ +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + return next_request(&hw_ep->ep_in); +#else + return NULL; +#endif +} + +static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep) +{ +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + return next_request(&hw_ep->ep_out); +#else + return NULL; +#endif +} + +/* + * struct musb - Driver instance data. + */ +struct musb { + /* device lock */ + spinlock_t lock; + struct clk *clock; + irqreturn_t (*isr)(int, void *); + struct work_struct irq_work; + +/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ +#define MUSB_PORT_STAT_RESUME (1 << 31) + + u32 port1_status; + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + unsigned long rh_timer; + + enum musb_h_ep0_state ep0_stage; + + /* bulk traffic normally dedicates endpoint hardware, and each + * direction has its own ring of host side endpoints. 
+ * we try to progress the transfer at the head of each endpoint's + * queue until it completes or NAKs too much; then we try the next + * endpoint. + */ + struct musb_hw_ep *bulk_ep; + + struct list_head control; /* of musb_qh */ + struct list_head in_bulk; /* of musb_qh */ + struct list_head out_bulk; /* of musb_qh */ + struct musb_qh *periodic[32]; /* tree of interrupt+iso */ +#endif + + /* called with IRQs blocked; ON/nonzero implies starting a session, + * and waiting at least a_wait_vrise_tmout. + */ + void (*board_set_vbus)(struct musb *, int is_on); + + struct dma_controller *dma_controller; + + struct device *controller; + void __iomem *ctrl_base; + void __iomem *mregs; + +#ifdef CONFIG_USB_TUSB6010 + dma_addr_t async; + dma_addr_t sync; + void __iomem *sync_va; +#endif + + /* passed down from chip/board specific irq handlers */ + u8 int_usb; + u16 int_rx; + u16 int_tx; + + struct otg_transceiver xceiv; + + int nIrq; + + struct musb_hw_ep endpoints[MUSB_C_NUM_EPS]; +#define control_ep endpoints + +#define VBUSERR_RETRY_COUNT 3 + u16 vbuserr_retry; + u16 epmask; + u8 nr_endpoints; + + u8 board_mode; /* enum musb_mode */ + int (*board_set_power)(int state); + + int (*set_clock)(struct clk *clk, int is_active); + + u8 min_power; /* vbus for periph, in mA/2 */ + + bool is_host; + + int a_wait_bcon; /* VBUS timeout in msecs */ + unsigned long idle_timeout; /* Next timeout in jiffies */ + + /* active means connected and not suspended */ + unsigned is_active:1; + + unsigned is_multipoint:1; + unsigned ignore_disconnect:1; /* during bus resets */ + +#ifdef C_MP_TX + unsigned bulk_split:1; +#define can_bulk_split(musb,type) \ + (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) +#else +#define can_bulk_split(musb, type) 0 +#endif + +#ifdef C_MP_RX + unsigned bulk_combine:1; +#define can_bulk_combine(musb,type) \ + (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) +#else +#define can_bulk_combine(musb, type) 0 +#endif + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + /* is_suspended means USB B_PERIPHERAL suspend */ + unsigned is_suspended:1; + + /* may_wakeup means remote wakeup is enabled */ + unsigned may_wakeup:1; + + /* is_self_powered is reported in device status and the + * config descriptor. is_bus_powered means B_PERIPHERAL + * draws some VBUS current; both can be true. 
+ */ + unsigned is_self_powered:1; + unsigned is_bus_powered:1; + + unsigned set_address:1; + unsigned test_mode:1; + unsigned softconnect:1; + + u8 address; + u8 test_mode_nr; + u16 ackpend; /* ep0 */ + enum musb_g_ep0_state ep0_state; + struct usb_gadget g; /* the gadget */ + struct usb_gadget_driver *gadget_driver; /* its driver */ +#endif + +#ifdef MUSB_CONFIG_PROC_FS + struct proc_dir_entry *proc_entry; +#endif +}; + +static inline void musb_set_vbus(struct musb *musb, int is_on) +{ + musb->board_set_vbus(musb, is_on); +} + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC +static inline struct musb *gadget_to_musb(struct usb_gadget *g) +{ + return container_of(g, struct musb, g); +} +#endif + + +/***************************** Glue it together *****************************/ + +extern const char musb_driver_name[]; + +extern void musb_start(struct musb *musb); +extern void musb_stop(struct musb *musb); + +extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); +extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); + +extern void musb_load_testpacket(struct musb *); + +extern irqreturn_t musb_interrupt(struct musb *); + +extern void musb_platform_enable(struct musb *musb); +extern void musb_platform_disable(struct musb *musb); + +extern void musb_hnp_stop(struct musb *musb); + +extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode); + +#if defined(CONFIG_USB_TUSB6010) || \ + defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) +extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout); +#else +#define musb_platform_try_idle(x, y) do {} while (0) +#endif + +#ifdef CONFIG_USB_TUSB6010 +extern int musb_platform_get_vbus_status(struct musb *musb); +#else +#define musb_platform_get_vbus_status(x) 0 +#endif + +extern int __init musb_platform_init(struct musb *musb); +extern int musb_platform_exit(struct musb *musb); + +/*-------------------------- ProcFS definitions ---------------------*/ + +struct proc_dir_entry; + +#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS) +extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data); +extern void musb_debug_delete(char *name, struct musb *data); + +#else +static inline struct proc_dir_entry * +musb_debug_create(char *name, struct musb *data) +{ + return NULL; +} +static inline void musb_debug_delete(char *name, struct musb *data) +{ +} +#endif + +#endif /* __MUSB_CORE_H__ */ diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h new file mode 100644 index 00000000000..3bdb311e820 --- /dev/null +++ b/drivers/usb/musb/musb_debug.h @@ -0,0 +1,66 @@ +/* + * MUSB OTG driver debug defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_LINUX_DEBUG_H__ +#define __MUSB_LINUX_DEBUG_H__ + +#define yprintk(facility, format, args...) \ + do { printk(facility "%s %d: " format , \ + __func__, __LINE__ , ## args); } while (0) +#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args) +#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args) +#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args) + +#define xprintk(level, facility, format, args...) do { \ + if (_dbg_level(level)) { \ + printk(facility "%s %d: " format , \ + __func__, __LINE__ , ## args); \ + } } while (0) + +#if MUSB_DEBUG > 0 +extern unsigned debug; +#else +#define debug 0 +#endif + +static inline int _dbg_level(unsigned l) +{ + return debug >= l; +} + +#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args) + +extern const char *otg_state_string(struct musb *); + +#endif /* __MUSB_LINUX_DEBUG_H__ */ diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h new file mode 100644 index 00000000000..0a2c4e3602c --- /dev/null +++ b/drivers/usb/musb/musb_dma.h @@ -0,0 +1,172 @@ +/* + * MUSB OTG driver DMA controller abstraction + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_DMA_H__ +#define __MUSB_DMA_H__ + +struct musb_hw_ep; + +/* + * DMA Controller Abstraction + * + * DMA Controllers are abstracted to allow use of a variety of different + * implementations of DMA, as allowed by the Inventra USB cores. On the + * host side, usbcore sets up the DMA mappings and flushes caches; on the + * peripheral side, the gadget controller driver does. Responsibilities + * of a DMA controller driver include: + * + * - Handling the details of moving multiple USB packets + * in cooperation with the Inventra USB core, including especially + * the correct RX side treatment of short packets and buffer-full + * states (both of which terminate transfers). + * + * - Knowing the correlation between dma channels and the + * Inventra core's local endpoint resources and data direction. + * + * - Maintaining a list of allocated/available channels. + * + * - Updating channel status on interrupts, + * whether shared with the Inventra core or separate. + */ + +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +#ifndef CONFIG_MUSB_PIO_ONLY +#define is_dma_capable() (1) +#else +#define is_dma_capable() (0) +#endif + +#ifdef CONFIG_USB_TI_CPPI_DMA +#define is_cppi_enabled() 1 +#else +#define is_cppi_enabled() 0 +#endif + +#ifdef CONFIG_USB_TUSB_OMAP_DMA +#define tusb_dma_omap() 1 +#else +#define tusb_dma_omap() 0 +#endif + +/* + * DMA channel status ... updated by the dma controller driver whenever that + * status changes, and protected by the overall controller spinlock. + */ +enum dma_channel_status { + /* unallocated */ + MUSB_DMA_STATUS_UNKNOWN, + /* allocated ... but not busy, no errors */ + MUSB_DMA_STATUS_FREE, + /* busy ... transactions are active */ + MUSB_DMA_STATUS_BUSY, + /* transaction(s) aborted due to ... dma or memory bus error */ + MUSB_DMA_STATUS_BUS_ABORT, + /* transaction(s) aborted due to ... core error or USB fault */ + MUSB_DMA_STATUS_CORE_ABORT +}; + +struct dma_controller; + +/** + * struct dma_channel - A DMA channel. + * @private_data: channel-private data + * @max_len: the maximum number of bytes the channel can move in one + * transaction (typically representing many USB maximum-sized packets) + * @actual_len: how many bytes have been transferred + * @status: current channel status (updated e.g. on interrupt) + * @desired_mode: true if mode 1 is desired; false if mode 0 is desired + * + * channels are associated with an endpoint for the duration of at least + * one usb transfer. + */ +struct dma_channel { + void *private_data; + /* FIXME not void* private_data, but a dma_controller * */ + size_t max_len; + size_t actual_len; + enum dma_channel_status status; + bool desired_mode; +}; + +/* + * dma_channel_status - return status of dma channel + * @c: the channel + * + * Returns the software's view of the channel status. If that status is BUSY + * then it's possible that the hardware has completed (or aborted) a transfer, + * so the driver needs to update that status. 
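+ *
+ * For illustration, callers in this driver typically just bail out while
+ * the channel is busy and wait for the DMA completion interrupt, e.g.:
+ *
+ *	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY)
+ *		return;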
+ */ +static inline enum dma_channel_status +dma_channel_status(struct dma_channel *c) +{ + return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN; +} + +/** + * struct dma_controller - A DMA Controller. + * @start: call this to start a DMA controller; + * return 0 on success, else negative errno + * @stop: call this to stop a DMA controller + * return 0 on success, else negative errno + * @channel_alloc: call this to allocate a DMA channel + * @channel_release: call this to release a DMA channel + * @channel_abort: call this to abort a pending DMA transaction, + * returning it to FREE (but allocated) state + * + * Controllers manage dma channels. + */ +struct dma_controller { + int (*start)(struct dma_controller *); + int (*stop)(struct dma_controller *); + struct dma_channel *(*channel_alloc)(struct dma_controller *, + struct musb_hw_ep *, u8 is_tx); + void (*channel_release)(struct dma_channel *); + int (*channel_program)(struct dma_channel *channel, + u16 maxpacket, u8 mode, + dma_addr_t dma_addr, + u32 length); + int (*channel_abort)(struct dma_channel *); +}; + +/* called after channel_program(), may indicate a fault */ +extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit); + + +extern struct dma_controller *__init +dma_controller_create(struct musb *, void __iomem *); + +extern void dma_controller_destroy(struct dma_controller *); + +#endif /* __MUSB_DMA_H__ */ diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c new file mode 100644 index 00000000000..b3773f13ee0 --- /dev/null +++ b/drivers/usb/musb/musb_gadget.c @@ -0,0 +1,2033 @@ +/* + * MUSB OTG driver peripheral support + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "musb_core.h" + + +/* MUSB PERIPHERAL status 3-mar-2006: + * + * - EP0 seems solid. It passes both USBCV and usbtest control cases. 
+ * Minor glitches: + * + * + remote wakeup to Linux hosts work, but saw USBCV failures; + * in one test run (operator error?) + * + endpoint halt tests -- in both usbtest and usbcv -- seem + * to break when dma is enabled ... is something wrongly + * clearing SENDSTALL? + * + * - Mass storage behaved ok when last tested. Network traffic patterns + * (with lots of short transfers etc) need retesting; they turn up the + * worst cases of the DMA, since short packets are typical but are not + * required. + * + * - TX/IN + * + both pio and dma behave in with network and g_zero tests + * + no cppi throughput issues other than no-hw-queueing + * + failed with FLAT_REG (DaVinci) + * + seems to behave with double buffering, PIO -and- CPPI + * + with gadgetfs + AIO, requests got lost? + * + * - RX/OUT + * + both pio and dma behave in with network and g_zero tests + * + dma is slow in typical case (short_not_ok is clear) + * + double buffering ok with PIO + * + double buffering *FAILS* with CPPI, wrong data bytes sometimes + * + request lossage observed with gadgetfs + * + * - ISO not tested ... might work, but only weakly isochronous + * + * - Gadget driver disabling of softconnect during bind() is ignored; so + * drivers can't hold off host requests until userspace is ready. + * (Workaround: they can turn it off later.) + * + * - PORTABILITY (assumes PIO works): + * + DaVinci, basically works with cppi dma + * + OMAP 2430, ditto with mentor dma + * + TUSB 6010, platform-specific dma in the works + */ + +/* ----------------------------------------------------------------------- */ + +/* + * Immediately complete a request. + * + * @param request the request to complete + * @param status the status to complete the request with + * Context: controller locked, IRQs blocked. + */ +void musb_g_giveback( + struct musb_ep *ep, + struct usb_request *request, + int status) +__releases(ep->musb->lock) +__acquires(ep->musb->lock) +{ + struct musb_request *req; + struct musb *musb; + int busy = ep->busy; + + req = to_musb_request(request); + + list_del(&request->list); + if (req->request.status == -EINPROGRESS) + req->request.status = status; + musb = req->musb; + + ep->busy = 1; + spin_unlock(&musb->lock); + if (is_dma_capable()) { + if (req->mapped) { + dma_unmap_single(musb->controller, + req->request.dma, + req->request.length, + req->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + req->request.dma = DMA_ADDR_INVALID; + req->mapped = 0; + } else if (req->request.dma != DMA_ADDR_INVALID) + dma_sync_single_for_cpu(musb->controller, + req->request.dma, + req->request.length, + req->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + } + if (request->status == 0) + DBG(5, "%s done request %p, %d/%d\n", + ep->end_point.name, request, + req->request.actual, req->request.length); + else + DBG(2, "%s request %p, %d/%d fault %d\n", + ep->end_point.name, request, + req->request.actual, req->request.length, + request->status); + req->request.complete(&req->ep->end_point, &req->request); + spin_lock(&musb->lock); + ep->busy = busy; +} + +/* ----------------------------------------------------------------------- */ + +/* + * Abort requests queued to an endpoint using the status. Synchronous. + * caller locked controller and blocked irqs, and selected this ep. 
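+ *
+ * For illustration, musb_gadget_disable() below ends up doing roughly:
+ *
+ *	spin_lock_irqsave(&musb->lock, flags);
+ *	musb_ep_select(musb->mregs, epnum);
+ *	...
+ *	nuke(musb_ep, -ESHUTDOWN);
+ *	spin_unlock_irqrestore(&musb->lock, flags);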
+ */ +static void nuke(struct musb_ep *ep, const int status) +{ + struct musb_request *req = NULL; + void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; + + ep->busy = 1; + + if (is_dma_capable() && ep->dma) { + struct dma_controller *c = ep->musb->dma_controller; + int value; + if (ep->is_in) { + musb_writew(epio, MUSB_TXCSR, + 0 | MUSB_TXCSR_FLUSHFIFO); + musb_writew(epio, MUSB_TXCSR, + 0 | MUSB_TXCSR_FLUSHFIFO); + } else { + musb_writew(epio, MUSB_RXCSR, + 0 | MUSB_RXCSR_FLUSHFIFO); + musb_writew(epio, MUSB_RXCSR, + 0 | MUSB_RXCSR_FLUSHFIFO); + } + + value = c->channel_abort(ep->dma); + DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value); + c->channel_release(ep->dma); + ep->dma = NULL; + } + + while (!list_empty(&(ep->req_list))) { + req = container_of(ep->req_list.next, struct musb_request, + request.list); + musb_g_giveback(ep, &req->request, status); + } +} + +/* ----------------------------------------------------------------------- */ + +/* Data transfers - pure PIO, pure DMA, or mixed mode */ + +/* + * This assumes the separate CPPI engine is responding to DMA requests + * from the usb core ... sequenced a bit differently from mentor dma. + */ + +static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) +{ + if (can_bulk_split(musb, ep->type)) + return ep->hw_ep->max_packet_sz_tx; + else + return ep->packet_sz; +} + + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Peripheral tx (IN) using Mentor DMA works as follows: + Only mode 0 is used for transfers <= wPktSize, + mode 1 is used for larger transfers, + + One of the following happens: + - Host sends IN token which causes an endpoint interrupt + -> TxAvail + -> if DMA is currently busy, exit. + -> if queue is non-empty, txstate(). + + - Request is queued by the gadget driver. + -> if queue was previously empty, txstate() + + txstate() + -> start + /\ -> setup DMA + | (data is transferred to the FIFO, then sent out when + | IN token(s) are recd from Host. + | -> DMA interrupt on completion + | calls TxAvail. + | -> stop DMA, ~DmaEenab, + | -> set TxPktRdy for last short pkt or zlp + | -> Complete Request + | -> Continue next request (call txstate) + |___________________________________| + + * Non-Mentor DMA engines can of course work differently, such as by + * upleveling from irq-per-packet to irq-per-buffer. + */ + +#endif + +/* + * An endpoint is transmitting data. This can be called either from + * the IRQ routine or from ep.queue() to kickstart a request on an + * endpoint. + * + * Context: controller locked, IRQs blocked, endpoint selected + */ +static void txstate(struct musb *musb, struct musb_request *req) +{ + u8 epnum = req->epnum; + struct musb_ep *musb_ep; + void __iomem *epio = musb->endpoints[epnum].regs; + struct usb_request *request; + u16 fifo_count = 0, csr; + int use_dma = 0; + + musb_ep = req->ep; + + /* we shouldn't get here while DMA is active ... but we do ... 
*/ + if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { + DBG(4, "dma pending...\n"); + return; + } + + /* read TXCSR before */ + csr = musb_readw(epio, MUSB_TXCSR); + + request = &req->request; + fifo_count = min(max_ep_writesize(musb, musb_ep), + (int)(request->length - request->actual)); + + if (csr & MUSB_TXCSR_TXPKTRDY) { + DBG(5, "%s old packet still ready , txcsr %03x\n", + musb_ep->end_point.name, csr); + return; + } + + if (csr & MUSB_TXCSR_P_SENDSTALL) { + DBG(5, "%s stalling, txcsr %03x\n", + musb_ep->end_point.name, csr); + return; + } + + DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", + epnum, musb_ep->packet_sz, fifo_count, + csr); + +#ifndef CONFIG_MUSB_PIO_ONLY + if (is_dma_capable() && musb_ep->dma) { + struct dma_controller *c = musb->dma_controller; + + use_dma = (request->dma != DMA_ADDR_INVALID); + + /* MUSB_TXCSR_P_ISO is still set correctly */ + +#ifdef CONFIG_USB_INVENTRA_DMA + { + size_t request_size; + + /* setup DMA, then program endpoint CSR */ + request_size = min(request->length, + musb_ep->dma->max_len); + if (request_size <= musb_ep->packet_sz) + musb_ep->dma->desired_mode = 0; + else + musb_ep->dma->desired_mode = 1; + + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + musb_ep->dma->desired_mode, + request->dma, request_size); + if (use_dma) { + if (musb_ep->dma->desired_mode == 0) { + /* ASSERT: DMAENAB is clear */ + csr &= ~(MUSB_TXCSR_AUTOSET | + MUSB_TXCSR_DMAMODE); + csr |= (MUSB_TXCSR_DMAENAB | + MUSB_TXCSR_MODE); + /* against programming guide */ + } else + csr |= (MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_DMAMODE + | MUSB_TXCSR_MODE); + + csr &= ~MUSB_TXCSR_P_UNDERRUN; + musb_writew(epio, MUSB_TXCSR, csr); + } + } + +#elif defined(CONFIG_USB_TI_CPPI_DMA) + /* program endpoint CSR first, then setup DMA */ + csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAMODE + | MUSB_TXCSR_P_UNDERRUN + | MUSB_TXCSR_TXPKTRDY); + csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; + musb_writew(epio, MUSB_TXCSR, + (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) + | csr); + + /* ensure writebuffer is empty */ + csr = musb_readw(epio, MUSB_TXCSR); + + /* NOTE host side sets DMAENAB later than this; both are + * OK since the transfer dma glue (between CPPI and Mentor + * fifos) just tells CPPI it could start. Data only moves + * to the USB TX fifo when both fifos are ready. + */ + + /* "mode" is irrelevant here; handle terminating ZLPs like + * PIO does, since the hardware RNDIS mode seems unreliable + * except for the last-packet-is-already-short case. + */ + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + 0, + request->dma, + request->length); + if (!use_dma) { + c->channel_release(musb_ep->dma); + musb_ep->dma = NULL; + /* ASSERT: DMAENAB clear */ + csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); + /* invariant: prequest->buf is non-null */ + } +#elif defined(CONFIG_USB_TUSB_OMAP_DMA) + use_dma = use_dma && c->channel_program( + musb_ep->dma, musb_ep->packet_sz, + request->zero, + request->dma, + request->length); +#endif + } +#endif + + if (!use_dma) { + musb_write_fifo(musb_ep->hw_ep, fifo_count, + (u8 *) (request->buf + request->actual)); + request->actual += fifo_count; + csr |= MUSB_TXCSR_TXPKTRDY; + csr &= ~MUSB_TXCSR_P_UNDERRUN; + musb_writew(epio, MUSB_TXCSR, csr); + } + + /* host may already have the data when this message shows... */ + DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", + musb_ep->end_point.name, use_dma ? 
"dma" : "pio", + request->actual, request->length, + musb_readw(epio, MUSB_TXCSR), + fifo_count, + musb_readw(epio, MUSB_TXMAXP)); +} + +/* + * FIFO state update (e.g. data ready). + * Called from IRQ, with controller locked. + */ +void musb_g_tx(struct musb *musb, u8 epnum) +{ + u16 csr; + struct usb_request *request; + u8 __iomem *mbase = musb->mregs; + struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; + void __iomem *epio = musb->endpoints[epnum].regs; + struct dma_channel *dma; + + musb_ep_select(mbase, epnum); + request = next_request(musb_ep); + + csr = musb_readw(epio, MUSB_TXCSR); + DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); + + dma = is_dma_capable() ? musb_ep->dma : NULL; + do { + /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX + * probably rates reporting as a host error + */ + if (csr & MUSB_TXCSR_P_SENTSTALL) { + csr |= MUSB_TXCSR_P_WZC_BITS; + csr &= ~MUSB_TXCSR_P_SENTSTALL; + musb_writew(epio, MUSB_TXCSR, csr); + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + musb->dma_controller->channel_abort(dma); + } + + if (request) + musb_g_giveback(musb_ep, request, -EPIPE); + + break; + } + + if (csr & MUSB_TXCSR_P_UNDERRUN) { + /* we NAKed, no big deal ... little reason to care */ + csr |= MUSB_TXCSR_P_WZC_BITS; + csr &= ~(MUSB_TXCSR_P_UNDERRUN + | MUSB_TXCSR_TXPKTRDY); + musb_writew(epio, MUSB_TXCSR, csr); + DBG(20, "underrun on ep%d, req %p\n", epnum, request); + } + + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + /* SHOULD NOT HAPPEN ... has with cppi though, after + * changing SENDSTALL (and other cases); harmless? + */ + DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); + break; + } + + if (request) { + u8 is_dma = 0; + + if (dma && (csr & MUSB_TXCSR_DMAENAB)) { + is_dma = 1; + csr |= MUSB_TXCSR_P_WZC_BITS; + csr &= ~(MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_P_UNDERRUN + | MUSB_TXCSR_TXPKTRDY); + musb_writew(epio, MUSB_TXCSR, csr); + /* ensure writebuffer is empty */ + csr = musb_readw(epio, MUSB_TXCSR); + request->actual += musb_ep->dma->actual_len; + DBG(4, "TXCSR%d %04x, dma off, " + "len %zu, req %p\n", + epnum, csr, + musb_ep->dma->actual_len, + request); + } + + if (is_dma || request->actual == request->length) { + + /* First, maybe a terminating short packet. + * Some DMA engines might handle this by + * themselves. + */ + if ((request->zero + && request->length + && (request->length + % musb_ep->packet_sz) + == 0) +#ifdef CONFIG_USB_INVENTRA_DMA + || (is_dma && + ((!dma->desired_mode) || + (request->actual & + (musb_ep->packet_sz - 1)))) +#endif + ) { + /* on dma completion, fifo may not + * be available yet ... + */ + if (csr & MUSB_TXCSR_TXPKTRDY) + break; + + DBG(4, "sending zero pkt\n"); + musb_writew(epio, MUSB_TXCSR, + MUSB_TXCSR_MODE + | MUSB_TXCSR_TXPKTRDY); + request->zero = 0; + } + + /* ... or if not, then complete it */ + musb_g_giveback(musb_ep, request, 0); + + /* kickstart next transfer if appropriate; + * the packet that just completed might not + * be transmitted for hours or days. + * REVISIT for double buffering... + * FIXME revisit for stalls too... + */ + musb_ep_select(mbase, epnum); + csr = musb_readw(epio, MUSB_TXCSR); + if (csr & MUSB_TXCSR_FIFONOTEMPTY) + break; + request = musb_ep->desc + ? 
next_request(musb_ep) + : NULL; + if (!request) { + DBG(4, "%s idle now\n", + musb_ep->end_point.name); + break; + } + } + + txstate(musb, to_musb_request(request)); + } + + } while (0); +} + +/* ------------------------------------------------------------ */ + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Peripheral rx (OUT) using Mentor DMA works as follows: + - Only mode 0 is used. + + - Request is queued by the gadget class driver. + -> if queue was previously empty, rxstate() + + - Host sends OUT token which causes an endpoint interrupt + /\ -> RxReady + | -> if request queued, call rxstate + | /\ -> setup DMA + | | -> DMA interrupt on completion + | | -> RxReady + | | -> stop DMA + | | -> ack the read + | | -> if data recd = max expected + | | by the request, or host + | | sent a short packet, + | | complete the request, + | | and start the next one. + | |_____________________________________| + | else just wait for the host + | to send the next OUT token. + |__________________________________________________| + + * Non-Mentor DMA engines can of course work differently. + */ + +#endif + +/* + * Context: controller locked, IRQs blocked, endpoint selected + */ +static void rxstate(struct musb *musb, struct musb_request *req) +{ + u16 csr = 0; + const u8 epnum = req->epnum; + struct usb_request *request = &req->request; + struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + void __iomem *epio = musb->endpoints[epnum].regs; + u16 fifo_count = 0; + u16 len = musb_ep->packet_sz; + + csr = musb_readw(epio, MUSB_RXCSR); + + if (is_cppi_enabled() && musb_ep->dma) { + struct dma_controller *c = musb->dma_controller; + struct dma_channel *channel = musb_ep->dma; + + /* NOTE: CPPI won't actually stop advancing the DMA + * queue after short packet transfers, so this is almost + * always going to run as IRQ-per-packet DMA so that + * faults will be handled correctly. + */ + if (c->channel_program(channel, + musb_ep->packet_sz, + !request->short_not_ok, + request->dma + request->actual, + request->length - request->actual)) { + + /* make sure that if an rxpkt arrived after the irq, + * the cppi engine will be ready to take it as soon + * as DMA is enabled + */ + csr &= ~(MUSB_RXCSR_AUTOCLEAR + | MUSB_RXCSR_DMAMODE); + csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; + musb_writew(epio, MUSB_RXCSR, csr); + return; + } + } + + if (csr & MUSB_RXCSR_RXPKTRDY) { + len = musb_readw(epio, MUSB_RXCOUNT); + if (request->actual < request->length) { +#ifdef CONFIG_USB_INVENTRA_DMA + if (is_dma_capable() && musb_ep->dma) { + struct dma_controller *c; + struct dma_channel *channel; + int use_dma = 0; + + c = musb->dma_controller; + channel = musb_ep->dma; + + /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in + * mode 0 only. So we do not get endpoint interrupts due to DMA + * completion. We only get interrupts from DMA controller. + * + * We could operate in DMA mode 1 if we knew the size of the tranfer + * in advance. For mass storage class, request->length = what the host + * sends, so that'd work. But for pretty much everything else, + * request->length is routinely more than what the host sends. For + * most these gadgets, end of is signified either by a short packet, + * or filling the last byte of the buffer. (Sending extra data in + * that last pckate should trigger an overflow fault.) But in mode 1, + * we don't get DMA completion interrrupt for short packets. 
+ * + * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), + * to get endpoint interrupt on every DMA req, but that didn't seem + * to work reliably. + * + * REVISIT an updated g_file_storage can set req->short_not_ok, which + * then becomes usable as a runtime "use mode 1" hint... + */ + + csr |= MUSB_RXCSR_DMAENAB; +#ifdef USE_MODE1 + csr |= MUSB_RXCSR_AUTOCLEAR; + /* csr |= MUSB_RXCSR_DMAMODE; */ + + /* this special sequence (enabling and then + * disabling MUSB_RXCSR_DMAMODE) is required + * to get DMAReq to activate + */ + musb_writew(epio, MUSB_RXCSR, + csr | MUSB_RXCSR_DMAMODE); +#endif + musb_writew(epio, MUSB_RXCSR, csr); + + if (request->actual < request->length) { + int transfer_size = 0; +#ifdef USE_MODE1 + transfer_size = min(request->length, + channel->max_len); +#else + transfer_size = len; +#endif + if (transfer_size <= musb_ep->packet_sz) + musb_ep->dma->desired_mode = 0; + else + musb_ep->dma->desired_mode = 1; + + use_dma = c->channel_program( + channel, + musb_ep->packet_sz, + channel->desired_mode, + request->dma + + request->actual, + transfer_size); + } + + if (use_dma) + return; + } +#endif /* Mentor's DMA */ + + fifo_count = request->length - request->actual; + DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", + musb_ep->end_point.name, + len, fifo_count, + musb_ep->packet_sz); + + fifo_count = min(len, fifo_count); + +#ifdef CONFIG_USB_TUSB_OMAP_DMA + if (tusb_dma_omap() && musb_ep->dma) { + struct dma_controller *c = musb->dma_controller; + struct dma_channel *channel = musb_ep->dma; + u32 dma_addr = request->dma + request->actual; + int ret; + + ret = c->channel_program(channel, + musb_ep->packet_sz, + channel->desired_mode, + dma_addr, + fifo_count); + if (ret) + return; + } +#endif + + musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) + (request->buf + request->actual)); + request->actual += fifo_count; + + /* REVISIT if we left anything in the fifo, flush + * it and report -EOVERFLOW + */ + + /* ack the read! */ + csr |= MUSB_RXCSR_P_WZC_BITS; + csr &= ~MUSB_RXCSR_RXPKTRDY; + musb_writew(epio, MUSB_RXCSR, csr); + } + } + + /* reach the end or short packet detected */ + if (request->actual == request->length || len < musb_ep->packet_sz) + musb_g_giveback(musb_ep, request, 0); +} + +/* + * Data ready for a request; called from IRQ + */ +void musb_g_rx(struct musb *musb, u8 epnum) +{ + u16 csr; + struct usb_request *request; + void __iomem *mbase = musb->mregs; + struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + void __iomem *epio = musb->endpoints[epnum].regs; + struct dma_channel *dma; + + musb_ep_select(mbase, epnum); + + request = next_request(musb_ep); + + csr = musb_readw(epio, MUSB_RXCSR); + dma = is_dma_capable() ? musb_ep->dma : NULL; + + DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, + csr, dma ? 
" (dma)" : "", request); + + if (csr & MUSB_RXCSR_P_SENTSTALL) { + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + (void) musb->dma_controller->channel_abort(dma); + request->actual += musb_ep->dma->actual_len; + } + + csr |= MUSB_RXCSR_P_WZC_BITS; + csr &= ~MUSB_RXCSR_P_SENTSTALL; + musb_writew(epio, MUSB_RXCSR, csr); + + if (request) + musb_g_giveback(musb_ep, request, -EPIPE); + goto done; + } + + if (csr & MUSB_RXCSR_P_OVERRUN) { + /* csr |= MUSB_RXCSR_P_WZC_BITS; */ + csr &= ~MUSB_RXCSR_P_OVERRUN; + musb_writew(epio, MUSB_RXCSR, csr); + + DBG(3, "%s iso overrun on %p\n", musb_ep->name, request); + if (request && request->status == -EINPROGRESS) + request->status = -EOVERFLOW; + } + if (csr & MUSB_RXCSR_INCOMPRX) { + /* REVISIT not necessarily an error */ + DBG(4, "%s, incomprx\n", musb_ep->end_point.name); + } + + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + /* "should not happen"; likely RXPKTRDY pending for DMA */ + DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, + "%s busy, csr %04x\n", + musb_ep->end_point.name, csr); + goto done; + } + + if (dma && (csr & MUSB_RXCSR_DMAENAB)) { + csr &= ~(MUSB_RXCSR_AUTOCLEAR + | MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_DMAMODE); + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_P_WZC_BITS | csr); + + request->actual += musb_ep->dma->actual_len; + + DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", + epnum, csr, + musb_readw(epio, MUSB_RXCSR), + musb_ep->dma->actual_len, request); + +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) + /* Autoclear doesn't clear RxPktRdy for short packets */ + if ((dma->desired_mode == 0) + || (dma->actual_len + & (musb_ep->packet_sz - 1))) { + /* ack the read! */ + csr &= ~MUSB_RXCSR_RXPKTRDY; + musb_writew(epio, MUSB_RXCSR, csr); + } + + /* incomplete, and not short? wait for next IN packet */ + if ((request->actual < request->length) + && (musb_ep->dma->actual_len + == musb_ep->packet_sz)) + goto done; +#endif + musb_g_giveback(musb_ep, request, 0); + + request = next_request(musb_ep); + if (!request) + goto done; + + /* don't start more i/o till the stall clears */ + musb_ep_select(mbase, epnum); + csr = musb_readw(epio, MUSB_RXCSR); + if (csr & MUSB_RXCSR_P_SENDSTALL) + goto done; + } + + + /* analyze request if the ep is hot */ + if (request) + rxstate(musb, to_musb_request(request)); + else + DBG(3, "packet waiting for %s%s request\n", + musb_ep->desc ? 
"" : "inactive ", + musb_ep->end_point.name); + +done: + return; +} + +/* ------------------------------------------------------------ */ + +static int musb_gadget_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + unsigned long flags; + struct musb_ep *musb_ep; + struct musb_hw_ep *hw_ep; + void __iomem *regs; + struct musb *musb; + void __iomem *mbase; + u8 epnum; + u16 csr; + unsigned tmp; + int status = -EINVAL; + + if (!ep || !desc) + return -EINVAL; + + musb_ep = to_musb_ep(ep); + hw_ep = musb_ep->hw_ep; + regs = hw_ep->regs; + musb = musb_ep->musb; + mbase = musb->mregs; + epnum = musb_ep->current_epnum; + + spin_lock_irqsave(&musb->lock, flags); + + if (musb_ep->desc) { + status = -EBUSY; + goto fail; + } + musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + + /* check direction and (later) maxpacket size against endpoint */ + if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum) + goto fail; + + /* REVISIT this rules out high bandwidth periodic transfers */ + tmp = le16_to_cpu(desc->wMaxPacketSize); + if (tmp & ~0x07ff) + goto fail; + musb_ep->packet_sz = tmp; + + /* enable the interrupts for the endpoint, set the endpoint + * packet size (or fail), set the mode, clear the fifo + */ + musb_ep_select(mbase, epnum); + if (desc->bEndpointAddress & USB_DIR_IN) { + u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); + + if (hw_ep->is_shared_fifo) + musb_ep->is_in = 1; + if (!musb_ep->is_in) + goto fail; + if (tmp > hw_ep->max_packet_sz_tx) + goto fail; + + int_txe |= (1 << epnum); + musb_writew(mbase, MUSB_INTRTXE, int_txe); + + /* REVISIT if can_bulk_split(), use by updating "tmp"; + * likewise high bandwidth periodic tx + */ + musb_writew(regs, MUSB_TXMAXP, tmp); + + csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; + if (musb_readw(regs, MUSB_TXCSR) + & MUSB_TXCSR_FIFONOTEMPTY) + csr |= MUSB_TXCSR_FLUSHFIFO; + if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) + csr |= MUSB_TXCSR_P_ISO; + + /* set twice in case of double buffering */ + musb_writew(regs, MUSB_TXCSR, csr); + /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ + musb_writew(regs, MUSB_TXCSR, csr); + + } else { + u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); + + if (hw_ep->is_shared_fifo) + musb_ep->is_in = 0; + if (musb_ep->is_in) + goto fail; + if (tmp > hw_ep->max_packet_sz_rx) + goto fail; + + int_rxe |= (1 << epnum); + musb_writew(mbase, MUSB_INTRRXE, int_rxe); + + /* REVISIT if can_bulk_combine() use by updating "tmp" + * likewise high bandwidth periodic rx + */ + musb_writew(regs, MUSB_RXMAXP, tmp); + + /* force shared fifo to OUT-only mode */ + if (hw_ep->is_shared_fifo) { + csr = musb_readw(regs, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); + musb_writew(regs, MUSB_TXCSR, csr); + } + + csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; + if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) + csr |= MUSB_RXCSR_P_ISO; + else if (musb_ep->type == USB_ENDPOINT_XFER_INT) + csr |= MUSB_RXCSR_DISNYET; + + /* set twice in case of double buffering */ + musb_writew(regs, MUSB_RXCSR, csr); + musb_writew(regs, MUSB_RXCSR, csr); + } + + /* NOTE: all the I/O code _should_ work fine without DMA, in case + * for some reason you run out of channels here. 
+ */ + if (is_dma_capable() && musb->dma_controller) { + struct dma_controller *c = musb->dma_controller; + + musb_ep->dma = c->channel_alloc(c, hw_ep, + (desc->bEndpointAddress & USB_DIR_IN)); + } else + musb_ep->dma = NULL; + + musb_ep->desc = desc; + musb_ep->busy = 0; + status = 0; + + pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", + musb_driver_name, musb_ep->end_point.name, + ({ char *s; switch (musb_ep->type) { + case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; + case USB_ENDPOINT_XFER_INT: s = "int"; break; + default: s = "iso"; break; + }; s; }), + musb_ep->is_in ? "IN" : "OUT", + musb_ep->dma ? "dma, " : "", + musb_ep->packet_sz); + + schedule_work(&musb->irq_work); + +fail: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +/* + * Disable an endpoint flushing all requests queued. + */ +static int musb_gadget_disable(struct usb_ep *ep) +{ + unsigned long flags; + struct musb *musb; + u8 epnum; + struct musb_ep *musb_ep; + void __iomem *epio; + int status = 0; + + musb_ep = to_musb_ep(ep); + musb = musb_ep->musb; + epnum = musb_ep->current_epnum; + epio = musb->endpoints[epnum].regs; + + spin_lock_irqsave(&musb->lock, flags); + musb_ep_select(musb->mregs, epnum); + + /* zero the endpoint sizes */ + if (musb_ep->is_in) { + u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); + int_txe &= ~(1 << epnum); + musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); + musb_writew(epio, MUSB_TXMAXP, 0); + } else { + u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); + int_rxe &= ~(1 << epnum); + musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); + musb_writew(epio, MUSB_RXMAXP, 0); + } + + musb_ep->desc = NULL; + + /* abort all pending DMA and requests */ + nuke(musb_ep, -ESHUTDOWN); + + schedule_work(&musb->irq_work); + + spin_unlock_irqrestore(&(musb->lock), flags); + + DBG(2, "%s\n", musb_ep->end_point.name); + + return status; +} + +/* + * Allocate a request for an endpoint. + * Reused by ep0 code. + */ +struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + struct musb_request *request = NULL; + + request = kzalloc(sizeof *request, gfp_flags); + if (request) { + INIT_LIST_HEAD(&request->request.list); + request->request.dma = DMA_ADDR_INVALID; + request->epnum = musb_ep->current_epnum; + request->ep = musb_ep; + } + + return &request->request; +} + +/* + * Free a request + * Reused by ep0 code. + */ +void musb_free_request(struct usb_ep *ep, struct usb_request *req) +{ + kfree(to_musb_request(req)); +} + +static LIST_HEAD(buffers); + +struct free_record { + struct list_head list; + struct device *dev; + unsigned bytes; + dma_addr_t dma; +}; + +/* + * Context: controller locked, IRQs blocked. + */ +static void musb_ep_restart(struct musb *musb, struct musb_request *req) +{ + DBG(3, "<== %s request %p len %u on hw_ep%d\n", + req->tx ? 
"TX/IN" : "RX/OUT", + &req->request, req->request.length, req->epnum); + + musb_ep_select(musb->mregs, req->epnum); + if (req->tx) + txstate(musb, req); + else + rxstate(musb, req); +} + +static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, + gfp_t gfp_flags) +{ + struct musb_ep *musb_ep; + struct musb_request *request; + struct musb *musb; + int status = 0; + unsigned long lockflags; + + if (!ep || !req) + return -EINVAL; + if (!req->buf) + return -ENODATA; + + musb_ep = to_musb_ep(ep); + musb = musb_ep->musb; + + request = to_musb_request(req); + request->musb = musb; + + if (request->ep != musb_ep) + return -EINVAL; + + DBG(4, "<== to %s request=%p\n", ep->name, req); + + /* request is mine now... */ + request->request.actual = 0; + request->request.status = -EINPROGRESS; + request->epnum = musb_ep->current_epnum; + request->tx = musb_ep->is_in; + + if (is_dma_capable() && musb_ep->dma) { + if (request->request.dma == DMA_ADDR_INVALID) { + request->request.dma = dma_map_single( + musb->controller, + request->request.buf, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->mapped = 1; + } else { + dma_sync_single_for_device(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->mapped = 0; + } + } else if (!req->buf) { + return -ENODATA; + } else + request->mapped = 0; + + spin_lock_irqsave(&musb->lock, lockflags); + + /* don't queue if the ep is down */ + if (!musb_ep->desc) { + DBG(4, "req %p queued to %s while ep %s\n", + req, ep->name, "disabled"); + status = -ESHUTDOWN; + goto cleanup; + } + + /* add request to the list */ + list_add_tail(&(request->request.list), &(musb_ep->req_list)); + + /* it this is the head of the queue, start i/o ... */ + if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next) + musb_ep_restart(musb, request); + +cleanup: + spin_unlock_irqrestore(&musb->lock, lockflags); + return status; +} + +static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + struct usb_request *r; + unsigned long flags; + int status = 0; + struct musb *musb = musb_ep->musb; + + if (!ep || !request || to_musb_request(request)->ep != musb_ep) + return -EINVAL; + + spin_lock_irqsave(&musb->lock, flags); + + list_for_each_entry(r, &musb_ep->req_list, list) { + if (r == request) + break; + } + if (r != request) { + DBG(3, "request %p not queued to %s\n", request, ep->name); + status = -EINVAL; + goto done; + } + + /* if the hardware doesn't have the request, easy ... */ + if (musb_ep->req_list.next != &request->list || musb_ep->busy) + musb_g_giveback(musb_ep, request, -ECONNRESET); + + /* ... else abort the dma transfer ... */ + else if (is_dma_capable() && musb_ep->dma) { + struct dma_controller *c = musb->dma_controller; + + musb_ep_select(musb->mregs, musb_ep->current_epnum); + if (c->channel_abort) + status = c->channel_abort(musb_ep->dma); + else + status = -EBUSY; + if (status == 0) + musb_g_giveback(musb_ep, request, -ECONNRESET); + } else { + /* NOTE: by sticking to easily tested hardware/driver states, + * we leave counting of in-flight packets imprecise. + */ + musb_g_giveback(musb_ep, request, -ECONNRESET); + } + +done: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +/* + * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any + * data but will queue requests. 
+ * + * exported to ep0 code + */ +int musb_gadget_set_halt(struct usb_ep *ep, int value) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + u8 epnum = musb_ep->current_epnum; + struct musb *musb = musb_ep->musb; + void __iomem *epio = musb->endpoints[epnum].regs; + void __iomem *mbase; + unsigned long flags; + u16 csr; + struct musb_request *request = NULL; + int status = 0; + + if (!ep) + return -EINVAL; + mbase = musb->mregs; + + spin_lock_irqsave(&musb->lock, flags); + + if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { + status = -EINVAL; + goto done; + } + + musb_ep_select(mbase, epnum); + + /* cannot portably stall with non-empty FIFO */ + request = to_musb_request(next_request(musb_ep)); + if (value && musb_ep->is_in) { + csr = musb_readw(epio, MUSB_TXCSR); + if (csr & MUSB_TXCSR_FIFONOTEMPTY) { + DBG(3, "%s fifo busy, cannot halt\n", ep->name); + spin_unlock_irqrestore(&musb->lock, flags); + return -EAGAIN; + } + + } + + /* set/clear the stall and toggle bits */ + DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); + if (musb_ep->is_in) { + csr = musb_readw(epio, MUSB_TXCSR); + if (csr & MUSB_TXCSR_FIFONOTEMPTY) + csr |= MUSB_TXCSR_FLUSHFIFO; + csr |= MUSB_TXCSR_P_WZC_BITS + | MUSB_TXCSR_CLRDATATOG; + if (value) + csr |= MUSB_TXCSR_P_SENDSTALL; + else + csr &= ~(MUSB_TXCSR_P_SENDSTALL + | MUSB_TXCSR_P_SENTSTALL); + csr &= ~MUSB_TXCSR_TXPKTRDY; + musb_writew(epio, MUSB_TXCSR, csr); + } else { + csr = musb_readw(epio, MUSB_RXCSR); + csr |= MUSB_RXCSR_P_WZC_BITS + | MUSB_RXCSR_FLUSHFIFO + | MUSB_RXCSR_CLRDATATOG; + if (value) + csr |= MUSB_RXCSR_P_SENDSTALL; + else + csr &= ~(MUSB_RXCSR_P_SENDSTALL + | MUSB_RXCSR_P_SENTSTALL); + musb_writew(epio, MUSB_RXCSR, csr); + } + +done: + + /* maybe start the first request in the queue */ + if (!musb_ep->busy && !value && request) { + DBG(3, "restarting the request\n"); + musb_ep_restart(musb, request); + } + + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +static int musb_gadget_fifo_status(struct usb_ep *ep) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + void __iomem *epio = musb_ep->hw_ep->regs; + int retval = -EINVAL; + + if (musb_ep->desc && !musb_ep->is_in) { + struct musb *musb = musb_ep->musb; + int epnum = musb_ep->current_epnum; + void __iomem *mbase = musb->mregs; + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + + musb_ep_select(mbase, epnum); + /* FIXME return zero unless RXPKTRDY is set */ + retval = musb_readw(epio, MUSB_RXCOUNT); + + spin_unlock_irqrestore(&musb->lock, flags); + } + return retval; +} + +static void musb_gadget_fifo_flush(struct usb_ep *ep) +{ + struct musb_ep *musb_ep = to_musb_ep(ep); + struct musb *musb = musb_ep->musb; + u8 epnum = musb_ep->current_epnum; + void __iomem *epio = musb->endpoints[epnum].regs; + void __iomem *mbase; + unsigned long flags; + u16 csr, int_txe; + + mbase = musb->mregs; + + spin_lock_irqsave(&musb->lock, flags); + musb_ep_select(mbase, (u8) epnum); + + /* disable interrupts */ + int_txe = musb_readw(mbase, MUSB_INTRTXE); + musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); + + if (musb_ep->is_in) { + csr = musb_readw(epio, MUSB_TXCSR); + if (csr & MUSB_TXCSR_FIFONOTEMPTY) { + csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; + musb_writew(epio, MUSB_TXCSR, csr); + /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... 
*/ + musb_writew(epio, MUSB_TXCSR, csr); + } + } else { + csr = musb_readw(epio, MUSB_RXCSR); + csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; + musb_writew(epio, MUSB_RXCSR, csr); + musb_writew(epio, MUSB_RXCSR, csr); + } + + /* re-enable interrupt */ + musb_writew(mbase, MUSB_INTRTXE, int_txe); + spin_unlock_irqrestore(&musb->lock, flags); +} + +static const struct usb_ep_ops musb_ep_ops = { + .enable = musb_gadget_enable, + .disable = musb_gadget_disable, + .alloc_request = musb_alloc_request, + .free_request = musb_free_request, + .queue = musb_gadget_queue, + .dequeue = musb_gadget_dequeue, + .set_halt = musb_gadget_set_halt, + .fifo_status = musb_gadget_fifo_status, + .fifo_flush = musb_gadget_fifo_flush +}; + +/* ----------------------------------------------------------------------- */ + +static int musb_gadget_get_frame(struct usb_gadget *gadget) +{ + struct musb *musb = gadget_to_musb(gadget); + + return (int)musb_readw(musb->mregs, MUSB_FRAME); +} + +static int musb_gadget_wakeup(struct usb_gadget *gadget) +{ + struct musb *musb = gadget_to_musb(gadget); + void __iomem *mregs = musb->mregs; + unsigned long flags; + int status = -EINVAL; + u8 power, devctl; + int retries; + + spin_lock_irqsave(&musb->lock, flags); + + switch (musb->xceiv.state) { + case OTG_STATE_B_PERIPHERAL: + /* NOTE: OTG state machine doesn't include B_SUSPENDED; + * that's part of the standard usb 1.1 state machine, and + * doesn't affect OTG transitions. + */ + if (musb->may_wakeup && musb->is_suspended) + break; + goto done; + case OTG_STATE_B_IDLE: + /* Start SRP ... OTG not required. */ + devctl = musb_readb(mregs, MUSB_DEVCTL); + DBG(2, "Sending SRP: devctl: %02x\n", devctl); + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(mregs, MUSB_DEVCTL, devctl); + devctl = musb_readb(mregs, MUSB_DEVCTL); + retries = 100; + while (!(devctl & MUSB_DEVCTL_SESSION)) { + devctl = musb_readb(mregs, MUSB_DEVCTL); + if (retries-- < 1) + break; + } + retries = 10000; + while (devctl & MUSB_DEVCTL_SESSION) { + devctl = musb_readb(mregs, MUSB_DEVCTL); + if (retries-- < 1) + break; + } + + /* Block idling for at least 1s */ + musb_platform_try_idle(musb, + jiffies + msecs_to_jiffies(1 * HZ)); + + status = 0; + goto done; + default: + DBG(2, "Unhandled wake: %s\n", otg_state_string(musb)); + goto done; + } + + status = 0; + + power = musb_readb(mregs, MUSB_POWER); + power |= MUSB_POWER_RESUME; + musb_writeb(mregs, MUSB_POWER, power); + DBG(2, "issue wakeup\n"); + + /* FIXME do this next chunk in a timer callback, no udelay */ + mdelay(2); + + power = musb_readb(mregs, MUSB_POWER); + power &= ~MUSB_POWER_RESUME; + musb_writeb(mregs, MUSB_POWER, power); +done: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +static int +musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) +{ + struct musb *musb = gadget_to_musb(gadget); + + musb->is_self_powered = !!is_selfpowered; + return 0; +} + +static void musb_pullup(struct musb *musb, int is_on) +{ + u8 power; + + power = musb_readb(musb->mregs, MUSB_POWER); + if (is_on) + power |= MUSB_POWER_SOFTCONN; + else + power &= ~MUSB_POWER_SOFTCONN; + + /* FIXME if on, HdrcStart; if off, HdrcStop */ + + DBG(3, "gadget %s D+ pullup %s\n", + musb->gadget_driver->function, is_on ? 
"on" : "off"); + musb_writeb(musb->mregs, MUSB_POWER, power); +} + +#if 0 +static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) +{ + DBG(2, "<= %s =>\n", __func__); + + /* + * FIXME iff driver's softconnect flag is set (as it is during probe, + * though that can clear it), just musb_pullup(). + */ + + return -EINVAL; +} +#endif + +static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) +{ + struct musb *musb = gadget_to_musb(gadget); + + if (!musb->xceiv.set_power) + return -EOPNOTSUPP; + return otg_set_power(&musb->xceiv, mA); +} + +static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) +{ + struct musb *musb = gadget_to_musb(gadget); + unsigned long flags; + + is_on = !!is_on; + + /* NOTE: this assumes we are sensing vbus; we'd rather + * not pullup unless the B-session is active. + */ + spin_lock_irqsave(&musb->lock, flags); + if (is_on != musb->softconnect) { + musb->softconnect = is_on; + musb_pullup(musb, is_on); + } + spin_unlock_irqrestore(&musb->lock, flags); + return 0; +} + +static const struct usb_gadget_ops musb_gadget_operations = { + .get_frame = musb_gadget_get_frame, + .wakeup = musb_gadget_wakeup, + .set_selfpowered = musb_gadget_set_self_powered, + /* .vbus_session = musb_gadget_vbus_session, */ + .vbus_draw = musb_gadget_vbus_draw, + .pullup = musb_gadget_pullup, +}; + +/* ----------------------------------------------------------------------- */ + +/* Registration */ + +/* Only this registration code "knows" the rule (from USB standards) + * about there being only one external upstream port. It assumes + * all peripheral ports are external... + */ +static struct musb *the_gadget; + +static void musb_gadget_release(struct device *dev) +{ + /* kref_put(WHAT) */ + dev_dbg(dev, "%s\n", __func__); +} + + +static void __init +init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) +{ + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + + memset(ep, 0, sizeof *ep); + + ep->current_epnum = epnum; + ep->musb = musb; + ep->hw_ep = hw_ep; + ep->is_in = is_in; + + INIT_LIST_HEAD(&ep->req_list); + + sprintf(ep->name, "ep%d%s", epnum, + (!epnum || hw_ep->is_shared_fifo) ? "" : ( + is_in ? "in" : "out")); + ep->end_point.name = ep->name; + INIT_LIST_HEAD(&ep->end_point.ep_list); + if (!epnum) { + ep->end_point.maxpacket = 64; + ep->end_point.ops = &musb_g_ep0_ops; + musb->g.ep0 = &ep->end_point; + } else { + if (is_in) + ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; + else + ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; + ep->end_point.ops = &musb_ep_ops; + list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); + } +} + +/* + * Initialize the endpoints exposed to peripheral drivers, with backlinks + * to the rest of the driver state. + */ +static inline void __init musb_g_init_endpoints(struct musb *musb) +{ + u8 epnum; + struct musb_hw_ep *hw_ep; + unsigned count = 0; + + /* intialize endpoint list just once */ + INIT_LIST_HEAD(&(musb->g.ep_list)); + + for (epnum = 0, hw_ep = musb->endpoints; + epnum < musb->nr_endpoints; + epnum++, hw_ep++) { + if (hw_ep->is_shared_fifo /* || !epnum */) { + init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); + count++; + } else { + if (hw_ep->max_packet_sz_tx) { + init_peripheral_ep(musb, &hw_ep->ep_in, + epnum, 1); + count++; + } + if (hw_ep->max_packet_sz_rx) { + init_peripheral_ep(musb, &hw_ep->ep_out, + epnum, 0); + count++; + } + } + } +} + +/* called once during driver setup to initialize and link into + * the driver model; memory is zeroed. 
+ */ +int __init musb_gadget_setup(struct musb *musb) +{ + int status; + + /* REVISIT minor race: if (erroneously) setting up two + * musb peripherals at the same time, only the bus lock + * is probably held. + */ + if (the_gadget) + return -EBUSY; + the_gadget = musb; + + musb->g.ops = &musb_gadget_operations; + musb->g.is_dualspeed = 1; + musb->g.speed = USB_SPEED_UNKNOWN; + + /* this "gadget" abstracts/virtualizes the controller */ + strcpy(musb->g.dev.bus_id, "gadget"); + musb->g.dev.parent = musb->controller; + musb->g.dev.dma_mask = musb->controller->dma_mask; + musb->g.dev.release = musb_gadget_release; + musb->g.name = musb_driver_name; + + if (is_otg_enabled(musb)) + musb->g.is_otg = 1; + + musb_g_init_endpoints(musb); + + musb->is_active = 0; + musb_platform_try_idle(musb, 0); + + status = device_register(&musb->g.dev); + if (status != 0) + the_gadget = NULL; + return status; +} + +void musb_gadget_cleanup(struct musb *musb) +{ + if (musb != the_gadget) + return; + + device_unregister(&musb->g.dev); + the_gadget = NULL; +} + +/* + * Register the gadget driver. Used by gadget drivers when + * registering themselves with the controller. + * + * -EINVAL something went wrong (not driver) + * -EBUSY another gadget is already using the controller + * -ENOMEM no memeory to perform the operation + * + * @param driver the gadget driver + * @return <0 if error, 0 if everything is fine + */ +int usb_gadget_register_driver(struct usb_gadget_driver *driver) +{ + int retval; + unsigned long flags; + struct musb *musb = the_gadget; + + if (!driver + || driver->speed != USB_SPEED_HIGH + || !driver->bind + || !driver->setup) + return -EINVAL; + + /* driver must be initialized to support peripheral mode */ + if (!musb || !(musb->board_mode == MUSB_OTG + || musb->board_mode != MUSB_OTG)) { + DBG(1, "%s, no dev??\n", __func__); + return -ENODEV; + } + + DBG(3, "registering driver %s\n", driver->function); + spin_lock_irqsave(&musb->lock, flags); + + if (musb->gadget_driver) { + DBG(1, "%s is already bound to %s\n", + musb_driver_name, + musb->gadget_driver->driver.name); + retval = -EBUSY; + } else { + musb->gadget_driver = driver; + musb->g.dev.driver = &driver->driver; + driver->driver.bus = NULL; + musb->softconnect = 1; + retval = 0; + } + + spin_unlock_irqrestore(&musb->lock, flags); + + if (retval == 0) + retval = driver->bind(&musb->g); + if (retval != 0) { + DBG(3, "bind to driver %s failed --> %d\n", + driver->driver.name, retval); + musb->gadget_driver = NULL; + musb->g.dev.driver = NULL; + } + + /* start peripheral and/or OTG engines */ + if (retval == 0) { + spin_lock_irqsave(&musb->lock, flags); + + /* REVISIT always use otg_set_peripheral(), handling + * issues including the root hub one below ... + */ + musb->xceiv.gadget = &musb->g; + musb->xceiv.state = OTG_STATE_B_IDLE; + musb->is_active = 1; + + /* FIXME this ignores the softconnect flag. Drivers are + * allowed hold the peripheral inactive until for example + * userspace hooks up printer hardware or DSP codecs, so + * hosts only see fully functional devices. + */ + + if (!is_otg_enabled(musb)) + musb_start(musb); + + spin_unlock_irqrestore(&musb->lock, flags); + + if (is_otg_enabled(musb)) { + DBG(3, "OTG startup...\n"); + + /* REVISIT: funcall to other code, which also + * handles power budgeting ... this way also + * ensures HdrcStart is indirectly called. 
+ */ + retval = usb_add_hcd(musb_to_hcd(musb), -1, 0); + if (retval < 0) { + DBG(1, "add_hcd failed, %d\n", retval); + spin_lock_irqsave(&musb->lock, flags); + musb->xceiv.gadget = NULL; + musb->xceiv.state = OTG_STATE_UNDEFINED; + musb->gadget_driver = NULL; + musb->g.dev.driver = NULL; + spin_unlock_irqrestore(&musb->lock, flags); + } + } + } + + return retval; +} +EXPORT_SYMBOL(usb_gadget_register_driver); + +static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) +{ + int i; + struct musb_hw_ep *hw_ep; + + /* don't disconnect if it's not connected */ + if (musb->g.speed == USB_SPEED_UNKNOWN) + driver = NULL; + else + musb->g.speed = USB_SPEED_UNKNOWN; + + /* deactivate the hardware */ + if (musb->softconnect) { + musb->softconnect = 0; + musb_pullup(musb, 0); + } + musb_stop(musb); + + /* killing any outstanding requests will quiesce the driver; + * then report disconnect + */ + if (driver) { + for (i = 0, hw_ep = musb->endpoints; + i < musb->nr_endpoints; + i++, hw_ep++) { + musb_ep_select(musb->mregs, i); + if (hw_ep->is_shared_fifo /* || !epnum */) { + nuke(&hw_ep->ep_in, -ESHUTDOWN); + } else { + if (hw_ep->max_packet_sz_tx) + nuke(&hw_ep->ep_in, -ESHUTDOWN); + if (hw_ep->max_packet_sz_rx) + nuke(&hw_ep->ep_out, -ESHUTDOWN); + } + } + + spin_unlock(&musb->lock); + driver->disconnect(&musb->g); + spin_lock(&musb->lock); + } +} + +/* + * Unregister the gadget driver. Used by gadget drivers when + * unregistering themselves from the controller. + * + * @param driver the gadget driver to unregister + */ +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) +{ + unsigned long flags; + int retval = 0; + struct musb *musb = the_gadget; + + if (!driver || !driver->unbind || !musb) + return -EINVAL; + + /* REVISIT always use otg_set_peripheral() here too; + * this needs to shut down the OTG engine. + */ + + spin_lock_irqsave(&musb->lock, flags); + +#ifdef CONFIG_USB_MUSB_OTG + musb_hnp_stop(musb); +#endif + + if (musb->gadget_driver == driver) { + + (void) musb_gadget_vbus_draw(&musb->g, 0); + + musb->xceiv.state = OTG_STATE_UNDEFINED; + stop_activity(musb, driver); + + DBG(3, "unregistering driver %s\n", driver->function); + spin_unlock_irqrestore(&musb->lock, flags); + driver->unbind(&musb->g); + spin_lock_irqsave(&musb->lock, flags); + + musb->gadget_driver = NULL; + musb->g.dev.driver = NULL; + + musb->is_active = 0; + musb_platform_try_idle(musb, 0); + } else + retval = -EINVAL; + spin_unlock_irqrestore(&musb->lock, flags); + + if (is_otg_enabled(musb) && retval == 0) { + usb_remove_hcd(musb_to_hcd(musb)); + /* FIXME we need to be able to register another + * gadget driver here and have everything work; + * that currently misbehaves. 
+ */ + } + + return retval; +} +EXPORT_SYMBOL(usb_gadget_unregister_driver); + + +/* ----------------------------------------------------------------------- */ + +/* lifecycle operations called through plat_uds.c */ + +void musb_g_resume(struct musb *musb) +{ + musb->is_suspended = 0; + switch (musb->xceiv.state) { + case OTG_STATE_B_IDLE: + break; + case OTG_STATE_B_WAIT_ACON: + case OTG_STATE_B_PERIPHERAL: + musb->is_active = 1; + if (musb->gadget_driver && musb->gadget_driver->resume) { + spin_unlock(&musb->lock); + musb->gadget_driver->resume(&musb->g); + spin_lock(&musb->lock); + } + break; + default: + WARNING("unhandled RESUME transition (%s)\n", + otg_state_string(musb)); + } +} + +/* called when SOF packets stop for 3+ msec */ +void musb_g_suspend(struct musb *musb) +{ + u8 devctl; + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + DBG(3, "devctl %02x\n", devctl); + + switch (musb->xceiv.state) { + case OTG_STATE_B_IDLE: + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) + musb->xceiv.state = OTG_STATE_B_PERIPHERAL; + break; + case OTG_STATE_B_PERIPHERAL: + musb->is_suspended = 1; + if (musb->gadget_driver && musb->gadget_driver->suspend) { + spin_unlock(&musb->lock); + musb->gadget_driver->suspend(&musb->g); + spin_lock(&musb->lock); + } + break; + default: + /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; + * A_PERIPHERAL may need care too + */ + WARNING("unhandled SUSPEND transition (%s)\n", + otg_state_string(musb)); + } +} + +/* Called during SRP */ +void musb_g_wakeup(struct musb *musb) +{ + musb_gadget_wakeup(&musb->g); +} + +/* called when VBUS drops below session threshold, and in other cases */ +void musb_g_disconnect(struct musb *musb) +{ + void __iomem *mregs = musb->mregs; + u8 devctl = musb_readb(mregs, MUSB_DEVCTL); + + DBG(3, "devctl %02x\n", devctl); + + /* clear HR */ + musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); + + /* don't draw vbus until new b-default session */ + (void) musb_gadget_vbus_draw(&musb->g, 0); + + musb->g.speed = USB_SPEED_UNKNOWN; + if (musb->gadget_driver && musb->gadget_driver->disconnect) { + spin_unlock(&musb->lock); + musb->gadget_driver->disconnect(&musb->g); + spin_lock(&musb->lock); + } + + switch (musb->xceiv.state) { + default: +#ifdef CONFIG_USB_MUSB_OTG + DBG(2, "Unhandled disconnect %s, setting a_idle\n", + otg_state_string(musb)); + musb->xceiv.state = OTG_STATE_A_IDLE; + break; + case OTG_STATE_A_PERIPHERAL: + musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; + break; + case OTG_STATE_B_WAIT_ACON: + case OTG_STATE_B_HOST: +#endif + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_IDLE: + musb->xceiv.state = OTG_STATE_B_IDLE; + break; + case OTG_STATE_B_SRP_INIT: + break; + } + + musb->is_active = 0; +} + +void musb_g_reset(struct musb *musb) +__releases(musb->lock) +__acquires(musb->lock) +{ + void __iomem *mbase = musb->mregs; + u8 devctl = musb_readb(mbase, MUSB_DEVCTL); + u8 power; + + DBG(3, "<== %s addr=%x driver '%s'\n", + (devctl & MUSB_DEVCTL_BDEVICE) + ? "B-Device" : "A-Device", + musb_readb(mbase, MUSB_FADDR), + musb->gadget_driver + ? musb->gadget_driver->driver.name + : NULL + ); + + /* report disconnect, if we didn't already (flushing EP state) */ + if (musb->g.speed != USB_SPEED_UNKNOWN) + musb_g_disconnect(musb); + + /* clear HR */ + else if (devctl & MUSB_DEVCTL_HR) + musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); + + + /* what speed did we negotiate? */ + power = musb_readb(mbase, MUSB_POWER); + musb->g.speed = (power & MUSB_POWER_HSMODE) + ? 
USB_SPEED_HIGH : USB_SPEED_FULL; + + /* start in USB_STATE_DEFAULT */ + musb->is_active = 1; + musb->is_suspended = 0; + MUSB_DEV_MODE(musb); + musb->address = 0; + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + + musb->may_wakeup = 0; + musb->g.b_hnp_enable = 0; + musb->g.a_alt_hnp_support = 0; + musb->g.a_hnp_support = 0; + + /* Normal reset, as B-Device; + * or else after HNP, as A-Device + */ + if (devctl & MUSB_DEVCTL_BDEVICE) { + musb->xceiv.state = OTG_STATE_B_PERIPHERAL; + musb->g.is_a_peripheral = 0; + } else if (is_otg_enabled(musb)) { + musb->xceiv.state = OTG_STATE_A_PERIPHERAL; + musb->g.is_a_peripheral = 1; + } else + WARN_ON(1); + + /* start with default limits on VBUS power draw */ + (void) musb_gadget_vbus_draw(&musb->g, + is_otg_enabled(musb) ? 8 : 100); +} diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h new file mode 100644 index 00000000000..59502da9f73 --- /dev/null +++ b/drivers/usb/musb/musb_gadget.h @@ -0,0 +1,108 @@ +/* + * MUSB OTG driver peripheral defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_GADGET_H +#define __MUSB_GADGET_H + +struct musb_request { + struct usb_request request; + struct musb_ep *ep; + struct musb *musb; + u8 tx; /* endpoint direction */ + u8 epnum; + u8 mapped; +}; + +static inline struct musb_request *to_musb_request(struct usb_request *req) +{ + return req ? container_of(req, struct musb_request, request) : NULL; +} + +extern struct usb_request * +musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags); +extern void musb_free_request(struct usb_ep *ep, struct usb_request *req); + + +/* + * struct musb_ep - peripheral side view of endpoint rx or tx side + */ +struct musb_ep { + /* stuff towards the head is basically write-once. */ + struct usb_ep end_point; + char name[12]; + struct musb_hw_ep *hw_ep; + struct musb *musb; + u8 current_epnum; + + /* ... when enabled/disabled ... 
*/ + u8 type; + u8 is_in; + u16 packet_sz; + const struct usb_endpoint_descriptor *desc; + struct dma_channel *dma; + + /* later things are modified based on usage */ + struct list_head req_list; + + /* true if lock must be dropped but req_list may not be advanced */ + u8 busy; +}; + +static inline struct musb_ep *to_musb_ep(struct usb_ep *ep) +{ + return ep ? container_of(ep, struct musb_ep, end_point) : NULL; +} + +static inline struct usb_request *next_request(struct musb_ep *ep) +{ + struct list_head *queue = &ep->req_list; + + if (list_empty(queue)) + return NULL; + return container_of(queue->next, struct usb_request, list); +} + +extern void musb_g_tx(struct musb *musb, u8 epnum); +extern void musb_g_rx(struct musb *musb, u8 epnum); + +extern const struct usb_ep_ops musb_g_ep0_ops; + +extern int musb_gadget_setup(struct musb *); +extern void musb_gadget_cleanup(struct musb *); + +extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); + +extern int musb_gadget_set_halt(struct usb_ep *ep, int value); + +#endif /* __MUSB_GADGET_H */ diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c new file mode 100644 index 00000000000..48d7d3ccb24 --- /dev/null +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -0,0 +1,981 @@ +/* + * MUSB OTG peripheral driver ep0 handling + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "musb_core.h" + +/* ep0 is always musb->endpoints[0].ep_in */ +#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0]) + +/* + * locking note: we use only the controller lock, for simpler correctness. + * It's always held with IRQs blocked. + * + * It protects the ep0 request queue as well as ep0_state, not just the + * controller and indexed registers. And that lock stays held unless it + * needs to be dropped to allow reentering this driver ... like upcalls to + * the gadget driver, or adjusting endpoint halt status. 
+ */ + +static char *decode_ep0stage(u8 stage) +{ + switch (stage) { + case MUSB_EP0_STAGE_SETUP: return "idle"; + case MUSB_EP0_STAGE_TX: return "in"; + case MUSB_EP0_STAGE_RX: return "out"; + case MUSB_EP0_STAGE_ACKWAIT: return "wait"; + case MUSB_EP0_STAGE_STATUSIN: return "in/status"; + case MUSB_EP0_STAGE_STATUSOUT: return "out/status"; + default: return "?"; + } +} + +/* handle a standard GET_STATUS request + * Context: caller holds controller lock + */ +static int service_tx_status_request( + struct musb *musb, + const struct usb_ctrlrequest *ctrlrequest) +{ + void __iomem *mbase = musb->mregs; + int handled = 1; + u8 result[2], epnum = 0; + const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; + + result[1] = 0; + + switch (recip) { + case USB_RECIP_DEVICE: + result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED; + result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP; +#ifdef CONFIG_USB_MUSB_OTG + if (musb->g.is_otg) { + result[0] |= musb->g.b_hnp_enable + << USB_DEVICE_B_HNP_ENABLE; + result[0] |= musb->g.a_alt_hnp_support + << USB_DEVICE_A_ALT_HNP_SUPPORT; + result[0] |= musb->g.a_hnp_support + << USB_DEVICE_A_HNP_SUPPORT; + } +#endif + break; + + case USB_RECIP_INTERFACE: + result[0] = 0; + break; + + case USB_RECIP_ENDPOINT: { + int is_in; + struct musb_ep *ep; + u16 tmp; + void __iomem *regs; + + epnum = (u8) ctrlrequest->wIndex; + if (!epnum) { + result[0] = 0; + break; + } + + is_in = epnum & USB_DIR_IN; + if (is_in) { + epnum &= 0x0f; + ep = &musb->endpoints[epnum].ep_in; + } else { + ep = &musb->endpoints[epnum].ep_out; + } + regs = musb->endpoints[epnum].regs; + + if (epnum >= MUSB_C_NUM_EPS || !ep->desc) { + handled = -EINVAL; + break; + } + + musb_ep_select(mbase, epnum); + if (is_in) + tmp = musb_readw(regs, MUSB_TXCSR) + & MUSB_TXCSR_P_SENDSTALL; + else + tmp = musb_readw(regs, MUSB_RXCSR) + & MUSB_RXCSR_P_SENDSTALL; + musb_ep_select(mbase, 0); + + result[0] = tmp ? 1 : 0; + } break; + + default: + /* class, vendor, etc ... delegate */ + handled = 0; + break; + } + + /* fill up the fifo; caller updates csr0 */ + if (handled > 0) { + u16 len = le16_to_cpu(ctrlrequest->wLength); + + if (len > 2) + len = 2; + musb_write_fifo(&musb->endpoints[0], len, result); + } + + return handled; +} + +/* + * handle a control-IN request, the end0 buffer contains the current request + * that is supposed to be a standard control request. Assumes the fifo to + * be at least 2 bytes long. 
+ * + * @return 0 if the request was NOT HANDLED, + * < 0 when error + * > 0 when the request is processed + * + * Context: caller holds controller lock + */ +static int +service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) +{ + int handled = 0; /* not handled */ + + if ((ctrlrequest->bRequestType & USB_TYPE_MASK) + == USB_TYPE_STANDARD) { + switch (ctrlrequest->bRequest) { + case USB_REQ_GET_STATUS: + handled = service_tx_status_request(musb, + ctrlrequest); + break; + + /* case USB_REQ_SYNC_FRAME: */ + + default: + break; + } + } + return handled; +} + +/* + * Context: caller holds controller lock + */ +static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) +{ + musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); + musb->ep0_state = MUSB_EP0_STAGE_SETUP; +} + +/* + * Tries to start B-device HNP negotiation if enabled via sysfs + */ +static inline void musb_try_b_hnp_enable(struct musb *musb) +{ + void __iomem *mbase = musb->mregs; + u8 devctl; + + DBG(1, "HNP: Setting HR\n"); + devctl = musb_readb(mbase, MUSB_DEVCTL); + musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR); +} + +/* + * Handle all control requests with no DATA stage, including standard + * requests such as: + * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized + * always delegated to the gadget driver + * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE + * always handled here, except for class/vendor/... features + * + * Context: caller holds controller lock + */ +static int +service_zero_data_request(struct musb *musb, + struct usb_ctrlrequest *ctrlrequest) +__releases(musb->lock) +__acquires(musb->lock) +{ + int handled = -EINVAL; + void __iomem *mbase = musb->mregs; + const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK; + + /* the gadget driver handles everything except what we MUST handle */ + if ((ctrlrequest->bRequestType & USB_TYPE_MASK) + == USB_TYPE_STANDARD) { + switch (ctrlrequest->bRequest) { + case USB_REQ_SET_ADDRESS: + /* change it after the status stage */ + musb->set_address = true; + musb->address = (u8) (ctrlrequest->wValue & 0x7f); + handled = 1; + break; + + case USB_REQ_CLEAR_FEATURE: + switch (recip) { + case USB_RECIP_DEVICE: + if (ctrlrequest->wValue + != USB_DEVICE_REMOTE_WAKEUP) + break; + musb->may_wakeup = 0; + handled = 1; + break; + case USB_RECIP_INTERFACE: + break; + case USB_RECIP_ENDPOINT:{ + const u8 num = ctrlrequest->wIndex & 0x0f; + struct musb_ep *musb_ep; + + if (num == 0 + || num >= MUSB_C_NUM_EPS + || ctrlrequest->wValue + != USB_ENDPOINT_HALT) + break; + + if (ctrlrequest->wIndex & USB_DIR_IN) + musb_ep = &musb->endpoints[num].ep_in; + else + musb_ep = &musb->endpoints[num].ep_out; + if (!musb_ep->desc) + break; + + /* REVISIT do it directly, no locking games */ + spin_unlock(&musb->lock); + musb_gadget_set_halt(&musb_ep->end_point, 0); + spin_lock(&musb->lock); + + /* select ep0 again */ + musb_ep_select(mbase, 0); + handled = 1; + } break; + default: + /* class, vendor, etc ... 
delegate */ + handled = 0; + break; + } + break; + + case USB_REQ_SET_FEATURE: + switch (recip) { + case USB_RECIP_DEVICE: + handled = 1; + switch (ctrlrequest->wValue) { + case USB_DEVICE_REMOTE_WAKEUP: + musb->may_wakeup = 1; + break; + case USB_DEVICE_TEST_MODE: + if (musb->g.speed != USB_SPEED_HIGH) + goto stall; + if (ctrlrequest->wIndex & 0xff) + goto stall; + + switch (ctrlrequest->wIndex >> 8) { + case 1: + pr_debug("TEST_J\n"); + /* TEST_J */ + musb->test_mode_nr = + MUSB_TEST_J; + break; + case 2: + /* TEST_K */ + pr_debug("TEST_K\n"); + musb->test_mode_nr = + MUSB_TEST_K; + break; + case 3: + /* TEST_SE0_NAK */ + pr_debug("TEST_SE0_NAK\n"); + musb->test_mode_nr = + MUSB_TEST_SE0_NAK; + break; + case 4: + /* TEST_PACKET */ + pr_debug("TEST_PACKET\n"); + musb->test_mode_nr = + MUSB_TEST_PACKET; + break; + default: + goto stall; + } + + /* enter test mode after irq */ + if (handled > 0) + musb->test_mode = true; + break; +#ifdef CONFIG_USB_MUSB_OTG + case USB_DEVICE_B_HNP_ENABLE: + if (!musb->g.is_otg) + goto stall; + musb->g.b_hnp_enable = 1; + musb_try_b_hnp_enable(musb); + break; + case USB_DEVICE_A_HNP_SUPPORT: + if (!musb->g.is_otg) + goto stall; + musb->g.a_hnp_support = 1; + break; + case USB_DEVICE_A_ALT_HNP_SUPPORT: + if (!musb->g.is_otg) + goto stall; + musb->g.a_alt_hnp_support = 1; + break; +#endif +stall: + default: + handled = -EINVAL; + break; + } + break; + + case USB_RECIP_INTERFACE: + break; + + case USB_RECIP_ENDPOINT:{ + const u8 epnum = + ctrlrequest->wIndex & 0x0f; + struct musb_ep *musb_ep; + struct musb_hw_ep *ep; + void __iomem *regs; + int is_in; + u16 csr; + + if (epnum == 0 + || epnum >= MUSB_C_NUM_EPS + || ctrlrequest->wValue + != USB_ENDPOINT_HALT) + break; + + ep = musb->endpoints + epnum; + regs = ep->regs; + is_in = ctrlrequest->wIndex & USB_DIR_IN; + if (is_in) + musb_ep = &ep->ep_in; + else + musb_ep = &ep->ep_out; + if (!musb_ep->desc) + break; + + musb_ep_select(mbase, epnum); + if (is_in) { + csr = musb_readw(regs, + MUSB_TXCSR); + if (csr & MUSB_TXCSR_FIFONOTEMPTY) + csr |= MUSB_TXCSR_FLUSHFIFO; + csr |= MUSB_TXCSR_P_SENDSTALL + | MUSB_TXCSR_CLRDATATOG + | MUSB_TXCSR_P_WZC_BITS; + musb_writew(regs, MUSB_TXCSR, + csr); + } else { + csr = musb_readw(regs, + MUSB_RXCSR); + csr |= MUSB_RXCSR_P_SENDSTALL + | MUSB_RXCSR_FLUSHFIFO + | MUSB_RXCSR_CLRDATATOG + | MUSB_TXCSR_P_WZC_BITS; + musb_writew(regs, MUSB_RXCSR, + csr); + } + + /* select ep0 again */ + musb_ep_select(mbase, 0); + handled = 1; + } break; + + default: + /* class, vendor, etc ... delegate */ + handled = 0; + break; + } + break; + default: + /* delegate SET_CONFIGURATION, etc */ + handled = 0; + } + } else + handled = 0; + return handled; +} + +/* we have an ep0out data packet + * Context: caller holds controller lock + */ +static void ep0_rxstate(struct musb *musb) +{ + void __iomem *regs = musb->control_ep->regs; + struct usb_request *req; + u16 tmp; + + req = next_ep0_request(musb); + + /* read packet and ack; or stall because of gadget driver bug: + * should have provided the rx buffer before setup() returned. 
+ */ + if (req) { + void *buf = req->buf + req->actual; + unsigned len = req->length - req->actual; + + /* read the buffer */ + tmp = musb_readb(regs, MUSB_COUNT0); + if (tmp > len) { + req->status = -EOVERFLOW; + tmp = len; + } + musb_read_fifo(&musb->endpoints[0], tmp, buf); + req->actual += tmp; + tmp = MUSB_CSR0_P_SVDRXPKTRDY; + if (tmp < 64 || req->actual == req->length) { + musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; + tmp |= MUSB_CSR0_P_DATAEND; + } else + req = NULL; + } else + tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL; + + + /* Completion handler may choose to stall, e.g. because the + * message just received holds invalid data. + */ + if (req) { + musb->ackpend = tmp; + musb_g_ep0_giveback(musb, req); + if (!musb->ackpend) + return; + musb->ackpend = 0; + } + musb_writew(regs, MUSB_CSR0, tmp); +} + +/* + * transmitting to the host (IN), this code might be called from IRQ + * and from kernel thread. + * + * Context: caller holds controller lock + */ +static void ep0_txstate(struct musb *musb) +{ + void __iomem *regs = musb->control_ep->regs; + struct usb_request *request = next_ep0_request(musb); + u16 csr = MUSB_CSR0_TXPKTRDY; + u8 *fifo_src; + u8 fifo_count; + + if (!request) { + /* WARN_ON(1); */ + DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0)); + return; + } + + /* load the data */ + fifo_src = (u8 *) request->buf + request->actual; + fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE, + request->length - request->actual); + musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src); + request->actual += fifo_count; + + /* update the flags */ + if (fifo_count < MUSB_MAX_END0_PACKET + || request->actual == request->length) { + musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; + csr |= MUSB_CSR0_P_DATAEND; + } else + request = NULL; + + /* report completions as soon as the fifo's loaded; there's no + * win in waiting till this last packet gets acked. (other than + * very precise fault reporting, needed by USB TMC; possible with + * this hardware, but not usable from portable gadget drivers.) + */ + if (request) { + musb->ackpend = csr; + musb_g_ep0_giveback(musb, request); + if (!musb->ackpend) + return; + musb->ackpend = 0; + } + + /* send it out, triggering a "txpktrdy cleared" irq */ + musb_writew(regs, MUSB_CSR0, csr); +} + +/* + * Read a SETUP packet (struct usb_ctrlrequest) from the hardware. + * Fields are left in USB byte-order. + * + * Context: caller holds controller lock. + */ +static void +musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req) +{ + struct usb_request *r; + void __iomem *regs = musb->control_ep->regs; + + musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req); + + /* NOTE: earlier 2.6 versions changed setup packets to host + * order, but now USB packets always stay in USB byte order. + */ + DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n", + req->bRequestType, + req->bRequest, + le16_to_cpu(req->wValue), + le16_to_cpu(req->wIndex), + le16_to_cpu(req->wLength)); + + /* clean up any leftover transfers */ + r = next_ep0_request(musb); + if (r) + musb_g_ep0_giveback(musb, r); + + /* For zero-data requests we want to delay the STATUS stage to + * avoid SETUPEND errors. If we read data (OUT), delay accepting + * packets until there's a buffer to store them in. + * + * If we write data, the controller acts happier if we enable + * the TX FIFO right away, and give the controller a moment + * to switch modes... 
+ */ + musb->set_address = false; + musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY; + if (req->wLength == 0) { + if (req->bRequestType & USB_DIR_IN) + musb->ackpend |= MUSB_CSR0_TXPKTRDY; + musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT; + } else if (req->bRequestType & USB_DIR_IN) { + musb->ep0_state = MUSB_EP0_STAGE_TX; + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY); + while ((musb_readw(regs, MUSB_CSR0) + & MUSB_CSR0_RXPKTRDY) != 0) + cpu_relax(); + musb->ackpend = 0; + } else + musb->ep0_state = MUSB_EP0_STAGE_RX; +} + +static int +forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) +__releases(musb->lock) +__acquires(musb->lock) +{ + int retval; + if (!musb->gadget_driver) + return -EOPNOTSUPP; + spin_unlock(&musb->lock); + retval = musb->gadget_driver->setup(&musb->g, ctrlrequest); + spin_lock(&musb->lock); + return retval; +} + +/* + * Handle peripheral ep0 interrupt + * + * Context: irq handler; we won't re-enter the driver that way. + */ +irqreturn_t musb_g_ep0_irq(struct musb *musb) +{ + u16 csr; + u16 len; + void __iomem *mbase = musb->mregs; + void __iomem *regs = musb->endpoints[0].regs; + irqreturn_t retval = IRQ_NONE; + + musb_ep_select(mbase, 0); /* select ep0 */ + csr = musb_readw(regs, MUSB_CSR0); + len = musb_readb(regs, MUSB_COUNT0); + + DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n", + csr, len, + musb_readb(mbase, MUSB_FADDR), + decode_ep0stage(musb->ep0_state)); + + /* I sent a stall.. need to acknowledge it now.. */ + if (csr & MUSB_CSR0_P_SENTSTALL) { + musb_writew(regs, MUSB_CSR0, + csr & ~MUSB_CSR0_P_SENTSTALL); + retval = IRQ_HANDLED; + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + csr = musb_readw(regs, MUSB_CSR0); + } + + /* request ended "early" */ + if (csr & MUSB_CSR0_P_SETUPEND) { + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND); + retval = IRQ_HANDLED; + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + csr = musb_readw(regs, MUSB_CSR0); + /* NOTE: request may need completion */ + } + + /* docs from Mentor only describe tx, rx, and idle/setup states. + * we need to handle nuances around status stages, and also the + * case where status and setup stages come back-to-back ... + */ + switch (musb->ep0_state) { + + case MUSB_EP0_STAGE_TX: + /* irq on clearing txpktrdy */ + if ((csr & MUSB_CSR0_TXPKTRDY) == 0) { + ep0_txstate(musb); + retval = IRQ_HANDLED; + } + break; + + case MUSB_EP0_STAGE_RX: + /* irq on set rxpktrdy */ + if (csr & MUSB_CSR0_RXPKTRDY) { + ep0_rxstate(musb); + retval = IRQ_HANDLED; + } + break; + + case MUSB_EP0_STAGE_STATUSIN: + /* end of sequence #2 (OUT/RX state) or #3 (no data) */ + + /* update address (if needed) only @ the end of the + * status phase per usb spec, which also guarantees + * we get 10 msec to receive this irq... until this + * is done we won't see the next packet. 
+ */ + if (musb->set_address) { + musb->set_address = false; + musb_writeb(mbase, MUSB_FADDR, musb->address); + } + + /* enter test mode if needed (exit by reset) */ + else if (musb->test_mode) { + DBG(1, "entering TESTMODE\n"); + + if (MUSB_TEST_PACKET == musb->test_mode_nr) + musb_load_testpacket(musb); + + musb_writeb(mbase, MUSB_TESTMODE, + musb->test_mode_nr); + } + /* FALLTHROUGH */ + + case MUSB_EP0_STAGE_STATUSOUT: + /* end of sequence #1: write to host (TX state) */ + { + struct usb_request *req; + + req = next_ep0_request(musb); + if (req) + musb_g_ep0_giveback(musb, req); + } + retval = IRQ_HANDLED; + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + /* FALLTHROUGH */ + + case MUSB_EP0_STAGE_SETUP: + if (csr & MUSB_CSR0_RXPKTRDY) { + struct usb_ctrlrequest setup; + int handled = 0; + + if (len != 8) { + ERR("SETUP packet len %d != 8 ?\n", len); + break; + } + musb_read_setup(musb, &setup); + retval = IRQ_HANDLED; + + /* sometimes the RESET won't be reported */ + if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) { + u8 power; + + printk(KERN_NOTICE "%s: peripheral reset " + "irq lost!\n", + musb_driver_name); + power = musb_readb(mbase, MUSB_POWER); + musb->g.speed = (power & MUSB_POWER_HSMODE) + ? USB_SPEED_HIGH : USB_SPEED_FULL; + + } + + switch (musb->ep0_state) { + + /* sequence #3 (no data stage), includes requests + * we can't forward (notably SET_ADDRESS and the + * device/endpoint feature set/clear operations) + * plus SET_CONFIGURATION and others we must + */ + case MUSB_EP0_STAGE_ACKWAIT: + handled = service_zero_data_request( + musb, &setup); + + /* status stage might be immediate */ + if (handled > 0) { + musb->ackpend |= MUSB_CSR0_P_DATAEND; + musb->ep0_state = + MUSB_EP0_STAGE_STATUSIN; + } + break; + + /* sequence #1 (IN to host), includes GET_STATUS + * requests that we can't forward, GET_DESCRIPTOR + * and others that we must + */ + case MUSB_EP0_STAGE_TX: + handled = service_in_request(musb, &setup); + if (handled > 0) { + musb->ackpend = MUSB_CSR0_TXPKTRDY + | MUSB_CSR0_P_DATAEND; + musb->ep0_state = + MUSB_EP0_STAGE_STATUSOUT; + } + break; + + /* sequence #2 (OUT from host), always forward */ + default: /* MUSB_EP0_STAGE_RX */ + break; + } + + DBG(3, "handled %d, csr %04x, ep0stage %s\n", + handled, csr, + decode_ep0stage(musb->ep0_state)); + + /* unless we need to delegate this to the gadget + * driver, we know how to wrap this up: csr0 has + * not yet been written. + */ + if (handled < 0) + goto stall; + else if (handled > 0) + goto finish; + + handled = forward_to_driver(musb, &setup); + if (handled < 0) { + musb_ep_select(mbase, 0); +stall: + DBG(3, "stall (%d)\n", handled); + musb->ackpend |= MUSB_CSR0_P_SENDSTALL; + musb->ep0_state = MUSB_EP0_STAGE_SETUP; +finish: + musb_writew(regs, MUSB_CSR0, + musb->ackpend); + musb->ackpend = 0; + } + } + break; + + case MUSB_EP0_STAGE_ACKWAIT: + /* This should not happen. But happens with tusb6010 with + * g_file_storage and high speed. Do nothing. 
+ */ + retval = IRQ_HANDLED; + break; + + default: + /* "can't happen" */ + WARN_ON(1); + musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + break; + } + + return retval; +} + + +static int +musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) +{ + /* always enabled */ + return -EINVAL; +} + +static int musb_g_ep0_disable(struct usb_ep *e) +{ + /* always enabled */ + return -EINVAL; +} + +static int +musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags) +{ + struct musb_ep *ep; + struct musb_request *req; + struct musb *musb; + int status; + unsigned long lockflags; + void __iomem *regs; + + if (!e || !r) + return -EINVAL; + + ep = to_musb_ep(e); + musb = ep->musb; + regs = musb->control_ep->regs; + + req = to_musb_request(r); + req->musb = musb; + req->request.actual = 0; + req->request.status = -EINPROGRESS; + req->tx = ep->is_in; + + spin_lock_irqsave(&musb->lock, lockflags); + + if (!list_empty(&ep->req_list)) { + status = -EBUSY; + goto cleanup; + } + + switch (musb->ep0_state) { + case MUSB_EP0_STAGE_RX: /* control-OUT data */ + case MUSB_EP0_STAGE_TX: /* control-IN data */ + case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */ + status = 0; + break; + default: + DBG(1, "ep0 request queued in state %d\n", + musb->ep0_state); + status = -EINVAL; + goto cleanup; + } + + /* add request to the list */ + list_add_tail(&(req->request.list), &(ep->req_list)); + + DBG(3, "queue to %s (%s), length=%d\n", + ep->name, ep->is_in ? "IN/TX" : "OUT/RX", + req->request.length); + + musb_ep_select(musb->mregs, 0); + + /* sequence #1, IN ... start writing the data */ + if (musb->ep0_state == MUSB_EP0_STAGE_TX) + ep0_txstate(musb); + + /* sequence #3, no-data ... issue IN status */ + else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) { + if (req->request.length) + status = -EINVAL; + else { + musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; + musb_writew(regs, MUSB_CSR0, + musb->ackpend | MUSB_CSR0_P_DATAEND); + musb->ackpend = 0; + musb_g_ep0_giveback(ep->musb, r); + } + + /* else for sequence #2 (OUT), caller provides a buffer + * before the next packet arrives. deferred responses + * (after SETUP is acked) are racey. + */ + } else if (musb->ackpend) { + musb_writew(regs, MUSB_CSR0, musb->ackpend); + musb->ackpend = 0; + } + +cleanup: + spin_unlock_irqrestore(&musb->lock, lockflags); + return status; +} + +static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + /* we just won't support this */ + return -EINVAL; +} + +static int musb_g_ep0_halt(struct usb_ep *e, int value) +{ + struct musb_ep *ep; + struct musb *musb; + void __iomem *base, *regs; + unsigned long flags; + int status; + u16 csr; + + if (!e || !value) + return -EINVAL; + + ep = to_musb_ep(e); + musb = ep->musb; + base = musb->mregs; + regs = musb->control_ep->regs; + status = 0; + + spin_lock_irqsave(&musb->lock, flags); + + if (!list_empty(&ep->req_list)) { + status = -EBUSY; + goto cleanup; + } + + musb_ep_select(base, 0); + csr = musb->ackpend; + + switch (musb->ep0_state) { + + /* Stalls are usually issued after parsing SETUP packet, either + * directly in irq context from setup() or else later. 
+ */ + case MUSB_EP0_STAGE_TX: /* control-IN data */ + case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */ + case MUSB_EP0_STAGE_RX: /* control-OUT data */ + csr = musb_readw(regs, MUSB_CSR0); + /* FALLTHROUGH */ + + /* It's also OK to issue stalls during callbacks when a non-empty + * DATA stage buffer has been read (or even written). + */ + case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */ + case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */ + + csr |= MUSB_CSR0_P_SENDSTALL; + musb_writew(regs, MUSB_CSR0, csr); + musb->ep0_state = MUSB_EP0_STAGE_SETUP; + musb->ackpend = 0; + break; + default: + DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state); + status = -EINVAL; + } + +cleanup: + spin_unlock_irqrestore(&musb->lock, flags); + return status; +} + +const struct usb_ep_ops musb_g_ep0_ops = { + .enable = musb_g_ep0_enable, + .disable = musb_g_ep0_disable, + .alloc_request = musb_alloc_request, + .free_request = musb_free_request, + .queue = musb_g_ep0_queue, + .dequeue = musb_g_ep0_dequeue, + .set_halt = musb_g_ep0_halt, +}; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c new file mode 100644 index 00000000000..8b4be012669 --- /dev/null +++ b/drivers/usb/musb/musb_host.c @@ -0,0 +1,2170 @@ +/* + * MUSB OTG driver host support + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "musb_core.h" +#include "musb_host.h" + + +/* MUSB HOST status 22-mar-2006 + * + * - There's still lots of partial code duplication for fault paths, so + * they aren't handled as consistently as they need to be. + * + * - PIO mostly behaved when last tested. + * + including ep0, with all usbtest cases 9, 10 + * + usbtest 14 (ep0out) doesn't seem to run at all + * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest + * configurations, but otherwise double buffering passes basic tests. + * + for 2.6.N, for N > ~10, needs API changes for hcd framework. + * + * - DMA (CPPI) ... 
partially behaves, not currently recommended
+ *	+ about 1/15 the speed of typical EHCI implementations (PCI)
+ *	+ RX, all too often reqpkt seems to misbehave after tx
+ *	+ TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code to make NAKing for bulk or control
+ *   transfers unable to starve other requests; or to make efficient use
+ *   of hardware with periodic transfers. (Note that network drivers
+ *   commonly post bulk reads that stay pending for a long time; these
+ *   would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ *   extra endpoint for periodic use enabling hub + keybd + mouse. That
+ *   mostly works, except that with "usbnet" it's easy to trigger cases
+ *   with "ping" where RX loses. (a) ping to davinci, even "ping -f",
+ *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ *   although ARP RX wins. (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction. The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+ * "claimed" until its software queue is no longer refilled. No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+			struct urb *urb, unsigned int nOut,
+			u8 *buf, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+	void __iomem	*epio = ep->regs;
+	u16		csr;
+	int		retries = 1000;
+
+	csr = musb_readw(epio, MUSB_TXCSR);
+	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+		DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+		csr |= MUSB_TXCSR_FLUSHFIFO;
+		musb_writew(epio, MUSB_TXCSR, csr);
+		csr = musb_readw(epio, MUSB_TXCSR);
+		if (retries-- < 1) {
+			ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+			return;
+		}
+		mdelay(1);
+	}
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+	u16	txcsr;
+
+	/* NOTE: no locks here; caller should lock and select EP */
+	if (ep->epnum) {
+		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+	} else {
+		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+		musb_writew(ep->regs, MUSB_CSR0, txcsr);
+	}
+
+}
+
+static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+{
+	u16	txcsr;
+
+	/* NOTE: no locks here; caller should lock and select EP */
+	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue;
+ * end must be claimed from the caller.
+ * + * Context: controller locked, irqs blocked + */ +static void +musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) +{ + u16 frame; + u32 len; + void *buf; + void __iomem *mbase = musb->mregs; + struct urb *urb = next_urb(qh); + struct musb_hw_ep *hw_ep = qh->hw_ep; + unsigned pipe = urb->pipe; + u8 address = usb_pipedevice(pipe); + int epnum = hw_ep->epnum; + + /* initialize software qh state */ + qh->offset = 0; + qh->segsize = 0; + + /* gather right source of data */ + switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: + /* control transfers always start with SETUP */ + is_in = 0; + hw_ep->out_qh = qh; + musb->ep0_stage = MUSB_EP0_START; + buf = urb->setup_packet; + len = 8; + break; + case USB_ENDPOINT_XFER_ISOC: + qh->iso_idx = 0; + qh->frame = 0; + buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset; + len = urb->iso_frame_desc[0].length; + break; + default: /* bulk, interrupt */ + buf = urb->transfer_buffer; + len = urb->transfer_buffer_length; + } + + DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", + qh, urb, address, qh->epnum, + is_in ? "in" : "out", + ({char *s; switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: s = ""; break; + case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; + case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; + default: s = "-intr"; break; + }; s; }), + epnum, buf, len); + + /* Configure endpoint */ + if (is_in || hw_ep->is_shared_fifo) + hw_ep->in_qh = qh; + else + hw_ep->out_qh = qh; + musb_ep_program(musb, epnum, urb, !is_in, buf, len); + + /* transmit may have more work: start it when it is time */ + if (is_in) + return; + + /* determine if the time is right for a periodic transfer */ + switch (qh->type) { + case USB_ENDPOINT_XFER_ISOC: + case USB_ENDPOINT_XFER_INT: + DBG(3, "check whether there's still time for periodic Tx\n"); + qh->iso_idx = 0; + frame = musb_readw(mbase, MUSB_FRAME); + /* FIXME this doesn't implement that scheduling policy ... + * or handle framecounter wrapping + */ + if ((urb->transfer_flags & URB_ISO_ASAP) + || (frame >= urb->start_frame)) { + /* REVISIT the SOF irq handler shouldn't duplicate + * this code; and we don't init urb->start_frame... + */ + qh->frame = 0; + goto start; + } else { + qh->frame = urb->start_frame; + /* enable SOF interrupt so we can count down */ + DBG(1, "SOF for %d\n", epnum); +#if 1 /* ifndef CONFIG_ARCH_DAVINCI */ + musb_writeb(mbase, MUSB_INTRUSBE, 0xff); +#endif + } + break; + default: +start: + DBG(4, "Start TX%d %s\n", epnum, + hw_ep->tx_channel ? "dma" : "pio"); + + if (!hw_ep->tx_channel) + musb_h_tx_start(hw_ep); + else if (is_cppi_enabled() || tusb_dma_omap()) + cppi_host_txdma_start(hw_ep); + } +} + +/* caller owns controller lock, irqs are blocked */ +static void +__musb_giveback(struct musb *musb, struct urb *urb, int status) +__releases(musb->lock) +__acquires(musb->lock) +{ + DBG(({ int level; switch (urb->status) { + case 0: + level = 4; + break; + /* common/boring faults */ + case -EREMOTEIO: + case -ESHUTDOWN: + case -ECONNRESET: + case -EPIPE: + level = 3; + break; + default: + level = 2; + break; + }; level; }), + "complete %p (%d), dev%d ep%d%s, %d/%d\n", + urb, urb->status, + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? 
"in" : "out", + urb->actual_length, urb->transfer_buffer_length + ); + + spin_unlock(&musb->lock); + usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); + spin_lock(&musb->lock); +} + +/* for bulk/interrupt endpoints only */ +static inline void +musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb) +{ + struct usb_device *udev = urb->dev; + u16 csr; + void __iomem *epio = ep->regs; + struct musb_qh *qh; + + /* FIXME: the current Mentor DMA code seems to have + * problems getting toggle correct. + */ + + if (is_in || ep->is_shared_fifo) + qh = ep->in_qh; + else + qh = ep->out_qh; + + if (!is_in) { + csr = musb_readw(epio, MUSB_TXCSR); + usb_settoggle(udev, qh->epnum, 1, + (csr & MUSB_TXCSR_H_DATATOGGLE) + ? 1 : 0); + } else { + csr = musb_readw(epio, MUSB_RXCSR); + usb_settoggle(udev, qh->epnum, 0, + (csr & MUSB_RXCSR_H_DATATOGGLE) + ? 1 : 0); + } +} + +/* caller owns controller lock, irqs are blocked */ +static struct musb_qh * +musb_giveback(struct musb_qh *qh, struct urb *urb, int status) +{ + int is_in; + struct musb_hw_ep *ep = qh->hw_ep; + struct musb *musb = ep->musb; + int ready = qh->is_ready; + + if (ep->is_shared_fifo) + is_in = 1; + else + is_in = usb_pipein(urb->pipe); + + /* save toggle eagerly, for paranoia */ + switch (qh->type) { + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + musb_save_toggle(ep, is_in, urb); + break; + case USB_ENDPOINT_XFER_ISOC: + if (status == 0 && urb->error_count) + status = -EXDEV; + break; + } + + usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); + + qh->is_ready = 0; + __musb_giveback(musb, urb, status); + qh->is_ready = ready; + + /* reclaim resources (and bandwidth) ASAP; deschedule it, and + * invalidate qh as soon as list_empty(&hep->urb_list) + */ + if (list_empty(&qh->hep->urb_list)) { + struct list_head *head; + + if (is_in) + ep->rx_reinit = 1; + else + ep->tx_reinit = 1; + + /* clobber old pointers to this qh */ + if (is_in || ep->is_shared_fifo) + ep->in_qh = NULL; + else + ep->out_qh = NULL; + qh->hep->hcpriv = NULL; + + switch (qh->type) { + + case USB_ENDPOINT_XFER_ISOC: + case USB_ENDPOINT_XFER_INT: + /* this is where periodic bandwidth should be + * de-allocated if it's tracked and allocated; + * and where we'd update the schedule tree... + */ + musb->periodic[ep->epnum] = NULL; + kfree(qh); + qh = NULL; + break; + + case USB_ENDPOINT_XFER_CONTROL: + case USB_ENDPOINT_XFER_BULK: + /* fifo policy for these lists, except that NAKing + * should rotate a qh to the end (for fairness). + */ + head = qh->ring.prev; + list_del(&qh->ring); + kfree(qh); + qh = first_qh(head); + break; + } + } + return qh; +} + +/* + * Advance this hardware endpoint's queue, completing the specified urb and + * advancing to either the next urb queued to that qh, or else invalidating + * that qh and advancing to the next qh scheduled after the current one. + * + * Context: caller owns controller lock, irqs are blocked + */ +static void +musb_advance_schedule(struct musb *musb, struct urb *urb, + struct musb_hw_ep *hw_ep, int is_in) +{ + struct musb_qh *qh; + + if (is_in || hw_ep->is_shared_fifo) + qh = hw_ep->in_qh; + else + qh = hw_ep->out_qh; + + if (urb->status == -EINPROGRESS) + qh = musb_giveback(qh, urb, 0); + else + qh = musb_giveback(qh, urb, urb->status); + + if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) { + DBG(4, "... next ep%d %cX urb %p\n", + hw_ep->epnum, is_in ? 
'R' : 'T', + next_urb(qh)); + musb_start_urb(musb, is_in, qh); + } +} + +static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) +{ + /* we don't want fifo to fill itself again; + * ignore dma (various models), + * leave toggle alone (may not have been saved yet) + */ + csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; + csr &= ~(MUSB_RXCSR_H_REQPKT + | MUSB_RXCSR_H_AUTOREQ + | MUSB_RXCSR_AUTOCLEAR); + + /* write 2x to allow double buffering */ + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + + /* flush writebuffer */ + return musb_readw(hw_ep->regs, MUSB_RXCSR); +} + +/* + * PIO RX for a packet (or part of it). + */ +static bool +musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) +{ + u16 rx_count; + u8 *buf; + u16 csr; + bool done = false; + u32 length; + int do_flush = 0; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->in_qh; + int pipe = urb->pipe; + void *buffer = urb->transfer_buffer; + + /* musb_ep_select(mbase, epnum); */ + rx_count = musb_readw(epio, MUSB_RXCOUNT); + DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, + urb->transfer_buffer, qh->offset, + urb->transfer_buffer_length); + + /* unload FIFO */ + if (usb_pipeisoc(pipe)) { + int status = 0; + struct usb_iso_packet_descriptor *d; + + if (iso_err) { + status = -EILSEQ; + urb->error_count++; + } + + d = urb->iso_frame_desc + qh->iso_idx; + buf = buffer + d->offset; + length = d->length; + if (rx_count > length) { + if (status == 0) { + status = -EOVERFLOW; + urb->error_count++; + } + DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); + do_flush = 1; + } else + length = rx_count; + urb->actual_length += length; + d->actual_length = length; + + d->status = status; + + /* see if we are done */ + done = (++qh->iso_idx >= urb->number_of_packets); + } else { + /* non-isoch */ + buf = buffer + qh->offset; + length = urb->transfer_buffer_length - qh->offset; + if (rx_count > length) { + if (urb->status == -EINPROGRESS) + urb->status = -EOVERFLOW; + DBG(2, "** OVERFLOW %d into %d\n", rx_count, length); + do_flush = 1; + } else + length = rx_count; + urb->actual_length += length; + qh->offset += length; + + /* see if we are done */ + done = (urb->actual_length == urb->transfer_buffer_length) + || (rx_count < qh->maxpacket) + || (urb->status != -EINPROGRESS); + if (done + && (urb->status == -EINPROGRESS) + && (urb->transfer_flags & URB_SHORT_NOT_OK) + && (urb->actual_length + < urb->transfer_buffer_length)) + urb->status = -EREMOTEIO; + } + + musb_read_fifo(hw_ep, length, buf); + + csr = musb_readw(epio, MUSB_RXCSR); + csr |= MUSB_RXCSR_H_WZC_BITS; + if (unlikely(do_flush)) + musb_h_flush_rxfifo(hw_ep, csr); + else { + /* REVISIT this assumes AUTOCLEAR is never set */ + csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); + if (!done) + csr |= MUSB_RXCSR_H_REQPKT; + musb_writew(epio, MUSB_RXCSR, csr); + } + + return done; +} + +/* we don't always need to reinit a given side of an endpoint... + * when we do, use tx/rx reinit routine and then construct a new CSR + * to address data toggle, NYET, and DMA or PIO. + * + * it's possible that driver bugs (especially for DMA) or aborting a + * transfer might have left the endpoint busier than it should be. + * the busy/not-empty tests are basically paranoia. 
+ */ +static void +musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) +{ + u16 csr; + + /* NOTE: we know the "rx" fifo reinit never triggers for ep0. + * That always uses tx_reinit since ep0 repurposes TX register + * offsets; the initial SETUP packet is also a kind of OUT. + */ + + /* if programmed for Tx, put it in RX mode */ + if (ep->is_shared_fifo) { + csr = musb_readw(ep->regs, MUSB_TXCSR); + if (csr & MUSB_TXCSR_MODE) { + musb_h_tx_flush_fifo(ep); + musb_writew(ep->regs, MUSB_TXCSR, + MUSB_TXCSR_FRCDATATOG); + } + /* clear mode (and everything else) to enable Rx */ + musb_writew(ep->regs, MUSB_TXCSR, 0); + + /* scrub all previous state, clearing toggle */ + } else { + csr = musb_readw(ep->regs, MUSB_RXCSR); + if (csr & MUSB_RXCSR_RXPKTRDY) + WARNING("rx%d, packet/%d ready?\n", ep->epnum, + musb_readw(ep->regs, MUSB_RXCOUNT)); + + musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); + } + + /* target addr and (for multipoint) hub addr/port */ + if (musb->is_multipoint) { + musb_writeb(ep->target_regs, MUSB_RXFUNCADDR, + qh->addr_reg); + musb_writeb(ep->target_regs, MUSB_RXHUBADDR, + qh->h_addr_reg); + musb_writeb(ep->target_regs, MUSB_RXHUBPORT, + qh->h_port_reg); + } else + musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); + + /* protocol/endpoint, interval/NAKlimit, i/o size */ + musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); + musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); + /* NOTE: bulk combining rewrites high bits of maxpacket */ + musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket); + + ep->rx_reinit = 0; +} + + +/* + * Program an HDRC endpoint as per the given URB + * Context: irqs blocked, controller lock held + */ +static void musb_ep_program(struct musb *musb, u8 epnum, + struct urb *urb, unsigned int is_out, + u8 *buf, u32 len) +{ + struct dma_controller *dma_controller; + struct dma_channel *dma_channel; + u8 dma_ok; + void __iomem *mbase = musb->mregs; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh; + u16 packet_sz; + + if (!is_out || hw_ep->is_shared_fifo) + qh = hw_ep->in_qh; + else + qh = hw_ep->out_qh; + + packet_sz = qh->maxpacket; + + DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " + "h_addr%02x h_port%02x bytes %d\n", + is_out ? "-->" : "<--", + epnum, urb, urb->dev->speed, + qh->addr_reg, qh->epnum, is_out ? "out" : "in", + qh->h_addr_reg, qh->h_port_reg, + len); + + musb_ep_select(mbase, epnum); + + /* candidate for DMA? */ + dma_controller = musb->dma_controller; + if (is_dma_capable() && epnum && dma_controller) { + dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; + if (!dma_channel) { + dma_channel = dma_controller->channel_alloc( + dma_controller, hw_ep, is_out); + if (is_out) + hw_ep->tx_channel = dma_channel; + else + hw_ep->rx_channel = dma_channel; + } + } else + dma_channel = NULL; + + /* make sure we clear DMAEnab, autoSet bits from previous run */ + + /* OUT/transmit/EP0 or IN/receive? 
*/ + if (is_out) { + u16 csr; + u16 int_txe; + u16 load_count; + + csr = musb_readw(epio, MUSB_TXCSR); + + /* disable interrupt in case we flush */ + int_txe = musb_readw(mbase, MUSB_INTRTXE); + musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); + + /* general endpoint setup */ + if (epnum) { + /* ASSERT: TXCSR_DMAENAB was already cleared */ + + /* flush all old state, set default */ + musb_h_tx_flush_fifo(hw_ep); + csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT + | MUSB_TXCSR_DMAMODE + | MUSB_TXCSR_FRCDATATOG + | MUSB_TXCSR_H_RXSTALL + | MUSB_TXCSR_H_ERROR + | MUSB_TXCSR_TXPKTRDY + ); + csr |= MUSB_TXCSR_MODE; + + if (usb_gettoggle(urb->dev, + qh->epnum, 1)) + csr |= MUSB_TXCSR_H_WR_DATATOGGLE + | MUSB_TXCSR_H_DATATOGGLE; + else + csr |= MUSB_TXCSR_CLRDATATOG; + + /* twice in case of double packet buffering */ + musb_writew(epio, MUSB_TXCSR, csr); + /* REVISIT may need to clear FLUSHFIFO ... */ + musb_writew(epio, MUSB_TXCSR, csr); + csr = musb_readw(epio, MUSB_TXCSR); + } else { + /* endpoint 0: just flush */ + musb_writew(epio, MUSB_CSR0, + csr | MUSB_CSR0_FLUSHFIFO); + musb_writew(epio, MUSB_CSR0, + csr | MUSB_CSR0_FLUSHFIFO); + } + + /* target addr and (for multipoint) hub addr/port */ + if (musb->is_multipoint) { + musb_writeb(mbase, + MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR), + qh->addr_reg); + musb_writeb(mbase, + MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR), + qh->h_addr_reg); + musb_writeb(mbase, + MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT), + qh->h_port_reg); +/* FIXME if !epnum, do the same for RX ... */ + } else + musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); + + /* protocol/endpoint/interval/NAKlimit */ + if (epnum) { + musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); + if (can_bulk_split(musb, qh->type)) + musb_writew(epio, MUSB_TXMAXP, + packet_sz + | ((hw_ep->max_packet_sz_tx / + packet_sz) - 1) << 11); + else + musb_writew(epio, MUSB_TXMAXP, + packet_sz); + musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); + } else { + musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); + if (musb->is_multipoint) + musb_writeb(epio, MUSB_TYPE0, + qh->type_reg); + } + + if (can_bulk_split(musb, qh->type)) + load_count = min((u32) hw_ep->max_packet_sz_tx, + len); + else + load_count = min((u32) packet_sz, len); + +#ifdef CONFIG_USB_INVENTRA_DMA + if (dma_channel) { + + /* clear previous state */ + csr = musb_readw(epio, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAMODE + | MUSB_TXCSR_DMAENAB); + csr |= MUSB_TXCSR_MODE; + musb_writew(epio, MUSB_TXCSR, + csr | MUSB_TXCSR_MODE); + + qh->segsize = min(len, dma_channel->max_len); + + if (qh->segsize <= packet_sz) + dma_channel->desired_mode = 0; + else + dma_channel->desired_mode = 1; + + + if (dma_channel->desired_mode == 0) { + csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAMODE); + csr |= (MUSB_TXCSR_DMAENAB); + /* against programming guide */ + } else + csr |= (MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_DMAMODE); + + musb_writew(epio, MUSB_TXCSR, csr); + + dma_ok = dma_controller->channel_program( + dma_channel, packet_sz, + dma_channel->desired_mode, + urb->transfer_dma, + qh->segsize); + if (dma_ok) { + load_count = 0; + } else { + dma_controller->channel_release(dma_channel); + if (is_out) + hw_ep->tx_channel = NULL; + else + hw_ep->rx_channel = NULL; + dma_channel = NULL; + } + } +#endif + + /* candidate for DMA */ + if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { + + /* program endpoint CSRs first, then setup DMA. + * assume CPPI setup succeeds. + * defer enabling dma. 
+ */ + csr = musb_readw(epio, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAMODE + | MUSB_TXCSR_DMAENAB); + csr |= MUSB_TXCSR_MODE; + musb_writew(epio, MUSB_TXCSR, + csr | MUSB_TXCSR_MODE); + + dma_channel->actual_len = 0L; + qh->segsize = len; + + /* TX uses "rndis" mode automatically, but needs help + * to identify the zero-length-final-packet case. + */ + dma_ok = dma_controller->channel_program( + dma_channel, packet_sz, + (urb->transfer_flags + & URB_ZERO_PACKET) + == URB_ZERO_PACKET, + urb->transfer_dma, + qh->segsize); + if (dma_ok) { + load_count = 0; + } else { + dma_controller->channel_release(dma_channel); + hw_ep->tx_channel = NULL; + dma_channel = NULL; + + /* REVISIT there's an error path here that + * needs handling: can't do dma, but + * there's no pio buffer address... + */ + } + } + + if (load_count) { + /* ASSERT: TXCSR_DMAENAB was already cleared */ + + /* PIO to load FIFO */ + qh->segsize = load_count; + musb_write_fifo(hw_ep, load_count, buf); + csr = musb_readw(epio, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_DMAMODE + | MUSB_TXCSR_AUTOSET); + /* write CSR */ + csr |= MUSB_TXCSR_MODE; + + if (epnum) + musb_writew(epio, MUSB_TXCSR, csr); + } + + /* re-enable interrupt */ + musb_writew(mbase, MUSB_INTRTXE, int_txe); + + /* IN/receive */ + } else { + u16 csr; + + if (hw_ep->rx_reinit) { + musb_rx_reinit(musb, qh, hw_ep); + + /* init new state: toggle and NYET, maybe DMA later */ + if (usb_gettoggle(urb->dev, qh->epnum, 0)) + csr = MUSB_RXCSR_H_WR_DATATOGGLE + | MUSB_RXCSR_H_DATATOGGLE; + else + csr = 0; + if (qh->type == USB_ENDPOINT_XFER_INT) + csr |= MUSB_RXCSR_DISNYET; + + } else { + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + + if (csr & (MUSB_RXCSR_RXPKTRDY + | MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_H_REQPKT)) + ERR("broken !rx_reinit, ep%d csr %04x\n", + hw_ep->epnum, csr); + + /* scrub any stale state, leaving toggle alone */ + csr &= MUSB_RXCSR_DISNYET; + } + + /* kick things off */ + + if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { + /* candidate for DMA */ + if (dma_channel) { + dma_channel->actual_len = 0L; + qh->segsize = len; + + /* AUTOREQ is in a DMA register */ + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + csr = musb_readw(hw_ep->regs, + MUSB_RXCSR); + + /* unless caller treats short rx transfers as + * errors, we dare not queue multiple transfers. + */ + dma_ok = dma_controller->channel_program( + dma_channel, packet_sz, + !(urb->transfer_flags + & URB_SHORT_NOT_OK), + urb->transfer_dma, + qh->segsize); + if (!dma_ok) { + dma_controller->channel_release( + dma_channel); + hw_ep->rx_channel = NULL; + dma_channel = NULL; + } else + csr |= MUSB_RXCSR_DMAENAB; + } + } + + csr |= MUSB_RXCSR_H_REQPKT; + DBG(7, "RXCSR%d := %04x\n", epnum, csr); + musb_writew(hw_ep->regs, MUSB_RXCSR, csr); + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + } +} + + +/* + * Service the default endpoint (ep0) as host. + * Return true until it's time to start the status stage. 
+ */ +static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) +{ + bool more = false; + u8 *fifo_dest = NULL; + u16 fifo_count = 0; + struct musb_hw_ep *hw_ep = musb->control_ep; + struct musb_qh *qh = hw_ep->in_qh; + struct usb_ctrlrequest *request; + + switch (musb->ep0_stage) { + case MUSB_EP0_IN: + fifo_dest = urb->transfer_buffer + urb->actual_length; + fifo_count = min(len, ((u16) (urb->transfer_buffer_length + - urb->actual_length))); + if (fifo_count < len) + urb->status = -EOVERFLOW; + + musb_read_fifo(hw_ep, fifo_count, fifo_dest); + + urb->actual_length += fifo_count; + if (len < qh->maxpacket) { + /* always terminate on short read; it's + * rarely reported as an error. + */ + } else if (urb->actual_length < + urb->transfer_buffer_length) + more = true; + break; + case MUSB_EP0_START: + request = (struct usb_ctrlrequest *) urb->setup_packet; + + if (!request->wLength) { + DBG(4, "start no-DATA\n"); + break; + } else if (request->bRequestType & USB_DIR_IN) { + DBG(4, "start IN-DATA\n"); + musb->ep0_stage = MUSB_EP0_IN; + more = true; + break; + } else { + DBG(4, "start OUT-DATA\n"); + musb->ep0_stage = MUSB_EP0_OUT; + more = true; + } + /* FALLTHROUGH */ + case MUSB_EP0_OUT: + fifo_count = min(qh->maxpacket, ((u16) + (urb->transfer_buffer_length + - urb->actual_length))); + + if (fifo_count) { + fifo_dest = (u8 *) (urb->transfer_buffer + + urb->actual_length); + DBG(3, "Sending %d bytes to %p\n", + fifo_count, fifo_dest); + musb_write_fifo(hw_ep, fifo_count, fifo_dest); + + urb->actual_length += fifo_count; + more = true; + } + break; + default: + ERR("bogus ep0 stage %d\n", musb->ep0_stage); + break; + } + + return more; +} + +/* + * Handle default endpoint interrupt as host. Only called in IRQ time + * from the LinuxIsr() interrupt service routine. + * + * called with controller irqlocked + */ +irqreturn_t musb_h_ep0_irq(struct musb *musb) +{ + struct urb *urb; + u16 csr, len; + int status = 0; + void __iomem *mbase = musb->mregs; + struct musb_hw_ep *hw_ep = musb->control_ep; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->in_qh; + bool complete = false; + irqreturn_t retval = IRQ_NONE; + + /* ep0 only has one queue, "in" */ + urb = next_urb(qh); + + musb_ep_select(mbase, 0); + csr = musb_readw(epio, MUSB_CSR0); + len = (csr & MUSB_CSR0_RXPKTRDY) + ? musb_readb(epio, MUSB_COUNT0) + : 0; + + DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", + csr, qh, len, urb, musb->ep0_stage); + + /* if we just did status stage, we are done */ + if (MUSB_EP0_STATUS == musb->ep0_stage) { + retval = IRQ_HANDLED; + complete = true; + } + + /* prepare status */ + if (csr & MUSB_CSR0_H_RXSTALL) { + DBG(6, "STALLING ENDPOINT\n"); + status = -EPIPE; + + } else if (csr & MUSB_CSR0_H_ERROR) { + DBG(2, "no response, csr0 %04x\n", csr); + status = -EPROTO; + + } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { + DBG(2, "control NAK timeout\n"); + + /* NOTE: this code path would be a good place to PAUSE a + * control transfer, if another one is queued, so that + * ep0 is more likely to stay busy. + * + * if (qh->ring.next != &musb->control), then + * we have a candidate... 
NAKing is *NOT* an error + */ + musb_writew(epio, MUSB_CSR0, 0); + retval = IRQ_HANDLED; + } + + if (status) { + DBG(6, "aborting\n"); + retval = IRQ_HANDLED; + if (urb) + urb->status = status; + complete = true; + + /* use the proper sequence to abort the transfer */ + if (csr & MUSB_CSR0_H_REQPKT) { + csr &= ~MUSB_CSR0_H_REQPKT; + musb_writew(epio, MUSB_CSR0, csr); + csr &= ~MUSB_CSR0_H_NAKTIMEOUT; + musb_writew(epio, MUSB_CSR0, csr); + } else { + csr |= MUSB_CSR0_FLUSHFIFO; + musb_writew(epio, MUSB_CSR0, csr); + musb_writew(epio, MUSB_CSR0, csr); + csr &= ~MUSB_CSR0_H_NAKTIMEOUT; + musb_writew(epio, MUSB_CSR0, csr); + } + + musb_writeb(epio, MUSB_NAKLIMIT0, 0); + + /* clear it */ + musb_writew(epio, MUSB_CSR0, 0); + } + + if (unlikely(!urb)) { + /* stop endpoint since we have no place for its data, this + * SHOULD NEVER HAPPEN! */ + ERR("no URB for end 0\n"); + + musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); + musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO); + musb_writew(epio, MUSB_CSR0, 0); + + goto done; + } + + if (!complete) { + /* call common logic and prepare response */ + if (musb_h_ep0_continue(musb, len, urb)) { + /* more packets required */ + csr = (MUSB_EP0_IN == musb->ep0_stage) + ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; + } else { + /* data transfer complete; perform status phase */ + if (usb_pipeout(urb->pipe) + || !urb->transfer_buffer_length) + csr = MUSB_CSR0_H_STATUSPKT + | MUSB_CSR0_H_REQPKT; + else + csr = MUSB_CSR0_H_STATUSPKT + | MUSB_CSR0_TXPKTRDY; + + /* flag status stage */ + musb->ep0_stage = MUSB_EP0_STATUS; + + DBG(5, "ep0 STATUS, csr %04x\n", csr); + + } + musb_writew(epio, MUSB_CSR0, csr); + retval = IRQ_HANDLED; + } else + musb->ep0_stage = MUSB_EP0_IDLE; + + /* call completion handler if done */ + if (complete) + musb_advance_schedule(musb, urb, hw_ep, 1); +done: + return retval; +} + + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Host side TX (OUT) using Mentor DMA works as follows: + submit_urb -> + - if queue was empty, Program Endpoint + - ... which starts DMA to fifo in mode 1 or 0 + + DMA Isr (transfer complete) -> TxAvail() + - Stop DMA (~DmaEnab) (<--- Alert ... currently happens + only in musb_cleanup_urb) + - TxPktRdy has to be set in mode 0 or for + short packets in mode 1. +*/ + +#endif + +/* Service a Tx-Available or dma completion irq for the endpoint */ +void musb_host_tx(struct musb *musb, u8 epnum) +{ + int pipe; + bool done = false; + u16 tx_csr; + size_t wLength = 0; + u8 *buf = NULL; + struct urb *urb; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->out_qh; + u32 status = 0; + void __iomem *mbase = musb->mregs; + struct dma_channel *dma; + + urb = next_urb(qh); + + musb_ep_select(mbase, epnum); + tx_csr = musb_readw(epio, MUSB_TXCSR); + + /* with CPPI, DMA sometimes triggers "extra" irqs */ + if (!urb) { + DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); + goto finish; + } + + pipe = urb->pipe; + dma = is_dma_capable() ? hw_ep->tx_channel : NULL; + DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, + dma ? 
", dma" : ""); + + /* check for errors */ + if (tx_csr & MUSB_TXCSR_H_RXSTALL) { + /* dma was disabled, fifo flushed */ + DBG(3, "TX end %d stall\n", epnum); + + /* stall; record URB status */ + status = -EPIPE; + + } else if (tx_csr & MUSB_TXCSR_H_ERROR) { + /* (NON-ISO) dma was disabled, fifo flushed */ + DBG(3, "TX 3strikes on ep=%d\n", epnum); + + status = -ETIMEDOUT; + + } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { + DBG(6, "TX end=%d device not responding\n", epnum); + + /* NOTE: this code path would be a good place to PAUSE a + * transfer, if there's some other (nonperiodic) tx urb + * that could use this fifo. (dma complicates it...) + * + * if (bulk && qh->ring.next != &musb->out_bulk), then + * we have a candidate... NAKing is *NOT* an error + */ + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_TXCSR, + MUSB_TXCSR_H_WZC_BITS + | MUSB_TXCSR_TXPKTRDY); + goto finish; + } + + if (status) { + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + (void) musb->dma_controller->channel_abort(dma); + } + + /* do the proper sequence to abort the transfer in the + * usb core; the dma engine should already be stopped. + */ + musb_h_tx_flush_fifo(hw_ep); + tx_csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_H_ERROR + | MUSB_TXCSR_H_RXSTALL + | MUSB_TXCSR_H_NAKTIMEOUT + ); + + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_TXCSR, tx_csr); + /* REVISIT may need to clear FLUSHFIFO ... */ + musb_writew(epio, MUSB_TXCSR, tx_csr); + musb_writeb(epio, MUSB_TXINTERVAL, 0); + + done = true; + } + + /* second cppi case */ + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr); + goto finish; + + } + + /* REVISIT this looks wrong... */ + if (!status || dma || usb_pipeisoc(pipe)) { + if (dma) + wLength = dma->actual_len; + else + wLength = qh->segsize; + qh->offset += wLength; + + if (usb_pipeisoc(pipe)) { + struct usb_iso_packet_descriptor *d; + + d = urb->iso_frame_desc + qh->iso_idx; + d->actual_length = qh->segsize; + if (++qh->iso_idx >= urb->number_of_packets) { + done = true; + } else { + d++; + buf = urb->transfer_buffer + d->offset; + wLength = d->length; + } + } else if (dma) { + done = true; + } else { + /* see if we need to send more data, or ZLP */ + if (qh->segsize < qh->maxpacket) + done = true; + else if (qh->offset == urb->transfer_buffer_length + && !(urb->transfer_flags + & URB_ZERO_PACKET)) + done = true; + if (!done) { + buf = urb->transfer_buffer + + qh->offset; + wLength = urb->transfer_buffer_length + - qh->offset; + } + } + } + + /* urb->status != -EINPROGRESS means request has been faulted, + * so we must abort this transfer after cleanup + */ + if (urb->status != -EINPROGRESS) { + done = true; + if (status == 0) + status = urb->status; + } + + if (done) { + /* set status */ + urb->status = status; + urb->actual_length = qh->offset; + musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); + + } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) { + /* WARN_ON(!buf); */ + + /* REVISIT: some docs say that when hw_ep->tx_double_buffered, + * (and presumably, fifo is not half-full) we should write TWO + * packets before updating TXCSR ... other docs disagree ... 
+ */ + /* PIO: start next packet in this URB */ + wLength = min(qh->maxpacket, (u16) wLength); + musb_write_fifo(hw_ep, wLength, buf); + qh->segsize = wLength; + + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_TXCSR, + MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); + } else + DBG(1, "not complete, but dma enabled?\n"); + +finish: + return; +} + + +#ifdef CONFIG_USB_INVENTRA_DMA + +/* Host side RX (IN) using Mentor DMA works as follows: + submit_urb -> + - if queue was empty, ProgramEndpoint + - first IN token is sent out (by setting ReqPkt) + LinuxIsr -> RxReady() + /\ => first packet is received + | - Set in mode 0 (DmaEnab, ~ReqPkt) + | -> DMA Isr (transfer complete) -> RxReady() + | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) + | - if urb not complete, send next IN token (ReqPkt) + | | else complete urb. + | | + --------------------------- + * + * Nuances of mode 1: + * For short packets, no ack (+RxPktRdy) is sent automatically + * (even if AutoClear is ON) + * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent + * automatically => major problem, as collecting the next packet becomes + * difficult. Hence mode 1 is not used. + * + * REVISIT + * All we care about at this driver level is that + * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; + * (b) termination conditions are: short RX, or buffer full; + * (c) fault modes include + * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. + * (and that endpoint's dma queue stops immediately) + * - overflow (full, PLUS more bytes in the terminal packet) + * + * So for example, usb-storage sets URB_SHORT_NOT_OK, and would + * thus be a great candidate for using mode 1 ... for all but the + * last packet of one URB's transfer. + */ + +#endif + +/* + * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, + * and high-bandwidth IN transfer cases. + */ +void musb_host_rx(struct musb *musb, u8 epnum) +{ + struct urb *urb; + struct musb_hw_ep *hw_ep = musb->endpoints + epnum; + void __iomem *epio = hw_ep->regs; + struct musb_qh *qh = hw_ep->in_qh; + size_t xfer_len; + void __iomem *mbase = musb->mregs; + int pipe; + u16 rx_csr, val; + bool iso_err = false; + bool done = false; + u32 status; + struct dma_channel *dma; + + musb_ep_select(mbase, epnum); + + urb = next_urb(qh); + dma = is_dma_capable() ? hw_ep->rx_channel : NULL; + status = 0; + xfer_len = 0; + + rx_csr = musb_readw(epio, MUSB_RXCSR); + val = rx_csr; + + if (unlikely(!urb)) { + /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least + * usbtest #11 (unlinks) triggers it regularly, sometimes + * with fifo full. (Only with DMA??) + */ + DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, + musb_readw(epio, MUSB_RXCOUNT)); + musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); + return; + } + + pipe = urb->pipe; + + DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", + epnum, rx_csr, urb->actual_length, + dma ? dma->actual_len : 0); + + /* check for errors, concurrent stall & unlink is not really + * handled yet! 
*/ + if (rx_csr & MUSB_RXCSR_H_RXSTALL) { + DBG(3, "RX end %d STALL\n", epnum); + + /* stall; record URB status */ + status = -EPIPE; + + } else if (rx_csr & MUSB_RXCSR_H_ERROR) { + DBG(3, "end %d RX proto error\n", epnum); + + status = -EPROTO; + musb_writeb(epio, MUSB_RXINTERVAL, 0); + + } else if (rx_csr & MUSB_RXCSR_DATAERROR) { + + if (USB_ENDPOINT_XFER_ISOC != qh->type) { + /* NOTE this code path would be a good place to PAUSE a + * transfer, if there's some other (nonperiodic) rx urb + * that could use this fifo. (dma complicates it...) + * + * if (bulk && qh->ring.next != &musb->in_bulk), then + * we have a candidate... NAKing is *NOT* an error + */ + DBG(6, "RX end %d NAK timeout\n", epnum); + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS + | MUSB_RXCSR_H_REQPKT); + + goto finish; + } else { + DBG(4, "RX end %d ISO data error\n", epnum); + /* packet error reported later */ + iso_err = true; + } + } + + /* faults abort the transfer */ + if (status) { + /* clean up dma and collect transfer count */ + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + (void) musb->dma_controller->channel_abort(dma); + xfer_len = dma->actual_len; + } + musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); + musb_writeb(epio, MUSB_RXINTERVAL, 0); + done = true; + goto finish; + } + + if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { + /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ + ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); + goto finish; + } + + /* thorough shutdown for now ... given more precise fault handling + * and better queueing support, we might keep a DMA pipeline going + * while processing this irq for earlier completions. + */ + + /* FIXME this is _way_ too much in-line logic for Mentor DMA */ + +#ifndef CONFIG_USB_INVENTRA_DMA + if (rx_csr & MUSB_RXCSR_H_REQPKT) { + /* REVISIT this happened for a while on some short reads... + * the cleanup still needs investigation... looks bad... + * and also duplicates dma cleanup code above ... plus, + * shouldn't this be the "half full" double buffer case? + */ + if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { + dma->status = MUSB_DMA_STATUS_CORE_ABORT; + (void) musb->dma_controller->channel_abort(dma); + xfer_len = dma->actual_len; + done = true; + } + + DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, + xfer_len, dma ? ", dma" : ""); + rx_csr &= ~MUSB_RXCSR_H_REQPKT; + + musb_ep_select(mbase, epnum); + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS | rx_csr); + } +#endif + if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { + xfer_len = dma->actual_len; + + val &= ~(MUSB_RXCSR_DMAENAB + | MUSB_RXCSR_H_AUTOREQ + | MUSB_RXCSR_AUTOCLEAR + | MUSB_RXCSR_RXPKTRDY); + musb_writew(hw_ep->regs, MUSB_RXCSR, val); + +#ifdef CONFIG_USB_INVENTRA_DMA + /* done if urb buffer is full or short packet is recd */ + done = (urb->actual_length + xfer_len >= + urb->transfer_buffer_length + || dma->actual_len < qh->maxpacket); + + /* send IN token for next packet, without AUTOREQ */ + if (!done) { + val |= MUSB_RXCSR_H_REQPKT; + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS | val); + } + + DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, + done ? 
"off" : "reset", + musb_readw(epio, MUSB_RXCSR), + musb_readw(epio, MUSB_RXCOUNT)); +#else + done = true; +#endif + } else if (urb->status == -EINPROGRESS) { + /* if no errors, be sure a packet is ready for unloading */ + if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { + status = -EPROTO; + ERR("Rx interrupt with no errors or packet!\n"); + + /* FIXME this is another "SHOULD NEVER HAPPEN" */ + +/* SCRUB (RX) */ + /* do the proper sequence to abort the transfer */ + musb_ep_select(mbase, epnum); + val &= ~MUSB_RXCSR_H_REQPKT; + musb_writew(epio, MUSB_RXCSR, val); + goto finish; + } + + /* we are expecting IN packets */ +#ifdef CONFIG_USB_INVENTRA_DMA + if (dma) { + struct dma_controller *c; + u16 rx_count; + int ret; + + rx_count = musb_readw(epio, MUSB_RXCOUNT); + + DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n", + epnum, rx_count, + urb->transfer_dma + + urb->actual_length, + qh->offset, + urb->transfer_buffer_length); + + c = musb->dma_controller; + + dma->desired_mode = 0; +#ifdef USE_MODE1 + /* because of the issue below, mode 1 will + * only rarely behave with correct semantics. + */ + if ((urb->transfer_flags & + URB_SHORT_NOT_OK) + && (urb->transfer_buffer_length - + urb->actual_length) + > qh->maxpacket) + dma->desired_mode = 1; +#endif + +/* Disadvantage of using mode 1: + * It's basically usable only for mass storage class; essentially all + * other protocols also terminate transfers on short packets. + * + * Details: + * An extra IN token is sent at the end of the transfer (due to AUTOREQ) + * If you try to use mode 1 for (transfer_buffer_length - 512), and try + * to use the extra IN token to grab the last packet using mode 0, then + * the problem is that you cannot be sure when the device will send the + * last packet and RxPktRdy set. Sometimes the packet is recd too soon + * such that it gets lost when RxCSR is re-set at the end of the mode 1 + * transfer, while sometimes it is recd just a little late so that if you + * try to configure for mode 0 soon after the mode 1 transfer is + * completed, you will find rxcount 0. Okay, so you might think why not + * wait for an interrupt when the pkt is recd. Well, you won't get any! + */ + + val = musb_readw(epio, MUSB_RXCSR); + val &= ~MUSB_RXCSR_H_REQPKT; + + if (dma->desired_mode == 0) + val &= ~MUSB_RXCSR_H_AUTOREQ; + else + val |= MUSB_RXCSR_H_AUTOREQ; + val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; + + musb_writew(epio, MUSB_RXCSR, + MUSB_RXCSR_H_WZC_BITS | val); + + /* REVISIT if when actual_length != 0, + * transfer_buffer_length needs to be + * adjusted first... + */ + ret = c->channel_program( + dma, qh->maxpacket, + dma->desired_mode, + urb->transfer_dma + + urb->actual_length, + (dma->desired_mode == 0) + ? rx_count + : urb->transfer_buffer_length); + + if (!ret) { + c->channel_release(dma); + hw_ep->rx_channel = NULL; + dma = NULL; + /* REVISIT reset CSR */ + } + } +#endif /* Mentor DMA */ + + if (!dma) { + done = musb_host_packet_rx(musb, urb, + epnum, iso_err); + DBG(6, "read %spacket\n", done ? 
"last " : ""); + } + } + + if (dma && usb_pipeisoc(pipe)) { + struct usb_iso_packet_descriptor *d; + int iso_stat = status; + + d = urb->iso_frame_desc + qh->iso_idx; + d->actual_length += xfer_len; + if (iso_err) { + iso_stat = -EILSEQ; + urb->error_count++; + } + d->status = iso_stat; + } + +finish: + urb->actual_length += xfer_len; + qh->offset += xfer_len; + if (done) { + if (urb->status == -EINPROGRESS) + urb->status = status; + musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); + } +} + +/* schedule nodes correspond to peripheral endpoints, like an OHCI QH. + * the software schedule associates multiple such nodes with a given + * host side hardware endpoint + direction; scheduling may activate + * that hardware endpoint. + */ +static int musb_schedule( + struct musb *musb, + struct musb_qh *qh, + int is_in) +{ + int idle; + int best_diff; + int best_end, epnum; + struct musb_hw_ep *hw_ep = NULL; + struct list_head *head = NULL; + + /* use fixed hardware for control and bulk */ + switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: + head = &musb->control; + hw_ep = musb->control_ep; + break; + case USB_ENDPOINT_XFER_BULK: + hw_ep = musb->bulk_ep; + if (is_in) + head = &musb->in_bulk; + else + head = &musb->out_bulk; + break; + } + if (head) { + idle = list_empty(head); + list_add_tail(&qh->ring, head); + goto success; + } + + /* else, periodic transfers get muxed to other endpoints */ + + /* FIXME this doesn't consider direction, so it can only + * work for one half of the endpoint hardware, and assumes + * the previous cases handled all non-shared endpoints... + */ + + /* we know this qh hasn't been scheduled, so all we need to do + * is choose which hardware endpoint to put it on ... + * + * REVISIT what we really want here is a regular schedule tree + * like e.g. OHCI uses, but for now musb->periodic is just an + * array of the _single_ logical endpoint associated with a + * given physical one (identity mapping logical->physical). + * + * that simplistic approach makes TT scheduling a lot simpler; + * there is none, and thus none of its complexity... + */ + best_diff = 4096; + best_end = -1; + + for (epnum = 1; epnum < musb->nr_endpoints; epnum++) { + int diff; + + if (musb->periodic[epnum]) + continue; + hw_ep = &musb->endpoints[epnum]; + if (hw_ep == musb->bulk_ep) + continue; + + if (is_in) + diff = hw_ep->max_packet_sz_rx - qh->maxpacket; + else + diff = hw_ep->max_packet_sz_tx - qh->maxpacket; + + if (diff > 0 && best_diff > diff) { + best_diff = diff; + best_end = epnum; + } + } + if (best_end < 0) + return -ENOSPC; + + idle = 1; + hw_ep = musb->endpoints + best_end; + musb->periodic[best_end] = qh; + DBG(4, "qh %p periodic slot %d\n", qh, best_end); +success: + qh->hw_ep = hw_ep; + qh->hep->hcpriv = qh; + if (idle) + musb_start_urb(musb, is_in, qh); + return 0; +} + +static int musb_urb_enqueue( + struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags) +{ + unsigned long flags; + struct musb *musb = hcd_to_musb(hcd); + struct usb_host_endpoint *hep = urb->ep; + struct musb_qh *qh = hep->hcpriv; + struct usb_endpoint_descriptor *epd = &hep->desc; + int ret; + unsigned type_reg; + unsigned interval; + + /* host role must be active */ + if (!is_host_active(musb) || !musb->is_active) + return -ENODEV; + + spin_lock_irqsave(&musb->lock, flags); + ret = usb_hcd_link_urb_to_ep(hcd, urb); + spin_unlock_irqrestore(&musb->lock, flags); + if (ret) + return ret; + + /* DMA mapping was already done, if needed, and this urb is on + * hep->urb_list ... 
so there's little to do unless hep wasn't
+	 * yet scheduled onto a live qh.
+	 *
+	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+	 * disabled, testing for empty qh->ring and avoiding qh setup costs
+	 * except for the first urb queued after a config change.
+	 */
+	if (qh) {
+		urb->hcpriv = qh;
+		return 0;
+	}
+
+	/* Allocate and initialize qh, minimizing the work done each time
+	 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
+	 *
+	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
+	 * for bugs in other kernel code to break this driver...
+	 */
+	qh = kzalloc(sizeof *qh, mem_flags);
+	if (!qh) {
+		usb_hcd_unlink_urb_from_ep(hcd, urb);
+		return -ENOMEM;
+	}
+
+	qh->hep = hep;
+	qh->dev = urb->dev;
+	INIT_LIST_HEAD(&qh->ring);
+	qh->is_ready = 1;
+
+	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+
+	/* no high bandwidth support yet */
+	if (qh->maxpacket & ~0x7ff) {
+		ret = -EMSGSIZE;
+		goto done;
+	}
+
+	qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+	qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+	/* precompute rxtype/txtype/type0 register */
+	type_reg = (qh->type << 4) | qh->epnum;
+	switch (urb->dev->speed) {
+	case USB_SPEED_LOW:
+		type_reg |= 0xc0;
+		break;
+	case USB_SPEED_FULL:
+		type_reg |= 0x80;
+		break;
+	default:
+		type_reg |= 0x40;
+	}
+	qh->type_reg = type_reg;
+
+	/* precompute rxinterval/txinterval register */
+	interval = min((u8)16, epd->bInterval);	/* log encoding */
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		/* fullspeed uses linear encoding */
+		if (USB_SPEED_FULL == urb->dev->speed) {
+			interval = epd->bInterval;
+			if (!interval)
+				interval = 1;
+		}
+		/* FALLTHROUGH */
+	case USB_ENDPOINT_XFER_ISOC:
+		/* iso always uses log encoding */
+		break;
+	default:
+		/* REVISIT we actually want to use NAK limits, hinting to the
+		 * transfer scheduling logic to try some other qh, e.g. try
+		 * for 2 msec first:
+		 *
+		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+		 *
+		 * The downside of disabling this is that transfer scheduling
+		 * gets VERY unfair for nonperiodic transfers; a misbehaving
+		 * peripheral could make that hurt. Or for reads, one that's
+		 * perfectly normal: network and other drivers keep reads
+		 * posted at all times, having one pending for a week should
+		 * be perfectly safe.
+		 *
+		 * The upside of disabling it is avoiding the need for transfer
+		 * scheduling code to put this aside for a while.
+		 */
+		interval = 0;
+	}
+	qh->intv_reg = interval;
+
+	/* precompute addressing for external hub/tt ports */
+	if (musb->is_multipoint) {
+		struct usb_device	*parent = urb->dev->parent;
+
+		if (parent != hcd->self.root_hub) {
+			qh->h_addr_reg = (u8) parent->devnum;
+
+			/* set up tt info if needed */
+			if (urb->dev->tt) {
+				qh->h_port_reg = (u8) urb->dev->ttport;
+				qh->h_addr_reg |= 0x80;
+			}
+		}
+	}
+
+	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+	 * until we get real dma queues (with an entry for each urb/buffer),
+	 * we only have work to do in the former case.
+	 */
+	spin_lock_irqsave(&musb->lock, flags);
+	if (hep->hcpriv) {
+		/* some concurrent activity submitted another urb to hep...
+		 * odd, rare, error prone, but legal.
+ */ + kfree(qh); + ret = 0; + } else + ret = musb_schedule(musb, qh, + epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); + + if (ret == 0) { + urb->hcpriv = qh; + /* FIXME set urb->start_frame for iso/intr, it's tested in + * musb_start_urb(), but otherwise only konicawc cares ... + */ + } + spin_unlock_irqrestore(&musb->lock, flags); + +done: + if (ret != 0) { + usb_hcd_unlink_urb_from_ep(hcd, urb); + kfree(qh); + } + return ret; +} + + +/* + * abort a transfer that's at the head of a hardware queue. + * called with controller locked, irqs blocked + * that hardware queue advances to the next transfer, unless prevented + */ +static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) +{ + struct musb_hw_ep *ep = qh->hw_ep; + void __iomem *epio = ep->regs; + unsigned hw_end = ep->epnum; + void __iomem *regs = ep->musb->mregs; + u16 csr; + int status = 0; + + musb_ep_select(regs, hw_end); + + if (is_dma_capable()) { + struct dma_channel *dma; + + dma = is_in ? ep->rx_channel : ep->tx_channel; + if (dma) { + status = ep->musb->dma_controller->channel_abort(dma); + DBG(status ? 1 : 3, + "abort %cX%d DMA for urb %p --> %d\n", + is_in ? 'R' : 'T', ep->epnum, + urb, status); + urb->actual_length += dma->actual_len; + } + } + + /* turn off DMA requests, discard state, stop polling ... */ + if (is_in) { + /* giveback saves bulk toggle */ + csr = musb_h_flush_rxfifo(ep, 0); + + /* REVISIT we still get an irq; should likely clear the + * endpoint's irq status here to avoid bogus irqs. + * clearing that status is platform-specific... + */ + } else { + musb_h_tx_flush_fifo(ep); + csr = musb_readw(epio, MUSB_TXCSR); + csr &= ~(MUSB_TXCSR_AUTOSET + | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_H_RXSTALL + | MUSB_TXCSR_H_NAKTIMEOUT + | MUSB_TXCSR_H_ERROR + | MUSB_TXCSR_TXPKTRDY); + musb_writew(epio, MUSB_TXCSR, csr); + /* REVISIT may need to clear FLUSHFIFO ... */ + musb_writew(epio, MUSB_TXCSR, csr); + /* flush cpu writebuffer */ + csr = musb_readw(epio, MUSB_TXCSR); + } + if (status == 0) + musb_advance_schedule(ep->musb, urb, ep, is_in); + return status; +} + +static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct musb *musb = hcd_to_musb(hcd); + struct musb_qh *qh; + struct list_head *sched; + unsigned long flags; + int ret; + + DBG(4, "urb=%p, dev%d ep%d%s\n", urb, + usb_pipedevice(urb->pipe), + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out"); + + spin_lock_irqsave(&musb->lock, flags); + ret = usb_hcd_check_unlink_urb(hcd, urb, status); + if (ret) + goto done; + + qh = urb->hcpriv; + if (!qh) + goto done; + + /* Any URB not actively programmed into endpoint hardware can be + * immediately given back. Such an URB must be at the head of its + * endpoint queue, unless someday we get real DMA queues. And even + * then, it might not be known to the hardware... + * + * Otherwise abort current transfer, pending dma, etc.; urb->status + * has already been updated. This is a synchronous abort; it'd be + * OK to hold off until after some IRQ, though. + */ + if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) + ret = -EINPROGRESS; + else { + switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: + sched = &musb->control; + break; + case USB_ENDPOINT_XFER_BULK: + if (usb_pipein(urb->pipe)) + sched = &musb->in_bulk; + else + sched = &musb->out_bulk; + break; + default: + /* REVISIT when we get a schedule tree, periodic + * transfers won't always be at the head of a + * singleton queue... 
+ */ + sched = NULL; + break; + } + } + + /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ + if (ret < 0 || (sched && qh != first_qh(sched))) { + int ready = qh->is_ready; + + ret = 0; + qh->is_ready = 0; + __musb_giveback(musb, urb, 0); + qh->is_ready = ready; + } else + ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); +done: + spin_unlock_irqrestore(&musb->lock, flags); + return ret; +} + +/* disable an endpoint */ +static void +musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) +{ + u8 epnum = hep->desc.bEndpointAddress; + unsigned long flags; + struct musb *musb = hcd_to_musb(hcd); + u8 is_in = epnum & USB_DIR_IN; + struct musb_qh *qh = hep->hcpriv; + struct urb *urb, *tmp; + struct list_head *sched; + + if (!qh) + return; + + spin_lock_irqsave(&musb->lock, flags); + + switch (qh->type) { + case USB_ENDPOINT_XFER_CONTROL: + sched = &musb->control; + break; + case USB_ENDPOINT_XFER_BULK: + if (is_in) + sched = &musb->in_bulk; + else + sched = &musb->out_bulk; + break; + default: + /* REVISIT when we get a schedule tree, periodic transfers + * won't always be at the head of a singleton queue... + */ + sched = NULL; + break; + } + + /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ + + /* kick first urb off the hardware, if needed */ + qh->is_ready = 0; + if (!sched || qh == first_qh(sched)) { + urb = next_urb(qh); + + /* make software (then hardware) stop ASAP */ + if (!urb->unlinked) + urb->status = -ESHUTDOWN; + + /* cleanup */ + musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); + } else + urb = NULL; + + /* then just nuke all the others */ + list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list) + musb_giveback(qh, urb, -ESHUTDOWN); + + spin_unlock_irqrestore(&musb->lock, flags); +} + +static int musb_h_get_frame_number(struct usb_hcd *hcd) +{ + struct musb *musb = hcd_to_musb(hcd); + + return musb_readw(musb->mregs, MUSB_FRAME); +} + +static int musb_h_start(struct usb_hcd *hcd) +{ + struct musb *musb = hcd_to_musb(hcd); + + /* NOTE: musb_start() is called when the hub driver turns + * on port power, or when (OTG) peripheral starts. 
+ */ + hcd->state = HC_STATE_RUNNING; + musb->port1_status = 0; + return 0; +} + +static void musb_h_stop(struct usb_hcd *hcd) +{ + musb_stop(hcd_to_musb(hcd)); + hcd->state = HC_STATE_HALT; +} + +static int musb_bus_suspend(struct usb_hcd *hcd) +{ + struct musb *musb = hcd_to_musb(hcd); + + if (musb->xceiv.state == OTG_STATE_A_SUSPEND) + return 0; + + if (is_host_active(musb) && musb->is_active) { + WARNING("trying to suspend as %s is_active=%i\n", + otg_state_string(musb), musb->is_active); + return -EBUSY; + } else + return 0; +} + +static int musb_bus_resume(struct usb_hcd *hcd) +{ + /* resuming child port does the work */ + return 0; +} + +const struct hc_driver musb_hc_driver = { + .description = "musb-hcd", + .product_desc = "MUSB HDRC host driver", + .hcd_priv_size = sizeof(struct musb), + .flags = HCD_USB2 | HCD_MEMORY, + + /* not using irq handler or reset hooks from usbcore, since + * those must be shared with peripheral code for OTG configs + */ + + .start = musb_h_start, + .stop = musb_h_stop, + + .get_frame_number = musb_h_get_frame_number, + + .urb_enqueue = musb_urb_enqueue, + .urb_dequeue = musb_urb_dequeue, + .endpoint_disable = musb_h_disable, + + .hub_status_data = musb_hub_status_data, + .hub_control = musb_hub_control, + .bus_suspend = musb_bus_suspend, + .bus_resume = musb_bus_resume, + /* .start_port_reset = NULL, */ + /* .hub_irq_enable = NULL, */ +}; diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h new file mode 100644 index 00000000000..77bcdb9d5b3 --- /dev/null +++ b/drivers/usb/musb/musb_host.h @@ -0,0 +1,110 @@ +/* + * MUSB OTG driver host defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
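
The hc_driver table above is the complete usbcore-facing interface of this host side. As a purely illustrative aside (not part of the patch), this is roughly how a glue layer hands such a table to usbcore; the MUSB core does the equivalent elsewhere in this series, so the function name and the error handling below are assumptions:

/* Illustrative only: hand musb_hc_driver to usbcore.  The struct usb_hcd
 * declarations come from usbcore's hcd.h (its location varies by kernel
 * version); hcd_priv_size in the table reserves room for struct musb
 * behind hcd->hcd_priv, which is what musb_to_hcd()/hcd_to_musb() rely on.
 */
static int example_register_hcd(struct device *dev)
{
	struct usb_hcd *hcd;
	int ret;

	hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
	if (!hcd)
		return -ENOMEM;

	/* no IRQ is handed to usbcore: .irq is deliberately unset in the
	 * table above, since the MUSB interrupt is shared with the
	 * peripheral-side code
	 */
	ret = usb_add_hcd(hcd, -1, 0);
	if (ret)
		usb_put_hcd(hcd);
	return ret;
}
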
+ * + */ + +#ifndef _MUSB_HOST_H +#define _MUSB_HOST_H + +static inline struct usb_hcd *musb_to_hcd(struct musb *musb) +{ + return container_of((void *) musb, struct usb_hcd, hcd_priv); +} + +static inline struct musb *hcd_to_musb(struct usb_hcd *hcd) +{ + return (struct musb *) (hcd->hcd_priv); +} + +/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */ +struct musb_qh { + struct usb_host_endpoint *hep; /* usbcore info */ + struct usb_device *dev; + struct musb_hw_ep *hw_ep; /* current binding */ + + struct list_head ring; /* of musb_qh */ + /* struct musb_qh *next; */ /* for periodic tree */ + + unsigned offset; /* in urb->transfer_buffer */ + unsigned segsize; /* current xfer fragment */ + + u8 type_reg; /* {rx,tx} type register */ + u8 intv_reg; /* {rx,tx} interval register */ + u8 addr_reg; /* device address register */ + u8 h_addr_reg; /* hub address register */ + u8 h_port_reg; /* hub port register */ + + u8 is_ready; /* safe to modify hw_ep */ + u8 type; /* XFERTYPE_* */ + u8 epnum; + u16 maxpacket; + u16 frame; /* for periodic schedule */ + unsigned iso_idx; /* in urb->iso_frame_desc[] */ +}; + +/* map from control or bulk queue head to the first qh on that ring */ +static inline struct musb_qh *first_qh(struct list_head *q) +{ + if (list_empty(q)) + return NULL; + return list_entry(q->next, struct musb_qh, ring); +} + + +extern void musb_root_disconnect(struct musb *musb); + +struct usb_hcd; + +extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf); +extern int musb_hub_control(struct usb_hcd *hcd, + u16 typeReq, u16 wValue, u16 wIndex, + char *buf, u16 wLength); + +extern const struct hc_driver musb_hc_driver; + +static inline struct urb *next_urb(struct musb_qh *qh) +{ +#ifdef CONFIG_USB_MUSB_HDRC_HCD + struct list_head *queue; + + if (!qh) + return NULL; + queue = &qh->hep->urb_list; + if (list_empty(queue)) + return NULL; + return list_entry(queue->next, struct urb, urb_list); +#else + return NULL; +#endif +} + +#endif /* _MUSB_HOST_H */ diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h new file mode 100644 index 00000000000..6bbedae83af --- /dev/null +++ b/drivers/usb/musb/musb_io.h @@ -0,0 +1,115 @@ +/* + * MUSB OTG driver register I/O + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__ +#define __MUSB_LINUX_PLATFORM_ARCH_H__ + +#include + +#ifndef CONFIG_ARM +static inline void readsl(const void __iomem *addr, void *buf, int len) + { insl((unsigned long)addr, buf, len); } +static inline void readsw(const void __iomem *addr, void *buf, int len) + { insw((unsigned long)addr, buf, len); } +static inline void readsb(const void __iomem *addr, void *buf, int len) + { insb((unsigned long)addr, buf, len); } + +static inline void writesl(const void __iomem *addr, const void *buf, int len) + { outsl((unsigned long)addr, buf, len); } +static inline void writesw(const void __iomem *addr, const void *buf, int len) + { outsw((unsigned long)addr, buf, len); } +static inline void writesb(const void __iomem *addr, const void *buf, int len) + { outsb((unsigned long)addr, buf, len); } + +#endif + +/* NOTE: these offsets are all in bytes */ + +static inline u16 musb_readw(const void __iomem *addr, unsigned offset) + { return __raw_readw(addr + offset); } + +static inline u32 musb_readl(const void __iomem *addr, unsigned offset) + { return __raw_readl(addr + offset); } + + +static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data) + { __raw_writew(data, addr + offset); } + +static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data) + { __raw_writel(data, addr + offset); } + + +#ifdef CONFIG_USB_TUSB6010 + +/* + * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum. + */ +static inline u8 musb_readb(const void __iomem *addr, unsigned offset) +{ + u16 tmp; + u8 val; + + tmp = __raw_readw(addr + (offset & ~1)); + if (offset & 1) + val = (tmp >> 8); + else + val = tmp & 0xff; + + return val; +} + +static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) +{ + u16 tmp; + + tmp = __raw_readw(addr + (offset & ~1)); + if (offset & 1) + tmp = (data << 8) | (tmp & 0xff); + else + tmp = (tmp & 0xff00) | data; + + __raw_writew(tmp, addr + (offset & ~1)); +} + +#else + +static inline u8 musb_readb(const void __iomem *addr, unsigned offset) + { return __raw_readb(addr + offset); } + +static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data) + { __raw_writeb(data, addr + offset); } + +#endif /* CONFIG_USB_TUSB6010 */ + +#endif diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c new file mode 100644 index 00000000000..55e6b78bdcc --- /dev/null +++ b/drivers/usb/musb/musb_procfs.c @@ -0,0 +1,830 @@ +/* + * MUSB OTG driver debug support + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
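
musb_io.h above has to special-case TUSB6010 because that part only supports 16-bit accesses, so byte reads and writes are emulated with a read-modify-write of the containing 16-bit word. The same arithmetic against a plain buffer, as a standalone userspace illustration (not part of the patch; little-endian word layout assumed):

#include <stdint.h>
#include <stdio.h>

/* 8-bit write on a 16-bit-only bus: fetch the aligned word, splice the
 * byte into the high or low half, write the word back -- the same scheme
 * musb_writeb() uses in the CONFIG_USB_TUSB6010 branch above.
 */
static void write8_via_16(uint16_t *bus, unsigned offset, uint8_t data)
{
	uint16_t tmp = bus[offset >> 1];	/* word containing the byte */

	if (offset & 1)
		tmp = (uint16_t)(data << 8) | (tmp & 0x00ff);	/* odd: high byte */
	else
		tmp = (tmp & 0xff00) | data;			/* even: low byte */

	bus[offset >> 1] = tmp;
}

int main(void)
{
	uint16_t regs[2] = { 0x1234, 0xabcd };

	write8_via_16(regs, 1, 0x5a);			/* patches high byte of word 0 */
	printf("%04x %04x\n", regs[0], regs[1]);	/* prints: 5a34 abcd */
	return 0;
}
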
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include /* FIXME remove procfs writes */ +#include + +#include "musb_core.h" + +#include "davinci.h" + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + +static int dump_qh(struct musb_qh *qh, char *buf, unsigned max) +{ + int count; + int tmp; + struct usb_host_endpoint *hep = qh->hep; + struct urb *urb; + + count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n", + qh, qh->dev->devnum, qh->epnum, + ({ char *s; switch (qh->type) { + case USB_ENDPOINT_XFER_BULK: + s = "-bulk"; break; + case USB_ENDPOINT_XFER_INT: + s = "-int"; break; + case USB_ENDPOINT_XFER_CONTROL: + s = ""; break; + default: + s = "iso"; break; + }; s; }), + qh->maxpacket); + if (count <= 0) + return 0; + buf += count; + max -= count; + + list_for_each_entry(urb, &hep->urb_list, urb_list) { + tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n", + usb_pipein(urb->pipe) ? "in" : "out", + urb, urb->actual_length, + urb->transfer_buffer_length); + if (tmp <= 0) + break; + tmp = min(tmp, (int)max); + count += tmp; + buf += tmp; + max -= tmp; + } + return count; +} + +static int +dump_queue(struct list_head *q, char *buf, unsigned max) +{ + int count = 0; + struct musb_qh *qh; + + list_for_each_entry(qh, q, ring) { + int tmp; + + tmp = dump_qh(qh, buf, max); + if (tmp <= 0) + break; + tmp = min(tmp, (int)max); + count += tmp; + buf += tmp; + max -= tmp; + } + return count; +} + +#endif /* HCD */ + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC +static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max) +{ + char *buf = buffer; + int code = 0; + void __iomem *regs = ep->hw_ep->regs; + char *mode = "1buf"; + + if (ep->is_in) { + if (ep->hw_ep->tx_double_buffered) + mode = "2buf"; + } else { + if (ep->hw_ep->rx_double_buffered) + mode = "2buf"; + } + + do { + struct usb_request *req; + + code = snprintf(buf, max, + "\n%s (hw%d): %s%s, csr %04x maxp %04x\n", + ep->name, ep->current_epnum, + mode, ep->dma ? " dma" : "", + musb_readw(regs, + (ep->is_in || !ep->current_epnum) + ? MUSB_TXCSR + : MUSB_RXCSR), + musb_readw(regs, ep->is_in + ? 
MUSB_TXMAXP + : MUSB_RXMAXP) + ); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + + if (is_cppi_enabled() && ep->current_epnum) { + unsigned cppi = ep->current_epnum - 1; + void __iomem *base = ep->musb->ctrl_base; + unsigned off1 = cppi << 2; + void __iomem *ram = base; + char tmp[16]; + + if (ep->is_in) { + ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi); + tmp[0] = 0; + } else { + ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi); + snprintf(tmp, sizeof tmp, "%d left, ", + musb_readl(base, + DAVINCI_RXCPPI_BUFCNT0_REG + off1)); + } + + code = snprintf(buf, max, "%cX DMA%d: %s" + "%08x %08x, %08x %08x; " + "%08x %08x %08x .. %08x\n", + ep->is_in ? 'T' : 'R', + ep->current_epnum - 1, tmp, + musb_readl(ram, 0 * 4), + musb_readl(ram, 1 * 4), + musb_readl(ram, 2 * 4), + musb_readl(ram, 3 * 4), + musb_readl(ram, 4 * 4), + musb_readl(ram, 5 * 4), + musb_readl(ram, 6 * 4), + musb_readl(ram, 7 * 4)); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + + if (list_empty(&ep->req_list)) { + code = snprintf(buf, max, "\t(queue empty)\n"); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + break; + } + list_for_each_entry(req, &ep->req_list, list) { + code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n", + req, + req->zero ? "zero, " : "", + req->short_not_ok ? "!short, " : "", + req->actual, req->length); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + } while (0); + return buf - buffer; +} +#endif + +static int +dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max) +{ + int code = 0; + char *buf = aBuffer; + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + do { + musb_ep_select(musb->mregs, epnum); +#ifdef CONFIG_USB_MUSB_HDRC_HCD + if (is_host_active(musb)) { + int dump_rx, dump_tx; + void __iomem *regs = hw_ep->regs; + + /* TEMPORARY (!) until we have a real periodic + * schedule tree ... + */ + if (!epnum) { + /* control is shared, uses RX queue + * but (mostly) shadowed tx registers + */ + dump_tx = !list_empty(&musb->control); + dump_rx = 0; + } else if (hw_ep == musb->bulk_ep) { + dump_tx = !list_empty(&musb->out_bulk); + dump_rx = !list_empty(&musb->in_bulk); + } else if (musb->periodic[epnum]) { + struct usb_host_endpoint *hep; + + hep = musb->periodic[epnum]->hep; + dump_rx = hep->desc.bEndpointAddress + & USB_ENDPOINT_DIR_MASK; + dump_tx = !dump_rx; + } else + break; + /* END TEMPORARY */ + + + if (dump_rx) { + code = snprintf(buf, max, + "\nRX%d: %s rxcsr %04x interval %02x " + "max %04x type %02x; " + "dev %d hub %d port %d" + "\n", + epnum, + hw_ep->rx_double_buffered + ? 
"2buf" : "1buf", + musb_readw(regs, MUSB_RXCSR), + musb_readb(regs, MUSB_RXINTERVAL), + musb_readw(regs, MUSB_RXMAXP), + musb_readb(regs, MUSB_RXTYPE), + /* FIXME: assumes multipoint */ + musb_readb(musb->mregs, + MUSB_BUSCTL_OFFSET(epnum, + MUSB_RXFUNCADDR)), + musb_readb(musb->mregs, + MUSB_BUSCTL_OFFSET(epnum, + MUSB_RXHUBADDR)), + musb_readb(musb->mregs, + MUSB_BUSCTL_OFFSET(epnum, + MUSB_RXHUBPORT)) + ); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + + if (is_cppi_enabled() + && epnum + && hw_ep->rx_channel) { + unsigned cppi = epnum - 1; + unsigned off1 = cppi << 2; + void __iomem *base; + void __iomem *ram; + char tmp[16]; + + base = musb->ctrl_base; + ram = DAVINCI_RXCPPI_STATERAM_OFFSET( + cppi) + base; + snprintf(tmp, sizeof tmp, "%d left, ", + musb_readl(base, + DAVINCI_RXCPPI_BUFCNT0_REG + + off1)); + + code = snprintf(buf, max, + " rx dma%d: %s" + "%08x %08x, %08x %08x; " + "%08x %08x %08x .. %08x\n", + cppi, tmp, + musb_readl(ram, 0 * 4), + musb_readl(ram, 1 * 4), + musb_readl(ram, 2 * 4), + musb_readl(ram, 3 * 4), + musb_readl(ram, 4 * 4), + musb_readl(ram, 5 * 4), + musb_readl(ram, 6 * 4), + musb_readl(ram, 7 * 4)); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + + if (hw_ep == musb->bulk_ep + && !list_empty( + &musb->in_bulk)) { + code = dump_queue(&musb->in_bulk, + buf, max); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } else if (musb->periodic[epnum]) { + code = dump_qh(musb->periodic[epnum], + buf, max); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + } + + if (dump_tx) { + code = snprintf(buf, max, + "\nTX%d: %s txcsr %04x interval %02x " + "max %04x type %02x; " + "dev %d hub %d port %d" + "\n", + epnum, + hw_ep->tx_double_buffered + ? "2buf" : "1buf", + musb_readw(regs, MUSB_TXCSR), + musb_readb(regs, MUSB_TXINTERVAL), + musb_readw(regs, MUSB_TXMAXP), + musb_readb(regs, MUSB_TXTYPE), + /* FIXME: assumes multipoint */ + musb_readb(musb->mregs, + MUSB_BUSCTL_OFFSET(epnum, + MUSB_TXFUNCADDR)), + musb_readb(musb->mregs, + MUSB_BUSCTL_OFFSET(epnum, + MUSB_TXHUBADDR)), + musb_readb(musb->mregs, + MUSB_BUSCTL_OFFSET(epnum, + MUSB_TXHUBPORT)) + ); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + + if (is_cppi_enabled() + && epnum + && hw_ep->tx_channel) { + unsigned cppi = epnum - 1; + void __iomem *base; + void __iomem *ram; + + base = musb->ctrl_base; + ram = DAVINCI_RXCPPI_STATERAM_OFFSET( + cppi) + base; + code = snprintf(buf, max, + " tx dma%d: " + "%08x %08x, %08x %08x; " + "%08x %08x %08x .. 
%08x\n", + cppi, + musb_readl(ram, 0 * 4), + musb_readl(ram, 1 * 4), + musb_readl(ram, 2 * 4), + musb_readl(ram, 3 * 4), + musb_readl(ram, 4 * 4), + musb_readl(ram, 5 * 4), + musb_readl(ram, 6 * 4), + musb_readl(ram, 7 * 4)); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + + if (hw_ep == musb->control_ep + && !list_empty( + &musb->control)) { + code = dump_queue(&musb->control, + buf, max); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } else if (hw_ep == musb->bulk_ep + && !list_empty( + &musb->out_bulk)) { + code = dump_queue(&musb->out_bulk, + buf, max); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } else if (musb->periodic[epnum]) { + code = dump_qh(musb->periodic[epnum], + buf, max); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + } + } +#endif +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + if (is_peripheral_active(musb)) { + code = 0; + + if (hw_ep->ep_in.desc || !epnum) { + code = dump_ep(&hw_ep->ep_in, buf, max); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + if (hw_ep->ep_out.desc) { + code = dump_ep(&hw_ep->ep_out, buf, max); + if (code <= 0) + break; + code = min(code, (int) max); + buf += code; + max -= code; + } + } +#endif + } while (0); + + return buf - aBuffer; +} + +/* Dump the current status and compile options. + * @param musb the device driver instance + * @param buffer where to dump the status; it must be big enough to hold the + * result otherwise "BAD THINGS HAPPENS(TM)". + */ +static int dump_header_stats(struct musb *musb, char *buffer) +{ + int code, count = 0; + const void __iomem *mbase = musb->mregs; + + *buffer = 0; + count = sprintf(buffer, "Status: %sHDRC, Mode=%s " + "(Power=%02x, DevCtl=%02x)\n", + (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb), + musb_readb(mbase, MUSB_POWER), + musb_readb(mbase, MUSB_DEVCTL)); + if (count <= 0) + return 0; + buffer += count; + + code = sprintf(buffer, "OTG state: %s; %sactive\n", + otg_state_string(musb), + musb->is_active ? "" : "in"); + if (code <= 0) + goto done; + buffer += code; + count += code; + + code = sprintf(buffer, + "Options: " +#ifdef CONFIG_MUSB_PIO_ONLY + "pio" +#elif defined(CONFIG_USB_TI_CPPI_DMA) + "cppi-dma" +#elif defined(CONFIG_USB_INVENTRA_DMA) + "musb-dma" +#elif defined(CONFIG_USB_TUSB_OMAP_DMA) + "tusb-omap-dma" +#else + "?dma?" 
+#endif + ", " +#ifdef CONFIG_USB_MUSB_OTG + "otg (peripheral+host)" +#elif defined(CONFIG_USB_GADGET_MUSB_HDRC) + "peripheral" +#elif defined(CONFIG_USB_MUSB_HDRC_HCD) + "host" +#endif + ", debug=%d [eps=%d]\n", + debug, + musb->nr_endpoints); + if (code <= 0) + goto done; + count += code; + buffer += code; + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + code = sprintf(buffer, "Peripheral address: %02x\n", + musb_readb(musb->ctrl_base, MUSB_FADDR)); + if (code <= 0) + goto done; + buffer += code; + count += code; +#endif + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + code = sprintf(buffer, "Root port status: %08x\n", + musb->port1_status); + if (code <= 0) + goto done; + buffer += code; + count += code; +#endif + +#ifdef CONFIG_ARCH_DAVINCI + code = sprintf(buffer, + "DaVinci: ctrl=%02x stat=%1x phy=%03x\n" + "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x" + "\n", + musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG), + musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG), + __raw_readl((void __force __iomem *) + IO_ADDRESS(USBPHY_CTL_PADDR)), + musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG), + musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG), + musb_readl(musb->ctrl_base, + DAVINCI_USB_INT_SOURCE_REG), + musb_readl(musb->ctrl_base, + DAVINCI_USB_INT_MASK_REG)); + if (code <= 0) + goto done; + count += code; + buffer += code; +#endif /* DAVINCI */ + +#ifdef CONFIG_USB_TUSB6010 + code = sprintf(buffer, + "TUSB6010: devconf %08x, phy enable %08x drive %08x" + "\n\totg %03x timer %08x" + "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x" + "\n", + musb_readl(musb->ctrl_base, TUSB_DEV_CONF), + musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE), + musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL), + musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT), + musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER), + musb_readl(musb->ctrl_base, TUSB_PRCM_CONF), + musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT), + musb_readl(musb->ctrl_base, TUSB_INT_SRC), + musb_readl(musb->ctrl_base, TUSB_INT_MASK)); + if (code <= 0) + goto done; + count += code; + buffer += code; +#endif /* DAVINCI */ + + if (is_cppi_enabled() && musb->dma_controller) { + code = sprintf(buffer, + "CPPI: txcr=%d txsrc=%01x txena=%01x; " + "rxcr=%d rxsrc=%01x rxena=%01x " + "\n", + musb_readl(musb->ctrl_base, + DAVINCI_TXCPPI_CTRL_REG), + musb_readl(musb->ctrl_base, + DAVINCI_TXCPPI_RAW_REG), + musb_readl(musb->ctrl_base, + DAVINCI_TXCPPI_INTENAB_REG), + musb_readl(musb->ctrl_base, + DAVINCI_RXCPPI_CTRL_REG), + musb_readl(musb->ctrl_base, + DAVINCI_RXCPPI_RAW_REG), + musb_readl(musb->ctrl_base, + DAVINCI_RXCPPI_INTENAB_REG)); + if (code <= 0) + goto done; + count += code; + buffer += code; + } + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + if (is_peripheral_enabled(musb)) { + code = sprintf(buffer, "Gadget driver: %s\n", + musb->gadget_driver + ? 
musb->gadget_driver->driver.name + : "(none)"); + if (code <= 0) + goto done; + count += code; + buffer += code; + } +#endif + +done: + return count; +} + +/* Write to ProcFS + * + * C soft-connect + * c soft-disconnect + * I enable HS + * i disable HS + * s stop session + * F force session (OTG-unfriendly) + * E rElinquish bus (OTG) + * H request host mode + * h cancel host request + * T start sending TEST_PACKET + * D set/query the debug level + */ +static int musb_proc_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + char cmd; + u8 reg; + struct musb *musb = (struct musb *)data; + void __iomem *mbase = musb->mregs; + + /* MOD_INC_USE_COUNT; */ + + if (unlikely(copy_from_user(&cmd, buffer, 1))) + return -EFAULT; + + switch (cmd) { + case 'C': + if (mbase) { + reg = musb_readb(mbase, MUSB_POWER) + | MUSB_POWER_SOFTCONN; + musb_writeb(mbase, MUSB_POWER, reg); + } + break; + + case 'c': + if (mbase) { + reg = musb_readb(mbase, MUSB_POWER) + & ~MUSB_POWER_SOFTCONN; + musb_writeb(mbase, MUSB_POWER, reg); + } + break; + + case 'I': + if (mbase) { + reg = musb_readb(mbase, MUSB_POWER) + | MUSB_POWER_HSENAB; + musb_writeb(mbase, MUSB_POWER, reg); + } + break; + + case 'i': + if (mbase) { + reg = musb_readb(mbase, MUSB_POWER) + & ~MUSB_POWER_HSENAB; + musb_writeb(mbase, MUSB_POWER, reg); + } + break; + + case 'F': + reg = musb_readb(mbase, MUSB_DEVCTL); + reg |= MUSB_DEVCTL_SESSION; + musb_writeb(mbase, MUSB_DEVCTL, reg); + break; + + case 'H': + if (mbase) { + reg = musb_readb(mbase, MUSB_DEVCTL); + reg |= MUSB_DEVCTL_HR; + musb_writeb(mbase, MUSB_DEVCTL, reg); + /* MUSB_HST_MODE( ((struct musb*)data) ); */ + /* WARNING("Host Mode\n"); */ + } + break; + + case 'h': + if (mbase) { + reg = musb_readb(mbase, MUSB_DEVCTL); + reg &= ~MUSB_DEVCTL_HR; + musb_writeb(mbase, MUSB_DEVCTL, reg); + } + break; + + case 'T': + if (mbase) { + musb_load_testpacket(musb); + musb_writeb(mbase, MUSB_TESTMODE, + MUSB_TEST_PACKET); + } + break; + +#if (MUSB_DEBUG > 0) + /* set/read debug level */ + case 'D':{ + if (count > 1) { + char digits[8], *p = digits; + int i = 0, level = 0, sign = 1; + int len = min(count - 1, (unsigned long)8); + + if (copy_from_user(&digits, &buffer[1], len)) + return -EFAULT; + + /* optional sign */ + if (*p == '-') { + len -= 1; + sign = -sign; + p++; + } + + /* read it */ + while (i++ < len && *p > '0' && *p < '9') { + level = level * 10 + (*p - '0'); + p++; + } + + level *= sign; + DBG(1, "debug level %d\n", level); + debug = level; + } + } + break; + + + case '?': + INFO("?: you are seeing it\n"); + INFO("C/c: soft connect enable/disable\n"); + INFO("I/i: hispeed enable/disable\n"); + INFO("F: force session start\n"); + INFO("H: host mode\n"); + INFO("T: start sending TEST_PACKET\n"); + INFO("D: set/read dbug level\n"); + break; +#endif + + default: + ERR("Command %c not implemented\n", cmd); + break; + } + + musb_platform_try_idle(musb, 0); + + return count; +} + +static int musb_proc_read(char *page, char **start, + off_t off, int count, int *eof, void *data) +{ + char *buffer = page; + int code = 0; + unsigned long flags; + struct musb *musb = data; + unsigned epnum; + + count -= off; + count -= 1; /* for NUL at end */ + if (count <= 0) + return -EINVAL; + + spin_lock_irqsave(&musb->lock, flags); + + code = dump_header_stats(musb, buffer); + if (code > 0) { + buffer += code; + count -= code; + } + + /* generate the report for the end points */ + /* REVISIT ... not unless something's connected! 
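
dump_qh(), dump_ep() and dump_end_info() above all repeat the same bookkeeping after every snprintf(): bail out when nothing was written, clamp to the space actually left, then advance the buffer and shrink the budget. A small helper capturing that idiom, kernel-style and purely illustrative (the patch keeps it open-coded; the name is made up):

/* Append formatted text to *buf within the remaining *max bytes.
 * Returns the bytes consumed, 0 once nothing more fits -- the same
 * convention the open-coded dump_*() loops follow.  Assumes the usual
 * kernel vsnprintf() and min() from linux/kernel.h.
 */
static int dump_append(char **buf, unsigned *max, const char *fmt, ...)
{
	va_list args;
	int code;

	if (!*max)
		return 0;

	va_start(args, fmt);
	code = vsnprintf(*buf, *max, fmt, args);
	va_end(args);

	if (code <= 0)
		return 0;
	code = min(code, (int)*max);	/* vsnprintf reports the would-be length */
	*buf += code;
	*max -= code;
	return code;
}
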
*/ + for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints; + epnum++) { + code = dump_end_info(musb, epnum, buffer, count); + if (code > 0) { + buffer += code; + count -= code; + } + } + + musb_platform_try_idle(musb, 0); + + spin_unlock_irqrestore(&musb->lock, flags); + *eof = 1; + + return buffer - page; +} + +void __devexit musb_debug_delete(char *name, struct musb *musb) +{ + if (musb->proc_entry) + remove_proc_entry(name, NULL); +} + +struct proc_dir_entry *__init +musb_debug_create(char *name, struct musb *data) +{ + struct proc_dir_entry *pde; + + /* FIXME convert everything to seq_file; then later, debugfs */ + + if (!name) + return NULL; + + pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL); + data->proc_entry = pde; + if (pde) { + pde->data = data; + /* pde->owner = THIS_MODULE; */ + + pde->read_proc = musb_proc_read; + pde->write_proc = musb_proc_write; + + pde->size = 0; + + pr_debug("Registered /proc/%s\n", name); + } else { + pr_debug("Cannot create a valid proc file entry"); + } + + return pde; +} diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h new file mode 100644 index 00000000000..9c228661aa5 --- /dev/null +++ b/drivers/usb/musb/musb_regs.h @@ -0,0 +1,300 @@ +/* + * MUSB OTG driver register defines + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __MUSB_REGS_H__ +#define __MUSB_REGS_H__ + +#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */ + +/* + * Common USB registers + */ + +#define MUSB_FADDR 0x00 /* 8-bit */ +#define MUSB_POWER 0x01 /* 8-bit */ + +#define MUSB_INTRTX 0x02 /* 16-bit */ +#define MUSB_INTRRX 0x04 +#define MUSB_INTRTXE 0x06 +#define MUSB_INTRRXE 0x08 +#define MUSB_INTRUSB 0x0A /* 8 bit */ +#define MUSB_INTRUSBE 0x0B /* 8 bit */ +#define MUSB_FRAME 0x0C +#define MUSB_INDEX 0x0E /* 8 bit */ +#define MUSB_TESTMODE 0x0F /* 8 bit */ + +/* Get offset for a given FIFO from musb->mregs */ +#ifdef CONFIG_USB_TUSB6010 +#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20)) +#else +#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4)) +#endif + +/* + * Additional Control Registers + */ + +#define MUSB_DEVCTL 0x60 /* 8 bit */ + +/* These are always controlled through the INDEX register */ +#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */ +#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */ +#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */ +#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */ + +/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ +#define MUSB_HWVERS 0x6C /* 8 bit */ + +#define MUSB_EPINFO 0x78 /* 8 bit */ +#define MUSB_RAMINFO 0x79 /* 8 bit */ +#define MUSB_LINKINFO 0x7a /* 8 bit */ +#define MUSB_VPLEN 0x7b /* 8 bit */ +#define MUSB_HS_EOF1 0x7c /* 8 bit */ +#define MUSB_FS_EOF1 0x7d /* 8 bit */ +#define MUSB_LS_EOF1 0x7e /* 8 bit */ + +/* Offsets to endpoint registers */ +#define MUSB_TXMAXP 0x00 +#define MUSB_TXCSR 0x02 +#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */ +#define MUSB_RXMAXP 0x04 +#define MUSB_RXCSR 0x06 +#define MUSB_RXCOUNT 0x08 +#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */ +#define MUSB_TXTYPE 0x0A +#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */ +#define MUSB_TXINTERVAL 0x0B +#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */ +#define MUSB_RXTYPE 0x0C +#define MUSB_RXINTERVAL 0x0D +#define MUSB_FIFOSIZE 0x0F +#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */ + +/* Offsets to endpoint registers in indexed model (using INDEX register) */ +#define MUSB_INDEXED_OFFSET(_epnum, _offset) \ + (0x10 + (_offset)) + +/* Offsets to endpoint registers in flat models */ +#define MUSB_FLAT_OFFSET(_epnum, _offset) \ + (0x100 + (0x10*(_epnum)) + (_offset)) + +#ifdef CONFIG_USB_TUSB6010 +/* TUSB6010 EP0 configuration register is special */ +#define MUSB_TUSB_OFFSET(_epnum, _offset) \ + (0x10 + _offset) +#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */ +#endif + +/* "bus control"/target registers, for host side multipoint (external hubs) */ +#define MUSB_TXFUNCADDR 0x00 +#define MUSB_TXHUBADDR 0x02 +#define MUSB_TXHUBPORT 0x03 + +#define MUSB_RXFUNCADDR 0x04 +#define MUSB_RXHUBADDR 0x06 +#define MUSB_RXHUBPORT 0x07 + +#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \ + (0x80 + (8*(_epnum)) + (_offset)) + +/* + * MUSB Register bits + */ + +/* POWER */ +#define MUSB_POWER_ISOUPDATE 0x80 +#define MUSB_POWER_SOFTCONN 0x40 +#define MUSB_POWER_HSENAB 0x20 +#define MUSB_POWER_HSMODE 0x10 +#define MUSB_POWER_RESET 0x08 +#define MUSB_POWER_RESUME 0x04 +#define MUSB_POWER_SUSPENDM 0x02 +#define MUSB_POWER_ENSUSPEND 0x01 + +/* INTRUSB */ +#define MUSB_INTR_SUSPEND 0x01 +#define MUSB_INTR_RESUME 0x02 +#define MUSB_INTR_RESET 0x04 +#define MUSB_INTR_BABBLE 0x04 +#define MUSB_INTR_SOF 0x08 +#define MUSB_INTR_CONNECT 0x10 +#define MUSB_INTR_DISCONNECT 0x20 +#define MUSB_INTR_SESSREQ 0x40 +#define 
MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */ + +/* DEVCTL */ +#define MUSB_DEVCTL_BDEVICE 0x80 +#define MUSB_DEVCTL_FSDEV 0x40 +#define MUSB_DEVCTL_LSDEV 0x20 +#define MUSB_DEVCTL_VBUS 0x18 +#define MUSB_DEVCTL_VBUS_SHIFT 3 +#define MUSB_DEVCTL_HM 0x04 +#define MUSB_DEVCTL_HR 0x02 +#define MUSB_DEVCTL_SESSION 0x01 + +/* TESTMODE */ +#define MUSB_TEST_FORCE_HOST 0x80 +#define MUSB_TEST_FIFO_ACCESS 0x40 +#define MUSB_TEST_FORCE_FS 0x20 +#define MUSB_TEST_FORCE_HS 0x10 +#define MUSB_TEST_PACKET 0x08 +#define MUSB_TEST_K 0x04 +#define MUSB_TEST_J 0x02 +#define MUSB_TEST_SE0_NAK 0x01 + +/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */ +#define MUSB_FIFOSZ_DPB 0x10 +/* Allocation size (8, 16, 32, ... 4096) */ +#define MUSB_FIFOSZ_SIZE 0x0f + +/* CSR0 */ +#define MUSB_CSR0_FLUSHFIFO 0x0100 +#define MUSB_CSR0_TXPKTRDY 0x0002 +#define MUSB_CSR0_RXPKTRDY 0x0001 + +/* CSR0 in Peripheral mode */ +#define MUSB_CSR0_P_SVDSETUPEND 0x0080 +#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040 +#define MUSB_CSR0_P_SENDSTALL 0x0020 +#define MUSB_CSR0_P_SETUPEND 0x0010 +#define MUSB_CSR0_P_DATAEND 0x0008 +#define MUSB_CSR0_P_SENTSTALL 0x0004 + +/* CSR0 in Host mode */ +#define MUSB_CSR0_H_DIS_PING 0x0800 +#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */ +#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */ +#define MUSB_CSR0_H_NAKTIMEOUT 0x0080 +#define MUSB_CSR0_H_STATUSPKT 0x0040 +#define MUSB_CSR0_H_REQPKT 0x0020 +#define MUSB_CSR0_H_ERROR 0x0010 +#define MUSB_CSR0_H_SETUPPKT 0x0008 +#define MUSB_CSR0_H_RXSTALL 0x0004 + +/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */ +#define MUSB_CSR0_P_WZC_BITS \ + (MUSB_CSR0_P_SENTSTALL) +#define MUSB_CSR0_H_WZC_BITS \ + (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \ + | MUSB_CSR0_RXPKTRDY) + +/* TxType/RxType */ +#define MUSB_TYPE_SPEED 0xc0 +#define MUSB_TYPE_SPEED_SHIFT 6 +#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */ +#define MUSB_TYPE_PROTO_SHIFT 4 +#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */ + +/* CONFIGDATA */ +#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */ +#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */ +#define MUSB_CONFIGDATA_BIGENDIAN 0x20 +#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */ +#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */ +#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */ +#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */ +#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */ + +/* TXCSR in Peripheral and Host mode */ +#define MUSB_TXCSR_AUTOSET 0x8000 +#define MUSB_TXCSR_MODE 0x2000 +#define MUSB_TXCSR_DMAENAB 0x1000 +#define MUSB_TXCSR_FRCDATATOG 0x0800 +#define MUSB_TXCSR_DMAMODE 0x0400 +#define MUSB_TXCSR_CLRDATATOG 0x0040 +#define MUSB_TXCSR_FLUSHFIFO 0x0008 +#define MUSB_TXCSR_FIFONOTEMPTY 0x0002 +#define MUSB_TXCSR_TXPKTRDY 0x0001 + +/* TXCSR in Peripheral mode */ +#define MUSB_TXCSR_P_ISO 0x4000 +#define MUSB_TXCSR_P_INCOMPTX 0x0080 +#define MUSB_TXCSR_P_SENTSTALL 0x0020 +#define MUSB_TXCSR_P_SENDSTALL 0x0010 +#define MUSB_TXCSR_P_UNDERRUN 0x0004 + +/* TXCSR in Host mode */ +#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200 +#define MUSB_TXCSR_H_DATATOGGLE 0x0100 +#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080 +#define MUSB_TXCSR_H_RXSTALL 0x0020 +#define MUSB_TXCSR_H_ERROR 0x0004 + +/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ +#define MUSB_TXCSR_P_WZC_BITS \ + (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \ + | MUSB_TXCSR_P_UNDERRUN | 
MUSB_TXCSR_FIFONOTEMPTY) +#define MUSB_TXCSR_H_WZC_BITS \ + (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \ + | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY) + +/* RXCSR in Peripheral and Host mode */ +#define MUSB_RXCSR_AUTOCLEAR 0x8000 +#define MUSB_RXCSR_DMAENAB 0x2000 +#define MUSB_RXCSR_DISNYET 0x1000 +#define MUSB_RXCSR_PID_ERR 0x1000 +#define MUSB_RXCSR_DMAMODE 0x0800 +#define MUSB_RXCSR_INCOMPRX 0x0100 +#define MUSB_RXCSR_CLRDATATOG 0x0080 +#define MUSB_RXCSR_FLUSHFIFO 0x0010 +#define MUSB_RXCSR_DATAERROR 0x0008 +#define MUSB_RXCSR_FIFOFULL 0x0002 +#define MUSB_RXCSR_RXPKTRDY 0x0001 + +/* RXCSR in Peripheral mode */ +#define MUSB_RXCSR_P_ISO 0x4000 +#define MUSB_RXCSR_P_SENTSTALL 0x0040 +#define MUSB_RXCSR_P_SENDSTALL 0x0020 +#define MUSB_RXCSR_P_OVERRUN 0x0004 + +/* RXCSR in Host mode */ +#define MUSB_RXCSR_H_AUTOREQ 0x4000 +#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400 +#define MUSB_RXCSR_H_DATATOGGLE 0x0200 +#define MUSB_RXCSR_H_RXSTALL 0x0040 +#define MUSB_RXCSR_H_REQPKT 0x0020 +#define MUSB_RXCSR_H_ERROR 0x0004 + +/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */ +#define MUSB_RXCSR_P_WZC_BITS \ + (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \ + | MUSB_RXCSR_RXPKTRDY) +#define MUSB_RXCSR_H_WZC_BITS \ + (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \ + | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY) + +/* HUBADDR */ +#define MUSB_HUBADDR_MULTI_TT 0x80 + +#endif /* __MUSB_REGS_H__ */ diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c new file mode 100644 index 00000000000..e0e9ce58417 --- /dev/null +++ b/drivers/usb/musb/musb_virthub.c @@ -0,0 +1,425 @@ +/* + * MUSB OTG driver virtual root hub support + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2006 by Texas Instruments + * Copyright (C) 2006-2007 Nokia Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
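
Before the virtual-hub code, a worked expansion of the addressing macros musb_regs.h defines above, since the three register models (flat, indexed, and the host-side "busctl" block) are easy to mix up. Illustrative only; the helper name is made up and the flat (non-indexed) model is assumed:

/* Read endpoint 3's TXCSR via the flat model and its TX function address
 * from the multipoint "busctl" block.  mbase is musb->mregs.
 */
static void example_ep3_regs(void __iomem *mbase)
{
	/* MUSB_FLAT_OFFSET(3, MUSB_TXCSR)        = 0x100 + 0x10*3 + 0x02 = 0x132 */
	u16 txcsr = musb_readw(mbase, MUSB_FLAT_OFFSET(3, MUSB_TXCSR));

	/* MUSB_BUSCTL_OFFSET(3, MUSB_TXFUNCADDR) = 0x80  + 8*3    + 0x00 = 0x98  */
	u8 faddr = musb_readb(mbase, MUSB_BUSCTL_OFFSET(3, MUSB_TXFUNCADDR));

	/* indexed model instead: write 3 to MUSB_INDEX (0x0E), as
	 * musb_ep_select() does, and the same CSR appears at
	 * MUSB_INDEXED_OFFSET(3, MUSB_TXCSR) = 0x10 + 0x02 = 0x12
	 */
	(void)txcsr;
	(void)faddr;
}
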
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "musb_core.h" + + +static void musb_port_suspend(struct musb *musb, bool do_suspend) +{ + u8 power; + void __iomem *mbase = musb->mregs; + + if (!is_host_active(musb)) + return; + + /* NOTE: this doesn't necessarily put PHY into low power mode, + * turning off its clock; that's a function of PHY integration and + * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect + * SE0 changing to connect (J) or wakeup (K) states. + */ + power = musb_readb(mbase, MUSB_POWER); + if (do_suspend) { + int retries = 10000; + + power &= ~MUSB_POWER_RESUME; + power |= MUSB_POWER_SUSPENDM; + musb_writeb(mbase, MUSB_POWER, power); + + /* Needed for OPT A tests */ + power = musb_readb(mbase, MUSB_POWER); + while (power & MUSB_POWER_SUSPENDM) { + power = musb_readb(mbase, MUSB_POWER); + if (retries-- < 1) + break; + } + + DBG(3, "Root port suspended, power %02x\n", power); + + musb->port1_status |= USB_PORT_STAT_SUSPEND; + switch (musb->xceiv.state) { + case OTG_STATE_A_HOST: + musb->xceiv.state = OTG_STATE_A_SUSPEND; + musb->is_active = is_otg_enabled(musb) + && musb->xceiv.host->b_hnp_enable; + musb_platform_try_idle(musb, 0); + break; +#ifdef CONFIG_USB_MUSB_OTG + case OTG_STATE_B_HOST: + musb->xceiv.state = OTG_STATE_B_WAIT_ACON; + musb->is_active = is_otg_enabled(musb) + && musb->xceiv.host->b_hnp_enable; + musb_platform_try_idle(musb, 0); + break; +#endif + default: + DBG(1, "bogus rh suspend? %s\n", + otg_state_string(musb)); + } + } else if (power & MUSB_POWER_SUSPENDM) { + power &= ~MUSB_POWER_SUSPENDM; + power |= MUSB_POWER_RESUME; + musb_writeb(mbase, MUSB_POWER, power); + + DBG(3, "Root port resuming, power %02x\n", power); + + /* later, GetPortStatus will stop RESUME signaling */ + musb->port1_status |= MUSB_PORT_STAT_RESUME; + musb->rh_timer = jiffies + msecs_to_jiffies(20); + } +} + +static void musb_port_reset(struct musb *musb, bool do_reset) +{ + u8 power; + void __iomem *mbase = musb->mregs; + +#ifdef CONFIG_USB_MUSB_OTG + if (musb->xceiv.state == OTG_STATE_B_IDLE) { + DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n"); + musb->port1_status &= ~USB_PORT_STAT_RESET; + return; + } +#endif + + if (!is_host_active(musb)) + return; + + /* NOTE: caller guarantees it will turn off the reset when + * the appropriate amount of time has passed + */ + power = musb_readb(mbase, MUSB_POWER); + if (do_reset) { + + /* + * If RESUME is set, we must make sure it stays minimum 20 ms. + * Then we must clear RESUME and wait a bit to let musb start + * generating SOFs. If we don't do this, OPT HS A 6.8 tests + * fail with "Error! Did not receive an SOF before suspend + * detected". 
+ */ + if (power & MUSB_POWER_RESUME) { + while (time_before(jiffies, musb->rh_timer)) + msleep(1); + musb_writeb(mbase, MUSB_POWER, + power & ~MUSB_POWER_RESUME); + msleep(1); + } + + musb->ignore_disconnect = true; + power &= 0xf0; + musb_writeb(mbase, MUSB_POWER, + power | MUSB_POWER_RESET); + + musb->port1_status |= USB_PORT_STAT_RESET; + musb->port1_status &= ~USB_PORT_STAT_ENABLE; + musb->rh_timer = jiffies + msecs_to_jiffies(50); + } else { + DBG(4, "root port reset stopped\n"); + musb_writeb(mbase, MUSB_POWER, + power & ~MUSB_POWER_RESET); + + musb->ignore_disconnect = false; + + power = musb_readb(mbase, MUSB_POWER); + if (power & MUSB_POWER_HSMODE) { + DBG(4, "high-speed device connected\n"); + musb->port1_status |= USB_PORT_STAT_HIGH_SPEED; + } + + musb->port1_status &= ~USB_PORT_STAT_RESET; + musb->port1_status |= USB_PORT_STAT_ENABLE + | (USB_PORT_STAT_C_RESET << 16) + | (USB_PORT_STAT_C_ENABLE << 16); + usb_hcd_poll_rh_status(musb_to_hcd(musb)); + + musb->vbuserr_retry = VBUSERR_RETRY_COUNT; + } +} + +void musb_root_disconnect(struct musb *musb) +{ + musb->port1_status = (1 << USB_PORT_FEAT_POWER) + | (1 << USB_PORT_FEAT_C_CONNECTION); + + usb_hcd_poll_rh_status(musb_to_hcd(musb)); + musb->is_active = 0; + + switch (musb->xceiv.state) { + case OTG_STATE_A_HOST: + case OTG_STATE_A_SUSPEND: + musb->xceiv.state = OTG_STATE_A_WAIT_BCON; + musb->is_active = 0; + break; + case OTG_STATE_A_WAIT_VFALL: + musb->xceiv.state = OTG_STATE_B_IDLE; + break; + default: + DBG(1, "host disconnect (%s)\n", otg_state_string(musb)); + } +} + + +/*---------------------------------------------------------------------*/ + +/* Caller may or may not hold musb->lock */ +int musb_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct musb *musb = hcd_to_musb(hcd); + int retval = 0; + + /* called in_irq() via usb_hcd_poll_rh_status() */ + if (musb->port1_status & 0xffff0000) { + *buf = 0x02; + retval = 1; + } + return retval; +} + +int musb_hub_control( + struct usb_hcd *hcd, + u16 typeReq, + u16 wValue, + u16 wIndex, + char *buf, + u16 wLength) +{ + struct musb *musb = hcd_to_musb(hcd); + u32 temp; + int retval = 0; + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + + if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) { + spin_unlock_irqrestore(&musb->lock, flags); + return -ESHUTDOWN; + } + + /* hub features: always zero, setting is a NOP + * port features: reported, sometimes updated when host is active + * no indicators + */ + switch (typeReq) { + case ClearHubFeature: + case SetHubFeature: + switch (wValue) { + case C_HUB_OVER_CURRENT: + case C_HUB_LOCAL_POWER: + break; + default: + goto error; + } + break; + case ClearPortFeature: + if ((wIndex & 0xff) != 1) + goto error; + + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + break; + case USB_PORT_FEAT_SUSPEND: + musb_port_suspend(musb, false); + break; + case USB_PORT_FEAT_POWER: + if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) + musb_set_vbus(musb, 0); + break; + case USB_PORT_FEAT_C_CONNECTION: + case USB_PORT_FEAT_C_ENABLE: + case USB_PORT_FEAT_C_OVER_CURRENT: + case USB_PORT_FEAT_C_RESET: + case USB_PORT_FEAT_C_SUSPEND: + break; + default: + goto error; + } + DBG(5, "clear feature %d\n", wValue); + musb->port1_status &= ~(1 << wValue); + break; + case GetHubDescriptor: + { + struct usb_hub_descriptor *desc = (void *)buf; + + desc->bDescLength = 9; + desc->bDescriptorType = 0x29; + desc->bNbrPorts = 1; + desc->wHubCharacteristics = __constant_cpu_to_le16( + 0x0001 /* per-port power switching */ + | 0x0010 /* 
no overcurrent reporting */ + ); + desc->bPwrOn2PwrGood = 5; /* msec/2 */ + desc->bHubContrCurrent = 0; + + /* workaround bogus struct definition */ + desc->DeviceRemovable[0] = 0x02; /* port 1 */ + desc->DeviceRemovable[1] = 0xff; + } + break; + case GetHubStatus: + temp = 0; + *(__le32 *) buf = cpu_to_le32(temp); + break; + case GetPortStatus: + if (wIndex != 1) + goto error; + + /* finish RESET signaling? */ + if ((musb->port1_status & USB_PORT_STAT_RESET) + && time_after_eq(jiffies, musb->rh_timer)) + musb_port_reset(musb, false); + + /* finish RESUME signaling? */ + if ((musb->port1_status & MUSB_PORT_STAT_RESUME) + && time_after_eq(jiffies, musb->rh_timer)) { + u8 power; + + power = musb_readb(musb->mregs, MUSB_POWER); + power &= ~MUSB_POWER_RESUME; + DBG(4, "root port resume stopped, power %02x\n", + power); + musb_writeb(musb->mregs, MUSB_POWER, power); + + /* ISSUE: DaVinci (RTL 1.300) disconnects after + * resume of high speed peripherals (but not full + * speed ones). + */ + + musb->is_active = 1; + musb->port1_status &= ~(USB_PORT_STAT_SUSPEND + | MUSB_PORT_STAT_RESUME); + musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; + usb_hcd_poll_rh_status(musb_to_hcd(musb)); + /* NOTE: it might really be A_WAIT_BCON ... */ + musb->xceiv.state = OTG_STATE_A_HOST; + } + + put_unaligned(cpu_to_le32(musb->port1_status + & ~MUSB_PORT_STAT_RESUME), + (__le32 *) buf); + + /* port change status is more interesting */ + DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n", + musb->port1_status); + break; + case SetPortFeature: + if ((wIndex & 0xff) != 1) + goto error; + + switch (wValue) { + case USB_PORT_FEAT_POWER: + /* NOTE: this controller has a strange state machine + * that involves "requesting sessions" according to + * magic side effects from incompletely-described + * rules about startup... + * + * This call is what really starts the host mode; be + * very careful about side effects if you reorder any + * initialization logic, e.g. for OTG, or change any + * logic relating to VBUS power-up. 
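
Everything about the single root port lives in musb->port1_status: the low 16 bits mirror wPortStatus, and the matching change bits sit shifted up by 16 (USB_PORT_STAT_C_* << 16), which is why musb_hub_status_data() can report "port 1 changed" with one mask test. A short sketch of that packing, not part of the patch (the function name is made up):

/* Record a connect-change the way musb_root_disconnect() does, then show
 * how the hub status bitmap is derived from it in musb_hub_status_data().
 */
static void example_port1_change(struct musb *musb, char *buf)
{
	musb->port1_status |= USB_PORT_STAT_C_CONNECTION << 16;

	if (musb->port1_status & 0xffff0000)	/* any change bit set? */
		*buf = 0x02;			/* bitmap: bit 1 == port 1 */
}
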
+ */ + if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) + musb_start(musb); + break; + case USB_PORT_FEAT_RESET: + musb_port_reset(musb, true); + break; + case USB_PORT_FEAT_SUSPEND: + musb_port_suspend(musb, true); + break; + case USB_PORT_FEAT_TEST: + if (unlikely(is_host_active(musb))) + goto error; + + wIndex >>= 8; + switch (wIndex) { + case 1: + pr_debug("TEST_J\n"); + temp = MUSB_TEST_J; + break; + case 2: + pr_debug("TEST_K\n"); + temp = MUSB_TEST_K; + break; + case 3: + pr_debug("TEST_SE0_NAK\n"); + temp = MUSB_TEST_SE0_NAK; + break; + case 4: + pr_debug("TEST_PACKET\n"); + temp = MUSB_TEST_PACKET; + musb_load_testpacket(musb); + break; + case 5: + pr_debug("TEST_FORCE_ENABLE\n"); + temp = MUSB_TEST_FORCE_HOST + | MUSB_TEST_FORCE_HS; + + musb_writeb(musb->mregs, MUSB_DEVCTL, + MUSB_DEVCTL_SESSION); + break; + case 6: + pr_debug("TEST_FIFO_ACCESS\n"); + temp = MUSB_TEST_FIFO_ACCESS; + break; + default: + goto error; + } + musb_writeb(musb->mregs, MUSB_TESTMODE, temp); + break; + default: + goto error; + } + DBG(5, "set feature %d\n", wValue); + musb->port1_status |= 1 << wValue; + break; + + default: +error: + /* "protocol stall" on error */ + retval = -EPIPE; + } + spin_unlock_irqrestore(&musb->lock, flags); + return retval; +} diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c new file mode 100644 index 00000000000..9ba8fb7fcd2 --- /dev/null +++ b/drivers/usb/musb/musbhsdma.c @@ -0,0 +1,433 @@ +/* + * MUSB OTG driver - support for Mentor's DMA controller + * + * Copyright 2005 Mentor Graphics Corporation + * Copyright (C) 2005-2007 by Texas Instruments + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#include +#include +#include +#include "musb_core.h" + +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) +#include "omap2430.h" +#endif + +#define MUSB_HSDMA_BASE 0x200 +#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0) +#define MUSB_HSDMA_CONTROL 0x4 +#define MUSB_HSDMA_ADDRESS 0x8 +#define MUSB_HSDMA_COUNT 0xc + +#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \ + (MUSB_HSDMA_BASE + (_bChannel << 4) + _offset) + +/* control register (16-bit): */ +#define MUSB_HSDMA_ENABLE_SHIFT 0 +#define MUSB_HSDMA_TRANSMIT_SHIFT 1 +#define MUSB_HSDMA_MODE1_SHIFT 2 +#define MUSB_HSDMA_IRQENABLE_SHIFT 3 +#define MUSB_HSDMA_ENDPOINT_SHIFT 4 +#define MUSB_HSDMA_BUSERROR_SHIFT 8 +#define MUSB_HSDMA_BURSTMODE_SHIFT 9 +#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT) +#define MUSB_HSDMA_BURSTMODE_UNSPEC 0 +#define MUSB_HSDMA_BURSTMODE_INCR4 1 +#define MUSB_HSDMA_BURSTMODE_INCR8 2 +#define MUSB_HSDMA_BURSTMODE_INCR16 3 + +#define MUSB_HSDMA_CHANNELS 8 + +struct musb_dma_controller; + +struct musb_dma_channel { + struct dma_channel Channel; + struct musb_dma_controller *controller; + u32 dwStartAddress; + u32 len; + u16 wMaxPacketSize; + u8 bIndex; + u8 epnum; + u8 transmit; +}; + +struct musb_dma_controller { + struct dma_controller Controller; + struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS]; + void *pDmaPrivate; + void __iomem *pCoreBase; + u8 bChannelCount; + u8 bmUsedChannels; + u8 irq; +}; + +static int dma_controller_start(struct dma_controller *c) +{ + /* nothing to do */ + return 0; +} + +static void dma_channel_release(struct dma_channel *pChannel); + +static int dma_controller_stop(struct dma_controller *c) +{ + struct musb_dma_controller *controller = + container_of(c, struct musb_dma_controller, Controller); + struct musb *musb = (struct musb *) controller->pDmaPrivate; + struct dma_channel *pChannel; + u8 bBit; + + if (controller->bmUsedChannels != 0) { + dev_err(musb->controller, + "Stopping DMA controller while channel active\n"); + + for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { + if (controller->bmUsedChannels & (1 << bBit)) { + pChannel = &controller->aChannel[bBit].Channel; + dma_channel_release(pChannel); + + if (!controller->bmUsedChannels) + break; + } + } + } + return 0; +} + +static struct dma_channel *dma_channel_allocate(struct dma_controller *c, + struct musb_hw_ep *hw_ep, u8 transmit) +{ + u8 bBit; + struct dma_channel *pChannel = NULL; + struct musb_dma_channel *pImplChannel = NULL; + struct musb_dma_controller *controller = + container_of(c, struct musb_dma_controller, Controller); + + for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) { + if (!(controller->bmUsedChannels & (1 << bBit))) { + controller->bmUsedChannels |= (1 << bBit); + pImplChannel = &(controller->aChannel[bBit]); + pImplChannel->controller = controller; + pImplChannel->bIndex = bBit; + pImplChannel->epnum = hw_ep->epnum; + pImplChannel->transmit = transmit; + pChannel = &(pImplChannel->Channel); + pChannel->private_data = pImplChannel; + pChannel->status = MUSB_DMA_STATUS_FREE; + pChannel->max_len = 0x10000; + /* Tx => mode 1; Rx => mode 0 */ + pChannel->desired_mode = transmit; + pChannel->actual_len = 0; + break; + } + } + return pChannel; +} + +static void dma_channel_release(struct dma_channel *pChannel) +{ + struct musb_dma_channel *pImplChannel = + (struct musb_dma_channel *) pChannel->private_data; + + pChannel->actual_len = 0; + pImplChannel->dwStartAddress = 0; + pImplChannel->len = 0; + + pImplChannel->controller->bmUsedChannels &= + ~(1 << 
pImplChannel->bIndex); + + pChannel->status = MUSB_DMA_STATUS_UNKNOWN; +} + +static void configure_channel(struct dma_channel *pChannel, + u16 packet_sz, u8 mode, + dma_addr_t dma_addr, u32 len) +{ + struct musb_dma_channel *pImplChannel = + (struct musb_dma_channel *) pChannel->private_data; + struct musb_dma_controller *controller = pImplChannel->controller; + void __iomem *mbase = controller->pCoreBase; + u8 bChannel = pImplChannel->bIndex; + u16 csr = 0; + + DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n", + pChannel, packet_sz, dma_addr, len, mode); + + if (mode) { + csr |= 1 << MUSB_HSDMA_MODE1_SHIFT; + BUG_ON(len < packet_sz); + + if (packet_sz >= 64) { + csr |= MUSB_HSDMA_BURSTMODE_INCR16 + << MUSB_HSDMA_BURSTMODE_SHIFT; + } else if (packet_sz >= 32) { + csr |= MUSB_HSDMA_BURSTMODE_INCR8 + << MUSB_HSDMA_BURSTMODE_SHIFT; + } else if (packet_sz >= 16) { + csr |= MUSB_HSDMA_BURSTMODE_INCR4 + << MUSB_HSDMA_BURSTMODE_SHIFT; + } + } + + csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT) + | (1 << MUSB_HSDMA_ENABLE_SHIFT) + | (1 << MUSB_HSDMA_IRQENABLE_SHIFT) + | (pImplChannel->transmit + ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT) + : 0); + + /* address/count */ + musb_writel(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), + dma_addr); + musb_writel(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), + len); + + /* control (this should start things) */ + musb_writew(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), + csr); +} + +static int dma_channel_program(struct dma_channel *pChannel, + u16 packet_sz, u8 mode, + dma_addr_t dma_addr, u32 len) +{ + struct musb_dma_channel *pImplChannel = + (struct musb_dma_channel *) pChannel->private_data; + + DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", + pImplChannel->epnum, + pImplChannel->transmit ? 
"Tx" : "Rx", + packet_sz, dma_addr, len, mode); + + BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN || + pChannel->status == MUSB_DMA_STATUS_BUSY); + + pChannel->actual_len = 0; + pImplChannel->dwStartAddress = dma_addr; + pImplChannel->len = len; + pImplChannel->wMaxPacketSize = packet_sz; + pChannel->status = MUSB_DMA_STATUS_BUSY; + + if ((mode == 1) && (len >= packet_sz)) + configure_channel(pChannel, packet_sz, 1, dma_addr, len); + else + configure_channel(pChannel, packet_sz, 0, dma_addr, len); + + return true; +} + +static int dma_channel_abort(struct dma_channel *pChannel) +{ + struct musb_dma_channel *pImplChannel = + (struct musb_dma_channel *) pChannel->private_data; + u8 bChannel = pImplChannel->bIndex; + void __iomem *mbase = pImplChannel->controller->pCoreBase; + u16 csr; + + if (pChannel->status == MUSB_DMA_STATUS_BUSY) { + if (pImplChannel->transmit) { + + csr = musb_readw(mbase, + MUSB_EP_OFFSET(pImplChannel->epnum, + MUSB_TXCSR)); + csr &= ~(MUSB_TXCSR_AUTOSET | + MUSB_TXCSR_DMAENAB | + MUSB_TXCSR_DMAMODE); + musb_writew(mbase, + MUSB_EP_OFFSET(pImplChannel->epnum, + MUSB_TXCSR), + csr); + } else { + csr = musb_readw(mbase, + MUSB_EP_OFFSET(pImplChannel->epnum, + MUSB_RXCSR)); + csr &= ~(MUSB_RXCSR_AUTOCLEAR | + MUSB_RXCSR_DMAENAB | + MUSB_RXCSR_DMAMODE); + musb_writew(mbase, + MUSB_EP_OFFSET(pImplChannel->epnum, + MUSB_RXCSR), + csr); + } + + musb_writew(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL), + 0); + musb_writel(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS), + 0); + musb_writel(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT), + 0); + + pChannel->status = MUSB_DMA_STATUS_FREE; + } + return 0; +} + +static irqreturn_t dma_controller_irq(int irq, void *private_data) +{ + struct musb_dma_controller *controller = + (struct musb_dma_controller *)private_data; + struct musb_dma_channel *pImplChannel; + struct musb *musb = controller->pDmaPrivate; + void __iomem *mbase = controller->pCoreBase; + struct dma_channel *pChannel; + u8 bChannel; + u16 csr; + u32 dwAddress; + u8 int_hsdma; + irqreturn_t retval = IRQ_NONE; + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + + int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); + if (!int_hsdma) + goto done; + + for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) { + if (int_hsdma & (1 << bChannel)) { + pImplChannel = (struct musb_dma_channel *) + &(controller->aChannel[bChannel]); + pChannel = &pImplChannel->Channel; + + csr = musb_readw(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bChannel, + MUSB_HSDMA_CONTROL)); + + if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT)) + pImplChannel->Channel.status = + MUSB_DMA_STATUS_BUS_ABORT; + else { + u8 devctl; + + dwAddress = musb_readl(mbase, + MUSB_HSDMA_CHANNEL_OFFSET( + bChannel, + MUSB_HSDMA_ADDRESS)); + pChannel->actual_len = dwAddress + - pImplChannel->dwStartAddress; + + DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n", + pChannel, pImplChannel->dwStartAddress, + dwAddress, pChannel->actual_len, + pImplChannel->len, + (pChannel->actual_len + < pImplChannel->len) ? 
+ "=> reconfig 0" : "=> complete"); + + devctl = musb_readb(mbase, MUSB_DEVCTL); + + pChannel->status = MUSB_DMA_STATUS_FREE; + + /* completed */ + if ((devctl & MUSB_DEVCTL_HM) + && (pImplChannel->transmit) + && ((pChannel->desired_mode == 0) + || (pChannel->actual_len & + (pImplChannel->wMaxPacketSize - 1))) + ) { + /* Send out the packet */ + musb_ep_select(mbase, + pImplChannel->epnum); + musb_writew(mbase, MUSB_EP_OFFSET( + pImplChannel->epnum, + MUSB_TXCSR), + MUSB_TXCSR_TXPKTRDY); + } else + musb_dma_completion( + musb, + pImplChannel->epnum, + pImplChannel->transmit); + } + } + } + retval = IRQ_HANDLED; +done: + spin_unlock_irqrestore(&musb->lock, flags); + return retval; +} + +void dma_controller_destroy(struct dma_controller *c) +{ + struct musb_dma_controller *controller; + + controller = container_of(c, struct musb_dma_controller, Controller); + if (!controller) + return; + + if (controller->irq) + free_irq(controller->irq, c); + + kfree(controller); +} + +struct dma_controller *__init +dma_controller_create(struct musb *musb, void __iomem *pCoreBase) +{ + struct musb_dma_controller *controller; + struct device *dev = musb->controller; + struct platform_device *pdev = to_platform_device(dev); + int irq = platform_get_irq(pdev, 1); + + if (irq == 0) { + dev_err(dev, "No DMA interrupt line!\n"); + return NULL; + } + + controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL); + if (!controller) + return NULL; + + controller->bChannelCount = MUSB_HSDMA_CHANNELS; + controller->pDmaPrivate = musb; + controller->pCoreBase = pCoreBase; + + controller->Controller.start = dma_controller_start; + controller->Controller.stop = dma_controller_stop; + controller->Controller.channel_alloc = dma_channel_allocate; + controller->Controller.channel_release = dma_channel_release; + controller->Controller.channel_program = dma_channel_program; + controller->Controller.channel_abort = dma_channel_abort; + + if (request_irq(irq, dma_controller_irq, IRQF_DISABLED, + musb->controller->bus_id, &controller->Controller)) { + dev_err(dev, "request_irq %d failed!\n", irq); + dma_controller_destroy(&controller->Controller); + return NULL; + } + + controller->irq = irq; + + return &controller->Controller; +} diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c new file mode 100644 index 00000000000..298b22e6ad0 --- /dev/null +++ b/drivers/usb/musb/omap2430.c @@ -0,0 +1,324 @@ +/* + * Copyright (C) 2005-2007 by Texas Instruments + * Some code has been taken from tusb6010.c + * Copyrights for that are attributable to: + * Copyright (C) 2006 Nokia Corporation + * Jarkko Nikula + * Tony Lindgren + * + * This file is part of the Inventra Controller Driver for Linux. + * + * The Inventra Controller Driver for Linux is free software; you + * can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. + * + * The Inventra Controller Driver for Linux is distributed in + * the hope that it will be useful, but WITHOUT ANY WARRANTY; + * without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + * License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with The Inventra Controller Driver for Linux ; if not, + * write to the Free Software Foundation, Inc., 59 Temple Place, + * Suite 330, Boston, MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "musb_core.h" +#include "omap2430.h" + +#ifdef CONFIG_ARCH_OMAP3430 +#define get_cpu_rev() 2 +#endif + +#define MUSB_TIMEOUT_A_WAIT_BCON 1100 + +static struct timer_list musb_idle_timer; + +static void musb_do_idle(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + unsigned long flags; + u8 power; + u8 devctl; + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + spin_lock_irqsave(&musb->lock, flags); + + switch (musb->xceiv.state) { + case OTG_STATE_A_WAIT_BCON: + devctl &= ~MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) { + musb->xceiv.state = OTG_STATE_B_IDLE; + MUSB_DEV_MODE(musb); + } else { + musb->xceiv.state = OTG_STATE_A_IDLE; + MUSB_HST_MODE(musb); + } + break; +#ifdef CONFIG_USB_MUSB_HDRC_HCD + case OTG_STATE_A_SUSPEND: + /* finish RESUME signaling? */ + if (musb->port1_status & MUSB_PORT_STAT_RESUME) { + power = musb_readb(musb->mregs, MUSB_POWER); + power &= ~MUSB_POWER_RESUME; + DBG(1, "root port resume stopped, power %02x\n", power); + musb_writeb(musb->mregs, MUSB_POWER, power); + musb->is_active = 1; + musb->port1_status &= ~(USB_PORT_STAT_SUSPEND + | MUSB_PORT_STAT_RESUME); + musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; + usb_hcd_poll_rh_status(musb_to_hcd(musb)); + /* NOTE: it might really be A_WAIT_BCON ... */ + musb->xceiv.state = OTG_STATE_A_HOST; + } + break; +#endif +#ifdef CONFIG_USB_MUSB_HDRC_HCD + case OTG_STATE_A_HOST: + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (devctl & MUSB_DEVCTL_BDEVICE) + musb->xceiv.state = OTG_STATE_B_IDLE; + else + musb->xceiv.state = OTG_STATE_A_WAIT_BCON; +#endif + default: + break; + } + spin_unlock_irqrestore(&musb->lock, flags); +} + + +void musb_platform_try_idle(struct musb *musb, unsigned long timeout) +{ + unsigned long default_timeout = jiffies + msecs_to_jiffies(3); + static unsigned long last_timer; + + if (timeout == 0) + timeout = default_timeout; + + /* Never idle if active, or when VBUS timeout is not set as host */ + if (musb->is_active || ((musb->a_wait_bcon == 0) + && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { + DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); + del_timer(&musb_idle_timer); + last_timer = jiffies; + return; + } + + if (time_after(last_timer, timeout)) { + if (!timer_pending(&musb_idle_timer)) + last_timer = timeout; + else { + DBG(4, "Longer idle timer already pending, ignoring\n"); + return; + } + } + last_timer = timeout; + + DBG(4, "%s inactive, for idle timer for %lu ms\n", + otg_state_string(musb), + (unsigned long)jiffies_to_msecs(timeout - jiffies)); + mod_timer(&musb_idle_timer, timeout); +} + +void musb_platform_enable(struct musb *musb) +{ +} +void musb_platform_disable(struct musb *musb) +{ +} +static void omap_vbus_power(struct musb *musb, int is_on, int sleeping) +{ +} + +static void omap_set_vbus(struct musb *musb, int is_on) +{ + u8 devctl; + /* HDRC controls CPEN, but beware current surges during device + * connect. They can trigger transient overcurrent conditions + * that must be ignored. 
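+ * In this glue layer the session is started or stopped simply by
+ * toggling MUSB_DEVCTL_SESSION and updating the OTG state kept in
+ * musb->xceiv below.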
+ */ + + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + if (is_on) { + musb->is_active = 1; + musb->xceiv.default_a = 1; + musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; + devctl |= MUSB_DEVCTL_SESSION; + + MUSB_HST_MODE(musb); + } else { + musb->is_active = 0; + + /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and + * jumping right to B_IDLE... + */ + + musb->xceiv.default_a = 0; + musb->xceiv.state = OTG_STATE_B_IDLE; + devctl &= ~MUSB_DEVCTL_SESSION; + + MUSB_DEV_MODE(musb); + } + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + DBG(1, "VBUS %s, devctl %02x " + /* otg %3x conf %08x prcm %08x */ "\n", + otg_state_string(musb), + musb_readb(musb->mregs, MUSB_DEVCTL)); +} +static int omap_set_power(struct otg_transceiver *x, unsigned mA) +{ + return 0; +} + +static int musb_platform_resume(struct musb *musb); + +void musb_platform_set_mode(struct musb *musb, u8 musb_mode) +{ + u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + devctl |= MUSB_DEVCTL_SESSION; + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + switch (musb_mode) { + case MUSB_HOST: + otg_set_host(&musb->xceiv, musb->xceiv.host); + break; + case MUSB_PERIPHERAL: + otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget); + break; + case MUSB_OTG: + break; + } +} + +int __init musb_platform_init(struct musb *musb) +{ + u32 l; + +#if defined(CONFIG_ARCH_OMAP2430) + omap_cfg_reg(AE5_2430_USB0HS_STP); +#endif + + musb_platform_resume(musb); + + l = omap_readl(OTG_SYSCONFIG); + l &= ~ENABLEWAKEUP; /* disable wakeup */ + l &= ~NOSTDBY; /* remove possible nostdby */ + l |= SMARTSTDBY; /* enable smart standby */ + l &= ~AUTOIDLE; /* disable auto idle */ + l &= ~NOIDLE; /* remove possible noidle */ + l |= SMARTIDLE; /* enable smart idle */ + l |= AUTOIDLE; /* enable auto idle */ + omap_writel(l, OTG_SYSCONFIG); + + l = omap_readl(OTG_INTERFSEL); + l |= ULPI_12PIN; + omap_writel(l, OTG_INTERFSEL); + + pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " + "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", + omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG), + omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL), + omap_readl(OTG_SIMENABLE)); + + omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1); + + if (is_host_enabled(musb)) + musb->board_set_vbus = omap_set_vbus; + if (is_peripheral_enabled(musb)) + musb->xceiv.set_power = omap_set_power; + musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON; + + setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); + + return 0; +} + +int musb_platform_suspend(struct musb *musb) +{ + u32 l; + + if (!musb->clock) + return 0; + + /* in any role */ + l = omap_readl(OTG_FORCESTDBY); + l |= ENABLEFORCE; /* enable MSTANDBY */ + omap_writel(l, OTG_FORCESTDBY); + + l = omap_readl(OTG_SYSCONFIG); + l |= ENABLEWAKEUP; /* enable wakeup */ + omap_writel(l, OTG_SYSCONFIG); + + if (musb->xceiv.set_suspend) + musb->xceiv.set_suspend(&musb->xceiv, 1); + + if (musb->set_clock) + musb->set_clock(musb->clock, 0); + else + clk_disable(musb->clock); + + return 0; +} + +static int musb_platform_resume(struct musb *musb) +{ + u32 l; + + if (!musb->clock) + return 0; + + if (musb->xceiv.set_suspend) + musb->xceiv.set_suspend(&musb->xceiv, 0); + + if (musb->set_clock) + musb->set_clock(musb->clock, 1); + else + clk_enable(musb->clock); + + l = omap_readl(OTG_SYSCONFIG); + l &= ~ENABLEWAKEUP; /* disable wakeup */ + omap_writel(l, OTG_SYSCONFIG); + + l = omap_readl(OTG_FORCESTDBY); + l &= ~ENABLEFORCE; /* disable MSTANDBY */ + omap_writel(l, OTG_FORCESTDBY); + + return 0; +} + + +int 
musb_platform_exit(struct musb *musb) +{ + + omap_vbus_power(musb, 0 /*off*/, 1); + + musb_platform_suspend(musb); + + clk_put(musb->clock); + musb->clock = 0; + + return 0; +} diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h new file mode 100644 index 00000000000..786a62071f7 --- /dev/null +++ b/drivers/usb/musb/omap2430.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2005-2006 by Texas Instruments + * + * The Inventra Controller Driver for Linux is free software; you + * can redistribute it and/or modify it under the terms of the GNU + * General Public License version 2 as published by the Free Software + * Foundation. + */ + +#ifndef __MUSB_OMAP243X_H__ +#define __MUSB_OMAP243X_H__ + +#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) +#include +#include + +/* + * OMAP2430-specific definitions + */ + +#define MENTOR_BASE_OFFSET 0 +#if defined(CONFIG_ARCH_OMAP2430) +#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE) +#elif defined(CONFIG_ARCH_OMAP3430) +#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE) +#endif +#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset)) +#define OTG_REVISION OMAP_HSOTG(0x0) +#define OTG_SYSCONFIG OMAP_HSOTG(0x4) +# define MIDLEMODE 12 /* bit position */ +# define FORCESTDBY (0 << MIDLEMODE) +# define NOSTDBY (1 << MIDLEMODE) +# define SMARTSTDBY (2 << MIDLEMODE) +# define SIDLEMODE 3 /* bit position */ +# define FORCEIDLE (0 << SIDLEMODE) +# define NOIDLE (1 << SIDLEMODE) +# define SMARTIDLE (2 << SIDLEMODE) +# define ENABLEWAKEUP (1 << 2) +# define SOFTRST (1 << 1) +# define AUTOIDLE (1 << 0) +#define OTG_SYSSTATUS OMAP_HSOTG(0x8) +# define RESETDONE (1 << 0) +#define OTG_INTERFSEL OMAP_HSOTG(0xc) +# define EXTCP (1 << 2) +# define PHYSEL 0 /* bit position */ +# define UTMI_8BIT (0 << PHYSEL) +# define ULPI_12PIN (1 << PHYSEL) +# define ULPI_8PIN (2 << PHYSEL) +#define OTG_SIMENABLE OMAP_HSOTG(0x10) +# define TM1 (1 << 0) +#define OTG_FORCESTDBY OMAP_HSOTG(0x14) +# define ENABLEFORCE (1 << 0) + +#endif /* CONFIG_ARCH_OMAP2430 */ + +#endif /* __MUSB_OMAP243X_H__ */ diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c new file mode 100644 index 00000000000..b73b036f3d7 --- /dev/null +++ b/drivers/usb/musb/tusb6010.c @@ -0,0 +1,1151 @@ +/* + * TUSB6010 USB 2.0 OTG Dual Role controller + * + * Copyright (C) 2006 Nokia Corporation + * Jarkko Nikula + * Tony Lindgren + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Notes: + * - Driver assumes that interface to external host (main CPU) is + * configured for NOR FLASH interface instead of VLYNQ serial + * interface. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "musb_core.h" + +static void tusb_source_power(struct musb *musb, int is_on); + +#define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) +#define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) + +/* + * Checks the revision. We need to use the DMA register as 3.0 does not + * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. 
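+ * Revision 3.1 parts still report 3.0 in TUSB_DMA_CTRL_REV, so the
+ * die ID in TUSB_DIDR1_HI is consulted below to tell 3.0 and 3.1 apart.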
+ */ +u8 tusb_get_revision(struct musb *musb) +{ + void __iomem *tbase = musb->ctrl_base; + u32 die_id; + u8 rev; + + rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff; + if (TUSB_REV_MAJOR(rev) == 3) { + die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, + TUSB_DIDR1_HI)); + if (die_id >= TUSB_DIDR1_HI_REV_31) + rev |= 1; + } + + return rev; +} + +static int __init tusb_print_revision(struct musb *musb) +{ + void __iomem *tbase = musb->ctrl_base; + u8 rev; + + rev = tusb_get_revision(musb); + + pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n", + "prcm", + TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)), + TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)), + "int", + TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), + TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), + "gpio", + TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)), + TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)), + "dma", + TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), + TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), + "dieid", + TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)), + "rev", + TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev)); + + return tusb_get_revision(musb); +} + +#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \ + | TUSB_PHY_OTG_CTRL_TESTM0) + +/* + * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0. + * Disables power detection in PHY for the duration of idle. + */ +static void tusb_wbus_quirk(struct musb *musb, int enabled) +{ + void __iomem *tbase = musb->ctrl_base; + static u32 phy_otg_ctrl, phy_otg_ena; + u32 tmp; + + if (enabled) { + phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); + phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); + tmp = TUSB_PHY_OTG_CTRL_WRPROTECT + | phy_otg_ena | WBUS_QUIRK_MASK; + musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); + tmp = phy_otg_ena & ~WBUS_QUIRK_MASK; + tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2; + musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); + DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n", + musb_readl(tbase, TUSB_PHY_OTG_CTRL), + musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); + } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE) + & TUSB_PHY_OTG_CTRL_TESTM2) { + tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl; + musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); + tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena; + musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); + DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n", + musb_readl(tbase, TUSB_PHY_OTG_CTRL), + musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); + phy_otg_ctrl = 0; + phy_otg_ena = 0; + } +} + +/* + * TUSB 6010 may use a parallel bus that doesn't support byte ops; + * so both loading and unloading FIFOs need explicit byte counts. 
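+ * The unaligned helpers below stage partial words in a local u32 so
+ * that every access on the bus remains a full 32-bit read or write.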
+ */ + +static inline void +tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len) +{ + u32 val; + int i; + + if (len > 4) { + for (i = 0; i < (len >> 2); i++) { + memcpy(&val, buf, 4); + musb_writel(fifo, 0, val); + buf += 4; + } + len %= 4; + } + if (len > 0) { + /* Write the rest 1 - 3 bytes to FIFO */ + memcpy(&val, buf, len); + musb_writel(fifo, 0, val); + } +} + +static inline void tusb_fifo_read_unaligned(void __iomem *fifo, + void __iomem *buf, u16 len) +{ + u32 val; + int i; + + if (len > 4) { + for (i = 0; i < (len >> 2); i++) { + val = musb_readl(fifo, 0); + memcpy(buf, &val, 4); + buf += 4; + } + len %= 4; + } + if (len > 0) { + /* Read the rest 1 - 3 bytes from FIFO */ + val = musb_readl(fifo, 0); + memcpy(buf, &val, len); + } +} + +void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) +{ + void __iomem *ep_conf = hw_ep->conf; + void __iomem *fifo = hw_ep->fifo; + u8 epnum = hw_ep->epnum; + + prefetch(buf); + + DBG(4, "%cX ep%d fifo %p count %d buf %p\n", + 'T', epnum, fifo, len, buf); + + if (epnum) + musb_writel(ep_conf, TUSB_EP_TX_OFFSET, + TUSB_EP_CONFIG_XFR_SIZE(len)); + else + musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX | + TUSB_EP0_CONFIG_XFR_SIZE(len)); + + if (likely((0x01 & (unsigned long) buf) == 0)) { + + /* Best case is 32bit-aligned destination address */ + if ((0x02 & (unsigned long) buf) == 0) { + if (len >= 4) { + writesl(fifo, buf, len >> 2); + buf += (len & ~0x03); + len &= 0x03; + } + } else { + if (len >= 2) { + u32 val; + int i; + + /* Cannot use writesw, fifo is 32-bit */ + for (i = 0; i < (len >> 2); i++) { + val = (u32)(*(u16 *)buf); + buf += 2; + val |= (*(u16 *)buf) << 16; + buf += 2; + musb_writel(fifo, 0, val); + } + len &= 0x03; + } + } + } + + if (len > 0) + tusb_fifo_write_unaligned(fifo, buf, len); +} + +void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf) +{ + void __iomem *ep_conf = hw_ep->conf; + void __iomem *fifo = hw_ep->fifo; + u8 epnum = hw_ep->epnum; + + DBG(4, "%cX ep%d fifo %p count %d buf %p\n", + 'R', epnum, fifo, len, buf); + + if (epnum) + musb_writel(ep_conf, TUSB_EP_RX_OFFSET, + TUSB_EP_CONFIG_XFR_SIZE(len)); + else + musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len)); + + if (likely((0x01 & (unsigned long) buf) == 0)) { + + /* Best case is 32bit-aligned destination address */ + if ((0x02 & (unsigned long) buf) == 0) { + if (len >= 4) { + readsl(fifo, buf, len >> 2); + buf += (len & ~0x03); + len &= 0x03; + } + } else { + if (len >= 2) { + u32 val; + int i; + + /* Cannot use readsw, fifo is 32-bit */ + for (i = 0; i < (len >> 2); i++) { + val = musb_readl(fifo, 0); + *(u16 *)buf = (u16)(val & 0xffff); + buf += 2; + *(u16 *)buf = (u16)(val >> 16); + buf += 2; + } + len &= 0x03; + } + } + } + + if (len > 0) + tusb_fifo_read_unaligned(fifo, buf, len); +} + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + +/* This is used by gadget drivers, and OTG transceiver logic, allowing + * at most mA current to be drawn from VBUS during a Default-B session + * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host + * mode), or low power Default-B sessions, something else supplies power. + * Caller must take care of locking. + */ +static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) +{ + struct musb *musb = container_of(x, struct musb, xceiv); + void __iomem *tbase = musb->ctrl_base; + u32 reg; + + /* + * Keep clock active when enabled. Note that this is not tied to + * drawing VBUS, as with OTG mA can be less than musb->min_power. 
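+ * A request of 0 mA therefore releases the controller clock and, further
+ * below, gates off the 1.5V and 3.3V switches in TUSB_PRCM_MNGMT.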
+ */ + if (musb->set_clock) { + if (mA) + musb->set_clock(musb->clock, 1); + else + musb->set_clock(musb->clock, 0); + } + + /* tps65030 seems to consume max 100mA, with maybe 60mA available + * (measured on one board) for things other than tps and tusb. + * + * Boards sharing the CPU clock with CLKIN will need to prevent + * certain idle sleep states while the USB link is active. + * + * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }. + * The actual current usage would be very board-specific. For now, + * it's simpler to just use an aggregate (also board-specific). + */ + if (x->default_a || mA < (musb->min_power << 1)) + mA = 0; + + reg = musb_readl(tbase, TUSB_PRCM_MNGMT); + if (mA) { + musb->is_bus_powered = 1; + reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN; + } else { + musb->is_bus_powered = 0; + reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); + } + musb_writel(tbase, TUSB_PRCM_MNGMT, reg); + + DBG(2, "draw max %d mA VBUS\n", mA); + return 0; +} + +#else +#define tusb_draw_power NULL +#endif + +/* workaround for issue 13: change clock during chip idle + * (to be fixed in rev3 silicon) ... symptoms include disconnect + * or looping suspend/resume cycles + */ +static void tusb_set_clock_source(struct musb *musb, unsigned mode) +{ + void __iomem *tbase = musb->ctrl_base; + u32 reg; + + reg = musb_readl(tbase, TUSB_PRCM_CONF); + reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3); + + /* 0 = refclk (clkin, XI) + * 1 = PHY 60 MHz (internal PLL) + * 2 = not supported + * 3 = what? + */ + if (mode > 0) + reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3); + + musb_writel(tbase, TUSB_PRCM_CONF, reg); + + /* FIXME tusb6010_platform_retime(mode == 0); */ +} + +/* + * Idle TUSB6010 until next wake-up event; NOR access always wakes. + * Other code ensures that we idle unless we're connected _and_ the + * USB link is not suspended ... and tells us the relevant wakeup + * events. SW_EN for voltage is handled separately. + */ +void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) +{ + void __iomem *tbase = musb->ctrl_base; + u32 reg; + + if ((wakeup_enables & TUSB_PRCM_WBUS) + && (tusb_get_revision(musb) == TUSB_REV_30)) + tusb_wbus_quirk(musb, 1); + + tusb_set_clock_source(musb, 0); + + wakeup_enables |= TUSB_PRCM_WNORCS; + musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables); + + /* REVISIT writeup of WID implies that if WID set and ID is grounded, + * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared. + * Presumably that's mostly to save power, hence WID is immaterial ... + */ + + reg = musb_readl(tbase, TUSB_PRCM_MNGMT); + /* issue 4: when driving vbus, use hipower (vbus_det) comparator */ + if (is_host_active(musb)) { + reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; + reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN; + } else { + reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN; + reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; + } + reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE; + musb_writel(tbase, TUSB_PRCM_MNGMT, reg); + + DBG(6, "idle, wake on %02x\n", wakeup_enables); +} + +/* + * Updates cable VBUS status. Caller must take care of locking. + */ +int musb_platform_get_vbus_status(struct musb *musb) +{ + void __iomem *tbase = musb->ctrl_base; + u32 otg_stat, prcm_mngmt; + int ret = 0; + + otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); + prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT); + + /* Temporarily enable VBUS detection if it was disabled for + * suspend mode. Unless it's enabled otg_stat and devctl will + * not show correct VBUS state. 
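+ * The saved TUSB_PRCM_MNGMT value is written back once otg_stat has
+ * been re-read, so the suspend-time configuration is left untouched.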
+ */ + if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) { + u32 tmp = prcm_mngmt; + tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; + musb_writel(tbase, TUSB_PRCM_MNGMT, tmp); + otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); + musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt); + } + + if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) + ret = 1; + + return ret; +} + +static struct timer_list musb_idle_timer; + +static void musb_do_idle(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + unsigned long flags; + + spin_lock_irqsave(&musb->lock, flags); + + switch (musb->xceiv.state) { + case OTG_STATE_A_WAIT_BCON: + if ((musb->a_wait_bcon != 0) + && (musb->idle_timeout == 0 + || time_after(jiffies, musb->idle_timeout))) { + DBG(4, "Nothing connected %s, turning off VBUS\n", + otg_state_string(musb)); + } + /* FALLTHROUGH */ + case OTG_STATE_A_IDLE: + tusb_source_power(musb, 0); + default: + break; + } + + if (!musb->is_active) { + u32 wakeups; + + /* wait until khubd handles port change status */ + if (is_host_active(musb) && (musb->port1_status >> 16)) + goto done; + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + if (is_peripheral_enabled(musb) && !musb->gadget_driver) + wakeups = 0; + else { + wakeups = TUSB_PRCM_WHOSTDISCON + | TUSB_PRCM_WBUS + | TUSB_PRCM_WVBUS; + if (is_otg_enabled(musb)) + wakeups |= TUSB_PRCM_WID; + } +#else + wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS; +#endif + tusb_allow_idle(musb, wakeups); + } +done: + spin_unlock_irqrestore(&musb->lock, flags); +} + +/* + * Maybe put TUSB6010 into idle mode mode depending on USB link status, + * like "disconnected" or "suspended". We'll be woken out of it by + * connect, resume, or disconnect. + * + * Needs to be called as the last function everywhere where there is + * register access to TUSB6010 because of NOR flash wake-up. + * Caller should own controller spinlock. + * + * Delay because peripheral enables D+ pullup 3msec after SE0, and + * we don't want to treat that full speed J as a wakeup event. + * ... peripherals must draw only suspend current after 10 msec. + */ +void musb_platform_try_idle(struct musb *musb, unsigned long timeout) +{ + unsigned long default_timeout = jiffies + msecs_to_jiffies(3); + static unsigned long last_timer; + + if (timeout == 0) + timeout = default_timeout; + + /* Never idle if active, or when VBUS timeout is not set as host */ + if (musb->is_active || ((musb->a_wait_bcon == 0) + && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { + DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); + del_timer(&musb_idle_timer); + last_timer = jiffies; + return; + } + + if (time_after(last_timer, timeout)) { + if (!timer_pending(&musb_idle_timer)) + last_timer = timeout; + else { + DBG(4, "Longer idle timer already pending, ignoring\n"); + return; + } + } + last_timer = timeout; + + DBG(4, "%s inactive, for idle timer for %lu ms\n", + otg_state_string(musb), + (unsigned long)jiffies_to_msecs(timeout - jiffies)); + mod_timer(&musb_idle_timer, timeout); +} + +/* ticks of 60 MHz clock */ +#define DEVCLOCK 60000000 +#define OTG_TIMER_MS(msecs) ((msecs) \ + ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \ + | TUSB_DEV_OTG_TIMER_ENABLE) \ + : 0) + +static void tusb_source_power(struct musb *musb, int is_on) +{ + void __iomem *tbase = musb->ctrl_base; + u32 conf, prcm, timer; + u8 devctl; + + /* HDRC controls CPEN, but beware current surges during device + * connect. They can trigger transient overcurrent conditions + * that must be ignored. 
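+ * When enabling the session, the OTG timer below is armed with
+ * OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); at the 60 MHz DEVCLOCK,
+ * OTG_TIMER_MS(100) would load 6,000,000 ticks plus the enable bit.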
+ */ + + prcm = musb_readl(tbase, TUSB_PRCM_MNGMT); + conf = musb_readl(tbase, TUSB_DEV_CONF); + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + + if (is_on) { + if (musb->set_clock) + musb->set_clock(musb->clock, 1); + timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); + musb->xceiv.default_a = 1; + musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; + devctl |= MUSB_DEVCTL_SESSION; + + conf |= TUSB_DEV_CONF_USB_HOST_MODE; + MUSB_HST_MODE(musb); + } else { + u32 otg_stat; + + timer = 0; + + /* If ID pin is grounded, we want to be a_idle */ + otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); + if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) { + switch (musb->xceiv.state) { + case OTG_STATE_A_WAIT_VRISE: + case OTG_STATE_A_WAIT_BCON: + musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; + break; + case OTG_STATE_A_WAIT_VFALL: + musb->xceiv.state = OTG_STATE_A_IDLE; + break; + default: + musb->xceiv.state = OTG_STATE_A_IDLE; + } + musb->is_active = 0; + musb->xceiv.default_a = 1; + MUSB_HST_MODE(musb); + } else { + musb->is_active = 0; + musb->xceiv.default_a = 0; + musb->xceiv.state = OTG_STATE_B_IDLE; + MUSB_DEV_MODE(musb); + } + + devctl &= ~MUSB_DEVCTL_SESSION; + conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; + if (musb->set_clock) + musb->set_clock(musb->clock, 0); + } + prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); + + musb_writel(tbase, TUSB_PRCM_MNGMT, prcm); + musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer); + musb_writel(tbase, TUSB_DEV_CONF, conf); + musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); + + DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n", + otg_state_string(musb), + musb_readb(musb->mregs, MUSB_DEVCTL), + musb_readl(tbase, TUSB_DEV_OTG_STAT), + conf, prcm); +} + +/* + * Sets the mode to OTG, peripheral or host by changing the ID detection. + * Caller must take care of locking. + * + * Note that if a mini-A cable is plugged in the ID line will stay down as + * the weak ID pull-up is not able to pull the ID up. + * + * REVISIT: It would be possible to add support for changing between host + * and peripheral modes in non-OTG configurations by reconfiguring hardware + * and then setting musb->board_mode. For now, only support OTG mode. 
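+ * The mode is selected below by steering ID detection: TUSB_DEV_CONF_ID_SEL,
+ * TUSB_DEV_CONF_SOFT_ID and the PHY OTG_ID_PULLUP enables are reprogrammed
+ * for the requested role.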
+ */ +void musb_platform_set_mode(struct musb *musb, u8 musb_mode) +{ + void __iomem *tbase = musb->ctrl_base; + u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; + + if (musb->board_mode != MUSB_OTG) { + ERR("Changing mode currently only supported in OTG mode\n"); + return; + } + + otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); + phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); + phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); + dev_conf = musb_readl(tbase, TUSB_DEV_CONF); + + switch (musb_mode) { + +#ifdef CONFIG_USB_MUSB_HDRC_HCD + case MUSB_HOST: /* Disable PHY ID detect, ground ID */ + phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + dev_conf |= TUSB_DEV_CONF_ID_SEL; + dev_conf &= ~TUSB_DEV_CONF_SOFT_ID; + break; +#endif + +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */ + phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); + break; +#endif + +#ifdef CONFIG_USB_MUSB_OTG + case MUSB_OTG: /* Use PHY ID detection */ + phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); + break; +#endif + + default: + DBG(2, "Trying to set unknown mode %i\n", musb_mode); + } + + musb_writel(tbase, TUSB_PHY_OTG_CTRL, + TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl); + musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, + TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena); + musb_writel(tbase, TUSB_DEV_CONF, dev_conf); + + otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); + if ((musb_mode == MUSB_PERIPHERAL) && + !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) + INFO("Cannot be peripheral with mini-A cable " + "otg_stat: %08x\n", otg_stat); +} + +static inline unsigned long +tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) +{ + u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); + unsigned long idle_timeout = 0; + + /* ID pin */ + if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) { + int default_a; + + if (is_otg_enabled(musb)) + default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS); + else + default_a = is_host_enabled(musb); + DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); + musb->xceiv.default_a = default_a; + tusb_source_power(musb, default_a); + + /* Don't allow idling immediately */ + if (default_a) + idle_timeout = jiffies + (HZ * 3); + } + + /* VBUS state change */ + if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { + + /* B-dev state machine: no vbus ~= disconnect */ + if ((is_otg_enabled(musb) && !musb->xceiv.default_a) + || !is_host_enabled(musb)) { +#ifdef CONFIG_USB_MUSB_HDRC_HCD + /* ? musb_root_disconnect(musb); */ + musb->port1_status &= + ~(USB_PORT_STAT_CONNECTION + | USB_PORT_STAT_ENABLE + | USB_PORT_STAT_LOW_SPEED + | USB_PORT_STAT_HIGH_SPEED + | USB_PORT_STAT_TEST + ); +#endif + + if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { + DBG(1, "Forcing disconnect (no interrupt)\n"); + if (musb->xceiv.state != OTG_STATE_B_IDLE) { + /* INTR_DISCONNECT can hide... 
*/ + musb->xceiv.state = OTG_STATE_B_IDLE; + musb->int_usb |= MUSB_INTR_DISCONNECT; + } + musb->is_active = 0; + } + DBG(2, "vbus change, %s, otg %03x\n", + otg_state_string(musb), otg_stat); + idle_timeout = jiffies + (1 * HZ); + schedule_work(&musb->irq_work); + + } else /* A-dev state machine */ { + DBG(2, "vbus change, %s, otg %03x\n", + otg_state_string(musb), otg_stat); + + switch (musb->xceiv.state) { + case OTG_STATE_A_IDLE: + DBG(2, "Got SRP, turning on VBUS\n"); + musb_set_vbus(musb, 1); + + /* CONNECT can wake if a_wait_bcon is set */ + if (musb->a_wait_bcon != 0) + musb->is_active = 0; + else + musb->is_active = 1; + + /* + * OPT FS A TD.4.6 needs few seconds for + * A_WAIT_VRISE + */ + idle_timeout = jiffies + (2 * HZ); + + break; + case OTG_STATE_A_WAIT_VRISE: + /* ignore; A-session-valid < VBUS_VALID/2, + * we monitor this with the timer + */ + break; + case OTG_STATE_A_WAIT_VFALL: + /* REVISIT this irq triggers during short + * spikes caused by enumeration ... + */ + if (musb->vbuserr_retry) { + musb->vbuserr_retry--; + tusb_source_power(musb, 1); + } else { + musb->vbuserr_retry + = VBUSERR_RETRY_COUNT; + tusb_source_power(musb, 0); + } + break; + default: + break; + } + } + } + + /* OTG timer expiration */ + if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) { + u8 devctl; + + DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat); + + switch (musb->xceiv.state) { + case OTG_STATE_A_WAIT_VRISE: + /* VBUS has probably been valid for a while now, + * but may well have bounced out of range a bit + */ + devctl = musb_readb(musb->mregs, MUSB_DEVCTL); + if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) { + if ((devctl & MUSB_DEVCTL_VBUS) + != MUSB_DEVCTL_VBUS) { + DBG(2, "devctl %02x\n", devctl); + break; + } + musb->xceiv.state = OTG_STATE_A_WAIT_BCON; + musb->is_active = 0; + idle_timeout = jiffies + + msecs_to_jiffies(musb->a_wait_bcon); + } else { + /* REVISIT report overcurrent to hub? */ + ERR("vbus too slow, devctl %02x\n", devctl); + tusb_source_power(musb, 0); + } + break; + case OTG_STATE_A_WAIT_BCON: + if (musb->a_wait_bcon != 0) + idle_timeout = jiffies + + msecs_to_jiffies(musb->a_wait_bcon); + break; + case OTG_STATE_A_SUSPEND: + break; + case OTG_STATE_B_WAIT_ACON: + break; + default: + break; + } + } + schedule_work(&musb->irq_work); + + return idle_timeout; +} + +static irqreturn_t tusb_interrupt(int irq, void *__hci) +{ + struct musb *musb = __hci; + void __iomem *tbase = musb->ctrl_base; + unsigned long flags, idle_timeout = 0; + u32 int_mask, int_src; + + spin_lock_irqsave(&musb->lock, flags); + + /* Mask all interrupts to allow using both edge and level GPIO irq */ + int_mask = musb_readl(tbase, TUSB_INT_MASK); + musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); + + int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS; + DBG(3, "TUSB IRQ %08x\n", int_src); + + musb->int_usb = (u8) int_src; + + /* Acknowledge wake-up source interrupts */ + if (int_src & TUSB_INT_SRC_DEV_WAKEUP) { + u32 reg; + u32 i; + + if (tusb_get_revision(musb) == TUSB_REV_30) + tusb_wbus_quirk(musb, 0); + + /* there are issues re-locking the PLL on wakeup ... 
*/ + + /* work around issue 8 */ + for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) { + musb_writel(tbase, TUSB_SCRATCH_PAD, 0); + musb_writel(tbase, TUSB_SCRATCH_PAD, i); + reg = musb_readl(tbase, TUSB_SCRATCH_PAD); + if (reg == i) + break; + DBG(6, "TUSB NOR not ready\n"); + } + + /* work around issue 13 (2nd half) */ + tusb_set_clock_source(musb, 1); + + reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE); + musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); + if (reg & ~TUSB_PRCM_WNORCS) { + musb->is_active = 1; + schedule_work(&musb->irq_work); + } + DBG(3, "wake %sactive %02x\n", + musb->is_active ? "" : "in", reg); + + /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */ + } + + if (int_src & TUSB_INT_SRC_USB_IP_CONN) + del_timer(&musb_idle_timer); + + /* OTG state change reports (annoyingly) not issued by Mentor core */ + if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG + | TUSB_INT_SRC_OTG_TIMEOUT + | TUSB_INT_SRC_ID_STATUS_CHNG)) + idle_timeout = tusb_otg_ints(musb, int_src, tbase); + + /* TX dma callback must be handled here, RX dma callback is + * handled in tusb_omap_dma_cb. + */ + if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) { + u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC); + u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK); + + DBG(3, "DMA IRQ %08x\n", dma_src); + real_dma_src = ~real_dma_src & dma_src; + if (tusb_dma_omap() && real_dma_src) { + int tx_source = (real_dma_src & 0xffff); + int i; + + for (i = 1; i <= 15; i++) { + if (tx_source & (1 << i)) { + DBG(3, "completing ep%i %s\n", i, "tx"); + musb_dma_completion(musb, i, 1); + } + } + } + musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src); + } + + /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */ + if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) { + u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC); + + musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src); + musb->int_rx = (((musb_src >> 16) & 0xffff) << 1); + musb->int_tx = (musb_src & 0xffff); + } else { + musb->int_rx = 0; + musb->int_tx = 0; + } + + if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff)) + musb_interrupt(musb); + + /* Acknowledge TUSB interrupts. Clear only non-reserved bits */ + musb_writel(tbase, TUSB_INT_SRC_CLEAR, + int_src & ~TUSB_INT_MASK_RESERVED_BITS); + + musb_platform_try_idle(musb, idle_timeout); + + musb_writel(tbase, TUSB_INT_MASK, int_mask); + spin_unlock_irqrestore(&musb->lock, flags); + + return IRQ_HANDLED; +} + +static int dma_off; + +/* + * Enables TUSB6010. Caller must take care of locking. + * REVISIT: + * - Check what is unnecessary in MGC_HdrcStart() + */ +void musb_platform_enable(struct musb *musb) +{ + void __iomem *tbase = musb->ctrl_base; + + /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF. 
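+ * (TUSB_INT_MASK is a mask register, so writing only the SOF bit
+ * leaves every other interrupt source unmasked.)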
+ * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */ + musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF); + + /* Setup TUSB interrupt, disable DMA and GPIO interrupts */ + musb_writel(tbase, TUSB_USBIP_INT_MASK, 0); + musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); + musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); + + /* Clear all subsystem interrups */ + musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff); + musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff); + musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff); + + /* Acknowledge pending interrupt(s) */ + musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS); + + /* Only 0 clock cycles for minimum interrupt de-assertion time and + * interrupt polarity active low seems to work reliably here */ + musb_writel(tbase, TUSB_INT_CTRL_CONF, + TUSB_INT_CTRL_CONF_INT_RELCYC(0)); + + set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); + + /* maybe force into the Default-A OTG state machine */ + if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) + & TUSB_DEV_OTG_STAT_ID_STATUS)) + musb_writel(tbase, TUSB_INT_SRC_SET, + TUSB_INT_SRC_ID_STATUS_CHNG); + + if (is_dma_capable() && dma_off) + printk(KERN_WARNING "%s %s: dma not reactivated\n", + __FILE__, __func__); + else + dma_off = 1; +} + +/* + * Disables TUSB6010. Caller must take care of locking. + */ +void musb_platform_disable(struct musb *musb) +{ + void __iomem *tbase = musb->ctrl_base; + + /* FIXME stop DMA, IRQs, timers, ... */ + + /* disable all IRQs */ + musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); + musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff); + musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); + musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); + + del_timer(&musb_idle_timer); + + if (is_dma_capable() && !dma_off) { + printk(KERN_WARNING "%s %s: dma still active\n", + __FILE__, __func__); + dma_off = 1; + } +} + +/* + * Sets up TUSB6010 CPU interface specific signals and registers + * Note: Settings optimized for OMAP24xx + */ +static void __init tusb_setup_cpu_interface(struct musb *musb) +{ + void __iomem *tbase = musb->ctrl_base; + + /* + * Disable GPIO[5:0] pullups (used as output DMA requests) + * Don't disable GPIO[7:6] as they are needed for wake-up. 
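+ * (Hence the 0x0000003F written below affects only bits 5:0 of
+ * TUSB_PULLUP_1_CTRL, leaving the GPIO[7:6] pullups enabled.)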
+ */ + musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F); + + /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */ + musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF); + + /* Turn GPIO[5:0] to DMAREQ[5:0] signals */ + musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f)); + + /* Burst size 16x16 bits, all six DMA requests enabled, DMA request + * de-assertion time 2 system clocks p 62 */ + musb_writel(tbase, TUSB_DMA_REQ_CONF, + TUSB_DMA_REQ_CONF_BURST_SIZE(2) | + TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) | + TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); + + /* Set 0 wait count for synchronous burst access */ + musb_writel(tbase, TUSB_WAIT_COUNT, 1); +} + +static int __init tusb_start(struct musb *musb) +{ + void __iomem *tbase = musb->ctrl_base; + int ret = 0; + unsigned long flags; + u32 reg; + + if (musb->board_set_power) + ret = musb->board_set_power(1); + if (ret != 0) { + printk(KERN_ERR "tusb: Cannot enable TUSB6010\n"); + return ret; + } + + spin_lock_irqsave(&musb->lock, flags); + + if (musb_readl(tbase, TUSB_PROD_TEST_RESET) != + TUSB_PROD_TEST_RESET_VAL) { + printk(KERN_ERR "tusb: Unable to detect TUSB6010\n"); + goto err; + } + + ret = tusb_print_revision(musb); + if (ret < 2) { + printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n", + ret); + goto err; + } + + /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when + * NOR FLASH interface is used */ + musb_writel(tbase, TUSB_VLYNQ_CTRL, 8); + + /* Select PHY free running 60MHz as a system clock */ + tusb_set_clock_source(musb, 1); + + /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for + * power saving, enable VBus detect and session end comparators, + * enable IDpullup, enable VBus charging */ + musb_writel(tbase, TUSB_PRCM_MNGMT, + TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) | + TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN | + TUSB_PRCM_MNGMT_OTG_SESS_END_EN | + TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN | + TUSB_PRCM_MNGMT_OTG_ID_PULLUP); + tusb_setup_cpu_interface(musb); + + /* simplify: always sense/pullup ID pins, as if in OTG mode */ + reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); + reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg); + + reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL); + reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; + musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg); + + spin_unlock_irqrestore(&musb->lock, flags); + + return 0; + +err: + spin_unlock_irqrestore(&musb->lock, flags); + + if (musb->board_set_power) + musb->board_set_power(0); + + return -ENODEV; +} + +int __init musb_platform_init(struct musb *musb) +{ + struct platform_device *pdev; + struct resource *mem; + void __iomem *sync; + int ret; + + pdev = to_platform_device(musb->controller); + + /* dma address for async dma */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + musb->async = mem->start; + + /* dma address for sync dma */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!mem) { + pr_debug("no sync dma resource?\n"); + return -ENODEV; + } + musb->sync = mem->start; + + sync = ioremap(mem->start, mem->end - mem->start + 1); + if (!sync) { + pr_debug("ioremap for sync failed\n"); + return -ENOMEM; + } + musb->sync_va = sync; + + /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400, + * FIFOs at 0x600, TUSB at 0x800 + */ + musb->mregs += TUSB_BASE_OFFSET; + + ret = tusb_start(musb); + if (ret) { + printk(KERN_ERR "Could not start tusb6010 (%d)\n", + ret); + return -ENODEV; + } + musb->isr = tusb_interrupt; + + if 
(is_host_enabled(musb)) + musb->board_set_vbus = tusb_source_power; + if (is_peripheral_enabled(musb)) + musb->xceiv.set_power = tusb_draw_power; + + setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); + + return ret; +} + +int musb_platform_exit(struct musb *musb) +{ + del_timer_sync(&musb_idle_timer); + + if (musb->board_set_power) + musb->board_set_power(0); + + iounmap(musb->sync_va); + + return 0; +} diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h new file mode 100644 index 00000000000..db6dad0750a --- /dev/null +++ b/drivers/usb/musb/tusb6010.h @@ -0,0 +1,402 @@ +/* + * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller + * + * Copyright (C) 2006 Nokia Corporation + * Jarkko Nikula + * Tony Lindgren + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __TUSB6010_H__ +#define __TUSB6010_H__ + +extern u8 tusb_get_revision(struct musb *musb); + +#ifdef CONFIG_USB_TUSB6010 +#define musb_in_tusb() 1 +#else +#define musb_in_tusb() 0 +#endif + +#ifdef CONFIG_USB_TUSB_OMAP_DMA +#define tusb_dma_omap() 1 +#else +#define tusb_dma_omap() 0 +#endif + +/* VLYNQ control register. 32-bit at offset 0x000 */ +#define TUSB_VLYNQ_CTRL 0x004 + +/* Mentor Graphics OTG core registers. 8,- 16- and 32-bit at offset 0x400 */ +#define TUSB_BASE_OFFSET 0x400 + +/* FIFO registers 32-bit at offset 0x600 */ +#define TUSB_FIFO_BASE 0x600 + +/* Device System & Control registers. 32-bit at offset 0x800 */ +#define TUSB_SYS_REG_BASE 0x800 + +#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000) +#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16) +#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15) +#define TUSB_DEV_CONF_SOFT_ID (1 << 1) +#define TUSB_DEV_CONF_ID_SEL (1 << 0) + +#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004) +#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008) +#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24) +#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23) +#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19) +#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18) +#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17) +#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16) +#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15) +#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14) +#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13) +#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12) +#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11) +#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10) +#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9) +#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7) +#define TUSB_PHY_OTG_CTRL_PD (1 << 6) +#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5) +#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4) +#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3) +#define TUSB_PHY_OTG_CTRL_RESET (1 << 2) +#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1) +#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0) + +/*OTG status register */ +#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c) +#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8) +#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7) +#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6) +#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5) +#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4) +#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3) +#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2) +#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0) +#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1) +#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0) + +#define TUSB_DEV_OTG_TIMER 
(TUSB_SYS_REG_BASE + 0x010) +# define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31) +# define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff) +#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014) + +/* PRCM configuration register */ +#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018) +#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24) +#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16) + +/* PRCM management register */ +#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c) +#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25) +#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24) +#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20) +#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19) +#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18) +#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17) +#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10) +#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9) +#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8) +#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4) +#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3) +#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2) +#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1) +#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0) + +/* Wake-up source clear and mask registers */ +#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020) +#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028) +#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c) +#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13) +#define TUSB_PRCM_WGPIO_7 (1 << 12) +#define TUSB_PRCM_WGPIO_6 (1 << 11) +#define TUSB_PRCM_WGPIO_5 (1 << 10) +#define TUSB_PRCM_WGPIO_4 (1 << 9) +#define TUSB_PRCM_WGPIO_3 (1 << 8) +#define TUSB_PRCM_WGPIO_2 (1 << 7) +#define TUSB_PRCM_WGPIO_1 (1 << 6) +#define TUSB_PRCM_WGPIO_0 (1 << 5) +#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */ +#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */ +#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */ +#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */ +#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */ + +#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030) +#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034) +#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038) +#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c) +#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040) +#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044) +#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048) +#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c) +#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050) +#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054) +#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058) +#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c) +#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060) +#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064) +#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068) +#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c) + +/* NOR flash interrupt source registers */ +#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070) +#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074) +#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078) +#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c) +#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24) +#define TUSB_INT_SRC_USB_IP_CORE (1 << 17) +#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16) +#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15) +#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14) +#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13) +#define TUSB_INT_SRC_DEV_READY (1 << 12) +#define TUSB_INT_SRC_USB_IP_TX (1 << 9) +#define TUSB_INT_SRC_USB_IP_RX (1 << 8) +#define 
TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7) +#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6) +#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5) +#define TUSB_INT_SRC_USB_IP_CONN (1 << 4) +#define TUSB_INT_SRC_USB_IP_SOF (1 << 3) +#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2) +#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1) +#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0) + +/* NOR flash interrupt registers reserved bits. Must be written as 0 */ +#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17) +#define TUSB_INT_MASK_RESERVED_13 (1 << 13) +#define TUSB_INT_MASK_RESERVED_8 (0xf << 8) +#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26) +#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18) +#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10) + +/* Reserved bits for NOR flash interrupt mask and clear register */ +#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \ + TUSB_INT_MASK_RESERVED_13 | \ + TUSB_INT_MASK_RESERVED_8) + +/* Reserved bits for NOR flash interrupt status register */ +#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \ + TUSB_INT_SRC_RESERVED_18 | \ + TUSB_INT_SRC_RESERVED_10) + +#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080) +#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084) +#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100) +#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104) +#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108) +#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148) + +/* Offsets from each ep base register */ +#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */ +#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */ +#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188 + +#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8) +#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4) +#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8) + +/* Device System & Control register bitfields */ +#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18) +#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17) +#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16) +#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24) +#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26) +#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20) +#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16) +#define TUSB_EP0_CONFIG_SW_EN (1 << 8) +#define TUSB_EP0_CONFIG_DIR_TX (1 << 7) +#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f) +#define TUSB_EP_CONFIG_SW_EN (1 << 31) +#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff) +#define TUSB_PROD_TEST_RESET_VAL 0xa596 +#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20) + +#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8) +#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc) +#define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf) +#define TUSB_DIDR1_HI_REV_20 0 +#define TUSB_DIDR1_HI_REV_30 1 +#define TUSB_DIDR1_HI_REV_31 2 + +#define TUSB_REV_10 0x10 +#define TUSB_REV_20 0x20 +#define TUSB_REV_30 0x30 +#define TUSB_REV_31 0x31 + +/*----------------------------------------------------------------------------*/ + +#ifdef CONFIG_USB_TUSB6010 + +/* configuration parameters specific to this silicon */ + +/* Number of Tx endpoints. Legal values are 1 - 16 (this value includes EP0) */ +#define MUSB_C_NUM_EPT 16 + +/* Number of Rx endpoints. Legal values are 1 - 16 (this value includes EP0) */ +#define MUSB_C_NUM_EPR 16 + +/* Endpoint 1 to 15 direction types. C_EP1_DEF is defined if either Tx endpoint + * 1 or Rx endpoint 1 are used. 
+ */ +#define MUSB_C_EP1_DEF + +/* C_EP1_TX_DEF is defined if Tx endpoint 1 is used */ +#define MUSB_C_EP1_TX_DEF + +/* C_EP1_RX_DEF is defined if Rx endpoint 1 is used */ +#define MUSB_C_EP1_RX_DEF + +/* C_EP1_TOR_DEF is defined if Tx endpoint 1 and Rx endpoint 1 share a FIFO */ +/* #define C_EP1_TOR_DEF */ + +/* C_EP1_TAR_DEF is defined if both Tx endpoint 1 and Rx endpoint 1 are used + * and do not share a FIFO. + */ +#define MUSB_C_EP1_TAR_DEF + +/* Similarly for all other used endpoints */ +#define MUSB_C_EP2_DEF +#define MUSB_C_EP2_TX_DEF +#define MUSB_C_EP2_RX_DEF +#define MUSB_C_EP2_TAR_DEF +#define MUSB_C_EP3_DEF +#define MUSB_C_EP3_TX_DEF +#define MUSB_C_EP3_RX_DEF +#define MUSB_C_EP3_TAR_DEF +#define MUSB_C_EP4_DEF +#define MUSB_C_EP4_TX_DEF +#define MUSB_C_EP4_RX_DEF +#define MUSB_C_EP4_TAR_DEF + +/* Endpoint 1 to 15 FIFO address bits. Legal values are 3 to 13 - corresponding + * to FIFO sizes of 8 to 8192 bytes. If an Tx endpoint shares a FIFO with an Rx + * endpoint then the Rx FIFO size must be the same as the Tx FIFO size. All + * endpoints 1 to 15 must be defined, unused endpoints should be set to 2. + */ +#define MUSB_C_EP1T_BITS 5 +#define MUSB_C_EP1R_BITS 5 +#define MUSB_C_EP2T_BITS 5 +#define MUSB_C_EP2R_BITS 5 +#define MUSB_C_EP3T_BITS 3 +#define MUSB_C_EP3R_BITS 3 +#define MUSB_C_EP4T_BITS 3 +#define MUSB_C_EP4R_BITS 3 + +#define MUSB_C_EP5T_BITS 2 +#define MUSB_C_EP5R_BITS 2 +#define MUSB_C_EP6T_BITS 2 +#define MUSB_C_EP6R_BITS 2 +#define MUSB_C_EP7T_BITS 2 +#define MUSB_C_EP7R_BITS 2 +#define MUSB_C_EP8T_BITS 2 +#define MUSB_C_EP8R_BITS 2 +#define MUSB_C_EP9T_BITS 2 +#define MUSB_C_EP9R_BITS 2 +#define MUSB_C_EP10T_BITS 2 +#define MUSB_C_EP10R_BITS 2 +#define MUSB_C_EP11T_BITS 2 +#define MUSB_C_EP11R_BITS 2 +#define MUSB_C_EP12T_BITS 2 +#define MUSB_C_EP12R_BITS 2 +#define MUSB_C_EP13T_BITS 2 +#define MUSB_C_EP13R_BITS 2 +#define MUSB_C_EP14T_BITS 2 +#define MUSB_C_EP14R_BITS 2 +#define MUSB_C_EP15T_BITS 2 +#define MUSB_C_EP15R_BITS 2 + +/* Define the following constant if the USB2.0 Transceiver Macrocell data width + * is 16-bits. + */ +/* #define C_UTM_16 */ + +/* Define this constant if the CPU uses big-endian byte ordering. */ +/* #define C_BIGEND */ + +/* Define the following constant if any Tx endpoint is required to support + * multiple bulk packets. + */ +/* #define C_MP_TX */ + +/* Define the following constant if any Rx endpoint is required to support + * multiple bulk packets. + */ +/* #define C_MP_RX */ + +/* Define the following constant if any Tx endpoint is required to support high + * bandwidth ISO. + */ +/* #define C_HB_TX */ + +/* Define the following constant if any Rx endpoint is required to support high + * bandwidth ISO. + */ +/* #define C_HB_RX */ + +/* Define the following constant if software connect/disconnect control is + * required. + */ +#define MUSB_C_SOFT_CON + +/* Define the following constant if Vendor Control Registers are required. */ +/* #define C_VEND_REG */ + +/* Vendor control register widths. */ +#define MUSB_C_VCTL_BITS 4 +#define MUSB_C_VSTAT_BITS 8 + +/* Define the following constant to include a DMA controller. */ +/* #define C_DMA */ + +/* Define the following constant if 2 or more DMA channels are required. */ +/* #define C_DMA2 */ + +/* Define the following constant if 3 or more DMA channels are required. */ +/* #define C_DMA3 */ + +/* Define the following constant if 4 or more DMA channels are required. */ +/* #define C_DMA4 */ + +/* Define the following constant if 5 or more DMA channels are required. 
*/ +/* #define C_DMA5 */ + +/* Define the following constant if 6 or more DMA channels are required. */ +/* #define C_DMA6 */ + +/* Define the following constant if 7 or more DMA channels are required. */ +/* #define C_DMA7 */ + +/* Define the following constant if 8 or more DMA channels are required. */ +/* #define C_DMA8 */ + +/* Enable Dynamic FIFO Sizing */ +#define MUSB_C_DYNFIFO_DEF + +/* Derived constants. The following constants are derived from the previous + * configuration constants + */ + +/* Total number of endpoints. Legal values are 2 - 16. This must be equal to + * the larger of C_NUM_EPT, C_NUM_EPR + */ +/* #define MUSB_C_NUM_EPS 5 */ + +/* C_EPMAX_BITS is equal to the largest endpoint FIFO word address bits */ +#define MUSB_C_EPMAX_BITS 11 + +/* C_RAM_BITS is the number of address bits required to address the RAM (32-bit + * addresses). It is defined as log2 of the sum of 2** of all the endpoint FIFO + * dword address bits (rounded up). + */ +#define MUSB_C_RAM_BITS 12 + +#endif /* CONFIG_USB_TUSB6010 */ + +#endif /* __TUSB6010_H__ */ diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c new file mode 100644 index 00000000000..52f7f29cebd --- /dev/null +++ b/drivers/usb/musb/tusb6010_omap.c @@ -0,0 +1,719 @@ +/* + * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface + * + * Copyright (C) 2006 Nokia Corporation + * Tony Lindgren + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "musb_core.h" + +#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data) + +#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */ + +struct tusb_omap_dma_ch { + struct musb *musb; + void __iomem *tbase; + unsigned long phys_offset; + int epnum; + u8 tx; + struct musb_hw_ep *hw_ep; + + int ch; + s8 dmareq; + s8 sync_dev; + + struct tusb_omap_dma *tusb_dma; + + void __iomem *dma_addr; + + u32 len; + u16 packet_sz; + u16 transfer_packet_sz; + u32 transfer_len; + u32 completed_len; +}; + +struct tusb_omap_dma { + struct dma_controller controller; + struct musb *musb; + void __iomem *tbase; + + int ch; + s8 dmareq; + s8 sync_dev; + unsigned multichannel:1; +}; + +static int tusb_omap_dma_start(struct dma_controller *c) +{ + struct tusb_omap_dma *tusb_dma; + + tusb_dma = container_of(c, struct tusb_omap_dma, controller); + + /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ + + return 0; +} + +static int tusb_omap_dma_stop(struct dma_controller *c) +{ + struct tusb_omap_dma *tusb_dma; + + tusb_dma = container_of(c, struct tusb_omap_dma, controller); + + /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */ + + return 0; +} + +/* + * Allocate dmareq0 to the current channel unless it's already taken + */ +static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) +{ + u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); + + if (reg != 0) { + DBG(3, "ep%i dmareq0 is busy for ep%i\n", + chdat->epnum, reg & 0xf); + return -EAGAIN; + } + + if (chdat->tx) + reg = (1 << 4) | chdat->epnum; + else + reg = chdat->epnum; + + musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); + + return 0; +} + +static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat) +{ + u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); + + if ((reg & 0xf) != chdat->epnum) { + printk(KERN_ERR "ep%i 
trying to release dmareq0 for ep%i\n", + chdat->epnum, reg & 0xf); + return; + } + musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0); +} + +/* + * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in + * musb_gadget.c. + */ +static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data) +{ + struct dma_channel *channel = (struct dma_channel *)data; + struct tusb_omap_dma_ch *chdat = to_chdat(channel); + struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; + struct musb *musb = chdat->musb; + struct musb_hw_ep *hw_ep = chdat->hw_ep; + void __iomem *ep_conf = hw_ep->conf; + void __iomem *mbase = musb->mregs; + unsigned long remaining, flags, pio; + int ch; + + spin_lock_irqsave(&musb->lock, flags); + + if (tusb_dma->multichannel) + ch = chdat->ch; + else + ch = tusb_dma->ch; + + if (ch_status != OMAP_DMA_BLOCK_IRQ) + printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status); + + DBG(3, "ep%i %s dma callback ch: %i status: %x\n", + chdat->epnum, chdat->tx ? "tx" : "rx", + ch, ch_status); + + if (chdat->tx) + remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); + else + remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); + + remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining); + + /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */ + if (unlikely(remaining > chdat->transfer_len)) { + DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", + chdat->tx ? "tx" : "rx", chdat->ch, + remaining); + remaining = 0; + } + + channel->actual_len = chdat->transfer_len - remaining; + pio = chdat->len - channel->actual_len; + + DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len); + + /* Transfer remaining 1 - 31 bytes */ + if (pio > 0 && pio < 32) { + u8 *buf; + + DBG(3, "Using PIO for remaining %lu bytes\n", pio); + buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len; + if (chdat->tx) { + dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), + chdat->transfer_len, DMA_TO_DEVICE); + musb_write_fifo(hw_ep, pio, buf); + } else { + musb_read_fifo(hw_ep, pio, buf); + dma_cache_maint(phys_to_virt((u32)chdat->dma_addr), + chdat->transfer_len, DMA_FROM_DEVICE); + } + channel->actual_len += pio; + } + + if (!tusb_dma->multichannel) + tusb_omap_free_shared_dmareq(chdat); + + channel->status = MUSB_DMA_STATUS_FREE; + + /* Handle only RX callbacks here. TX callbacks must be handled based + * on the TUSB DMA status interrupt. + * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback + * interrupt for RX and TX. + */ + if (!chdat->tx) + musb_dma_completion(musb, chdat->epnum, chdat->tx); + + /* We must terminate short tx transfers manually by setting TXPKTRDY. + * REVISIT: This same problem may occur with other MUSB dma as well. + * Easy to test with g_ether by pinging the MUSB board with ping -s54. 
+ */ + if ((chdat->transfer_len < chdat->packet_sz) + || (chdat->transfer_len % chdat->packet_sz != 0)) { + u16 csr; + + if (chdat->tx) { + DBG(3, "terminating short tx packet\n"); + musb_ep_select(mbase, chdat->epnum); + csr = musb_readw(hw_ep->regs, MUSB_TXCSR); + csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY + | MUSB_TXCSR_P_WZC_BITS; + musb_writew(hw_ep->regs, MUSB_TXCSR, csr); + } + } + + spin_unlock_irqrestore(&musb->lock, flags); +} + +static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, + u8 rndis_mode, dma_addr_t dma_addr, u32 len) +{ + struct tusb_omap_dma_ch *chdat = to_chdat(channel); + struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; + struct musb *musb = chdat->musb; + struct musb_hw_ep *hw_ep = chdat->hw_ep; + void __iomem *mbase = musb->mregs; + void __iomem *ep_conf = hw_ep->conf; + dma_addr_t fifo = hw_ep->fifo_sync; + struct omap_dma_channel_params dma_params; + u32 dma_remaining; + int src_burst, dst_burst; + u16 csr; + int ch; + s8 dmareq; + s8 sync_dev; + + if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz)) + return false; + + /* + * HW issue #10: Async dma will eventually corrupt the XFR_SIZE + * register which will cause missed DMA interrupt. We could try to + * use a timer for the callback, but it is unsafe as the XFR_SIZE + * register is corrupt, and we won't know if the DMA worked. + */ + if (dma_addr & 0x2) + return false; + + /* + * Because of HW issue #10, it seems like mixing sync DMA and async + * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before + * using the channel for DMA. + */ + if (chdat->tx) + dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET); + else + dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET); + + dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining); + if (dma_remaining) { + DBG(2, "Busy %s dma ch%i, not using: %08x\n", + chdat->tx ? 
"tx" : "rx", chdat->ch, + dma_remaining); + return false; + } + + chdat->transfer_len = len & ~0x1f; + + if (len < packet_sz) + chdat->transfer_packet_sz = chdat->transfer_len; + else + chdat->transfer_packet_sz = packet_sz; + + if (tusb_dma->multichannel) { + ch = chdat->ch; + dmareq = chdat->dmareq; + sync_dev = chdat->sync_dev; + } else { + if (tusb_omap_use_shared_dmareq(chdat) != 0) { + DBG(3, "could not get dma for ep%i\n", chdat->epnum); + return false; + } + if (tusb_dma->ch < 0) { + /* REVISIT: This should get blocked earlier, happens + * with MSC ErrorRecoveryTest + */ + WARN_ON(1); + return false; + } + + ch = tusb_dma->ch; + dmareq = tusb_dma->dmareq; + sync_dev = tusb_dma->sync_dev; + omap_set_dma_callback(ch, tusb_omap_dma_cb, channel); + } + + chdat->packet_sz = packet_sz; + chdat->len = len; + channel->actual_len = 0; + chdat->dma_addr = (void __iomem *)dma_addr; + channel->status = MUSB_DMA_STATUS_BUSY; + + /* Since we're recycling dma areas, we need to clean or invalidate */ + if (chdat->tx) + dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE); + else + dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE); + + /* Use 16-bit transfer if dma_addr is not 32-bit aligned */ + if ((dma_addr & 0x3) == 0) { + dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; + dma_params.elem_count = 8; /* Elements in frame */ + } else { + dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; + dma_params.elem_count = 16; /* Elements in frame */ + fifo = hw_ep->fifo_async; + } + + dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */ + + DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", + chdat->epnum, chdat->tx ? "tx" : "rx", + ch, dma_addr, chdat->transfer_len, len, + chdat->transfer_packet_sz, packet_sz); + + /* + * Prepare omap DMA for transfer + */ + if (chdat->tx) { + dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; + dma_params.src_start = (unsigned long)dma_addr; + dma_params.src_ei = 0; + dma_params.src_fi = 0; + + dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX; + dma_params.dst_start = (unsigned long)fifo; + dma_params.dst_ei = 1; + dma_params.dst_fi = -31; /* Loop 32 byte window */ + + dma_params.trigger = sync_dev; + dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; + dma_params.src_or_dst_synch = 0; /* Dest sync */ + + src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */ + dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */ + } else { + dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; + dma_params.src_start = (unsigned long)fifo; + dma_params.src_ei = 1; + dma_params.src_fi = -31; /* Loop 32 byte window */ + + dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; + dma_params.dst_start = (unsigned long)dma_addr; + dma_params.dst_ei = 0; + dma_params.dst_fi = 0; + + dma_params.trigger = sync_dev; + dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; + dma_params.src_or_dst_synch = 1; /* Source sync */ + + src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */ + dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */ + } + + DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n", + chdat->epnum, chdat->tx ? "tx" : "rx", + (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16, + ((dma_addr & 0x3) == 0) ? 
"sync" : "async", + dma_params.src_start, dma_params.dst_start); + + omap_set_dma_params(ch, &dma_params); + omap_set_dma_src_burst_mode(ch, src_burst); + omap_set_dma_dest_burst_mode(ch, dst_burst); + omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED); + + /* + * Prepare MUSB for DMA transfer + */ + if (chdat->tx) { + musb_ep_select(mbase, chdat->epnum); + csr = musb_readw(hw_ep->regs, MUSB_TXCSR); + csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB + | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE); + csr &= ~MUSB_TXCSR_P_UNDERRUN; + musb_writew(hw_ep->regs, MUSB_TXCSR, csr); + } else { + musb_ep_select(mbase, chdat->epnum); + csr = musb_readw(hw_ep->regs, MUSB_RXCSR); + csr |= MUSB_RXCSR_DMAENAB; + csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE); + musb_writew(hw_ep->regs, MUSB_RXCSR, + csr | MUSB_RXCSR_P_WZC_BITS); + } + + /* + * Start DMA transfer + */ + omap_start_dma(ch); + + if (chdat->tx) { + /* Send transfer_packet_sz packets at a time */ + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, + chdat->transfer_packet_sz); + + musb_writel(ep_conf, TUSB_EP_TX_OFFSET, + TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); + } else { + /* Receive transfer_packet_sz packets at a time */ + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, + chdat->transfer_packet_sz << 16); + + musb_writel(ep_conf, TUSB_EP_RX_OFFSET, + TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); + } + + return true; +} + +static int tusb_omap_dma_abort(struct dma_channel *channel) +{ + struct tusb_omap_dma_ch *chdat = to_chdat(channel); + struct tusb_omap_dma *tusb_dma = chdat->tusb_dma; + + if (!tusb_dma->multichannel) { + if (tusb_dma->ch >= 0) { + omap_stop_dma(tusb_dma->ch); + omap_free_dma(tusb_dma->ch); + tusb_dma->ch = -1; + } + + tusb_dma->dmareq = -1; + tusb_dma->sync_dev = -1; + } + + channel->status = MUSB_DMA_STATUS_FREE; + + return 0; +} + +static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat) +{ + u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); + int i, dmareq_nr = -1; + + const int sync_dev[6] = { + OMAP24XX_DMA_EXT_DMAREQ0, + OMAP24XX_DMA_EXT_DMAREQ1, + OMAP242X_DMA_EXT_DMAREQ2, + OMAP242X_DMA_EXT_DMAREQ3, + OMAP242X_DMA_EXT_DMAREQ4, + OMAP242X_DMA_EXT_DMAREQ5, + }; + + for (i = 0; i < MAX_DMAREQ; i++) { + int cur = (reg & (0xf << (i * 5))) >> (i * 5); + if (cur == 0) { + dmareq_nr = i; + break; + } + } + + if (dmareq_nr == -1) + return -EAGAIN; + + reg |= (chdat->epnum << (dmareq_nr * 5)); + if (chdat->tx) + reg |= ((1 << 4) << (dmareq_nr * 5)); + musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); + + chdat->dmareq = dmareq_nr; + chdat->sync_dev = sync_dev[chdat->dmareq]; + + return 0; +} + +static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat) +{ + u32 reg; + + if (!chdat || chdat->dmareq < 0) + return; + + reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); + reg &= ~(0x1f << (chdat->dmareq * 5)); + musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg); + + chdat->dmareq = -1; + chdat->sync_dev = -1; +} + +static struct dma_channel *dma_channel_pool[MAX_DMAREQ]; + +static struct dma_channel * +tusb_omap_dma_allocate(struct dma_controller *c, + struct musb_hw_ep *hw_ep, + u8 tx) +{ + int ret, i; + const char *dev_name; + struct tusb_omap_dma *tusb_dma; + struct musb *musb; + void __iomem *tbase; + struct dma_channel *channel = NULL; + struct tusb_omap_dma_ch *chdat = NULL; + u32 reg; + + tusb_dma = container_of(c, struct tusb_omap_dma, controller); + musb = tusb_dma->musb; + tbase = musb->ctrl_base; + + reg = musb_readl(tbase, TUSB_DMA_INT_MASK); + if (tx) 
+ reg &= ~(1 << hw_ep->epnum); + else + reg &= ~(1 << (hw_ep->epnum + 15)); + musb_writel(tbase, TUSB_DMA_INT_MASK, reg); + + /* REVISIT: Why does dmareq5 not work? */ + if (hw_ep->epnum == 0) { + DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); + return NULL; + } + + for (i = 0; i < MAX_DMAREQ; i++) { + struct dma_channel *ch = dma_channel_pool[i]; + if (ch->status == MUSB_DMA_STATUS_UNKNOWN) { + ch->status = MUSB_DMA_STATUS_FREE; + channel = ch; + chdat = ch->private_data; + break; + } + } + + if (!channel) + return NULL; + + if (tx) { + chdat->tx = 1; + dev_name = "TUSB transmit"; + } else { + chdat->tx = 0; + dev_name = "TUSB receive"; + } + + chdat->musb = tusb_dma->musb; + chdat->tbase = tusb_dma->tbase; + chdat->hw_ep = hw_ep; + chdat->epnum = hw_ep->epnum; + chdat->dmareq = -1; + chdat->completed_len = 0; + chdat->tusb_dma = tusb_dma; + + channel->max_len = 0x7fffffff; + channel->desired_mode = 0; + channel->actual_len = 0; + + if (tusb_dma->multichannel) { + ret = tusb_omap_dma_allocate_dmareq(chdat); + if (ret != 0) + goto free_dmareq; + + ret = omap_request_dma(chdat->sync_dev, dev_name, + tusb_omap_dma_cb, channel, &chdat->ch); + if (ret != 0) + goto free_dmareq; + } else if (tusb_dma->ch == -1) { + tusb_dma->dmareq = 0; + tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0; + + /* Callback data gets set later in the shared dmareq case */ + ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared", + tusb_omap_dma_cb, NULL, &tusb_dma->ch); + if (ret != 0) + goto free_dmareq; + + chdat->dmareq = -1; + chdat->ch = -1; + } + + DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n", + chdat->epnum, + chdat->tx ? "tx" : "rx", + chdat->ch >= 0 ? "dedicated" : "shared", + chdat->ch >= 0 ? chdat->ch : tusb_dma->ch, + chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq, + chdat->sync_dev >= 0 ? 
chdat->sync_dev : tusb_dma->sync_dev); + + return channel; + +free_dmareq: + tusb_omap_dma_free_dmareq(chdat); + + DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum); + channel->status = MUSB_DMA_STATUS_UNKNOWN; + + return NULL; +} + +static void tusb_omap_dma_release(struct dma_channel *channel) +{ + struct tusb_omap_dma_ch *chdat = to_chdat(channel); + struct musb *musb = chdat->musb; + void __iomem *tbase = musb->ctrl_base; + u32 reg; + + DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch); + + reg = musb_readl(tbase, TUSB_DMA_INT_MASK); + if (chdat->tx) + reg |= (1 << chdat->epnum); + else + reg |= (1 << (chdat->epnum + 15)); + musb_writel(tbase, TUSB_DMA_INT_MASK, reg); + + reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR); + if (chdat->tx) + reg |= (1 << chdat->epnum); + else + reg |= (1 << (chdat->epnum + 15)); + musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg); + + channel->status = MUSB_DMA_STATUS_UNKNOWN; + + if (chdat->ch >= 0) { + omap_stop_dma(chdat->ch); + omap_free_dma(chdat->ch); + chdat->ch = -1; + } + + if (chdat->dmareq >= 0) + tusb_omap_dma_free_dmareq(chdat); + + channel = NULL; +} + +void dma_controller_destroy(struct dma_controller *c) +{ + struct tusb_omap_dma *tusb_dma; + int i; + + tusb_dma = container_of(c, struct tusb_omap_dma, controller); + for (i = 0; i < MAX_DMAREQ; i++) { + struct dma_channel *ch = dma_channel_pool[i]; + if (ch) { + kfree(ch->private_data); + kfree(ch); + } + } + + if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0) + omap_free_dma(tusb_dma->ch); + + kfree(tusb_dma); +} + +struct dma_controller *__init +dma_controller_create(struct musb *musb, void __iomem *base) +{ + void __iomem *tbase = musb->ctrl_base; + struct tusb_omap_dma *tusb_dma; + int i; + + /* REVISIT: Get dmareq lines used from board-*.c */ + + musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff); + musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0); + + musb_writel(tbase, TUSB_DMA_REQ_CONF, + TUSB_DMA_REQ_CONF_BURST_SIZE(2) + | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) + | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); + + tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL); + if (!tusb_dma) + goto cleanup; + + tusb_dma->musb = musb; + tusb_dma->tbase = musb->ctrl_base; + + tusb_dma->ch = -1; + tusb_dma->dmareq = -1; + tusb_dma->sync_dev = -1; + + tusb_dma->controller.start = tusb_omap_dma_start; + tusb_dma->controller.stop = tusb_omap_dma_stop; + tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate; + tusb_dma->controller.channel_release = tusb_omap_dma_release; + tusb_dma->controller.channel_program = tusb_omap_dma_program; + tusb_dma->controller.channel_abort = tusb_omap_dma_abort; + + if (tusb_get_revision(musb) >= TUSB_REV_30) + tusb_dma->multichannel = 1; + + for (i = 0; i < MAX_DMAREQ; i++) { + struct dma_channel *ch; + struct tusb_omap_dma_ch *chdat; + + ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL); + if (!ch) + goto cleanup; + + dma_channel_pool[i] = ch; + + chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL); + if (!chdat) + goto cleanup; + + ch->status = MUSB_DMA_STATUS_UNKNOWN; + ch->private_data = chdat; + } + + return &tusb_dma->controller; + +cleanup: + dma_controller_destroy(&tusb_dma->controller); + + return NULL; +} diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 00000000000..d325a0d5bf4 --- /dev/null +++ b/include/linux/usb/musb.h @@ -0,0 +1,70 @@ +/* + * This is used to for host and peripheral modes of the driver for + * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. 
+ * + * Board initialization should put one of these into dev->platform_data, + * probably on some platform_device named "musb_hdrc". It encapsulates + * key configuration differences between boards. + */ + +/* The USB role is defined by the connector used on the board, so long as + * standards are being followed. (Developer boards sometimes won't.) + */ +enum musb_mode { + MUSB_UNDEFINED = 0, + MUSB_HOST, /* A or Mini-A connector */ + MUSB_PERIPHERAL, /* B or Mini-B connector */ + MUSB_OTG /* Mini-AB connector */ +}; + +struct clk; + +struct musb_hdrc_platform_data { + /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ + u8 mode; + + /* for clk_get() */ + const char *clock; + + /* (HOST or OTG) switch VBUS on/off */ + int (*set_vbus)(struct device *dev, int is_on); + + /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ + u8 power; + + /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ + u8 min_power; + + /* (HOST or OTG) msec/2 after VBUS on till power good */ + u8 potpgt; + + /* TBD: chip defaults should probably go someplace else, + * e.g. number of tx/rx endpoints, etc + */ + unsigned multipoint:1; + + /* Power the device on or off */ + int (*set_power)(int state); + + /* Turn device clock on or off */ + int (*set_clock)(struct clk *clock, int is_on); +}; + + +/* TUSB 6010 support */ + +#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ +#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ +#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ + +#ifdef CONFIG_ARCH_OMAP2 + +extern int __init tusb6010_setup_interface( + struct musb_hdrc_platform_data *data, + unsigned ps_refclk, unsigned waitpin, + unsigned async_cs, unsigned sync_cs, + unsigned irq, unsigned dmachan); + +extern int tusb6010_platform_retime(unsigned is_refclk); + +#endif /* OMAP2 */ -- cgit v1.2.3-70-g09d2 From ca6d1b1333bc2e61e37982de1f28d8604c232414 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Fri, 8 Aug 2008 12:40:54 +0300 Subject: usb: musb: pass configuration specifics via pdata Use platform_data to pass musb configuration-specific details to musb driver. This patch will prevent that other platforms selecting HAVE_CLK and enabling musb won't break tree building. The other parts of it will come when linux-omap merge up more omap2/3 board-files. Signed-off-by: Felipe Balbi Acked-by: Paul Mundt Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-omap2/usb-tusb6010.c | 1 - drivers/usb/musb/musb_core.c | 37 ++++---- drivers/usb/musb/musb_core.h | 14 +-- drivers/usb/musb/tusb6010.h | 169 ------------------------------------- include/linux/usb/musb.h | 38 +++++++-- 5 files changed, 51 insertions(+), 208 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c index 1607c941d95..10ef464d6be 100644 --- a/arch/arm/mach-omap2/usb-tusb6010.c +++ b/arch/arm/mach-omap2/usb-tusb6010.c @@ -317,7 +317,6 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data, printk(error, 6, status); return -ENODEV; } - data->multipoint = 1; tusb_device.dev.platform_data = data; /* REVISIT let the driver know what DMA channels work */ diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 462586d06da..d68ec6daf33 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -990,12 +990,6 @@ static void musb_shutdown(struct platform_device *pdev) * We don't currently use dynamic fifo setup capability to do anything * more than selecting one of a bunch of predefined configurations. 
*/ -#ifdef MUSB_C_DYNFIFO_DEF -#define can_dynfifo() 1 -#else -#define can_dynfifo() 0 -#endif - #if defined(CONFIG_USB_TUSB6010) || \ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) static ushort __initdata fifo_mode = 4; @@ -1008,8 +1002,6 @@ module_param(fifo_mode, ushort, 0); MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration"); -#define DYN_FIFO_SIZE (1<<(MUSB_C_RAM_BITS+2)) - enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed)); enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed)); @@ -1119,11 +1111,12 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep, c_size = size - 3; if (cfg->mode == BUF_DOUBLE) { - if ((offset + (maxpacket << 1)) > DYN_FIFO_SIZE) + if ((offset + (maxpacket << 1)) > + (1 << (musb->config->ram_bits + 2))) return -EMSGSIZE; c_size |= MUSB_FIFOSZ_DPB; } else { - if ((offset + maxpacket) > DYN_FIFO_SIZE) + if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2))) return -EMSGSIZE; } @@ -1219,13 +1212,13 @@ static int __init ep_config_from_table(struct musb *musb) /* assert(offset > 0) */ /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would - * be better than static MUSB_C_NUM_EPS and DYN_FIFO_SIZE... + * be better than static musb->config->num_eps and DYN_FIFO_SIZE... */ for (i = 0; i < n; i++) { u8 epn = cfg->hw_ep_num; - if (epn >= MUSB_C_NUM_EPS) { + if (epn >= musb->config->num_eps) { pr_debug("%s: invalid ep %d\n", musb_driver_name, epn); continue; @@ -1242,8 +1235,8 @@ static int __init ep_config_from_table(struct musb *musb) printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", musb_driver_name, - n + 1, MUSB_C_NUM_EPS * 2 - 1, - offset, DYN_FIFO_SIZE); + n + 1, musb->config->num_eps * 2 - 1, + offset, (1 << (musb->config->ram_bits + 2))); #ifdef CONFIG_USB_MUSB_HDRC_HCD if (!musb->bulk_ep) { @@ -1270,7 +1263,7 @@ static int __init ep_config_from_hw(struct musb *musb) /* FIXME pick up ep0 maxpacket size */ - for (epnum = 1; epnum < MUSB_C_NUM_EPS; epnum++) { + for (epnum = 1; epnum < musb->config->num_eps; epnum++) { musb_ep_select(mbase, epnum); hw_ep = musb->endpoints + epnum; @@ -1424,14 +1417,14 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) musb->epmask = 1; if (reg & MUSB_CONFIGDATA_DYNFIFO) { - if (can_dynfifo()) + if (musb->config->dyn_fifo) status = ep_config_from_table(musb); else { ERR("reconfigure software for Dynamic FIFOs\n"); status = -ENODEV; } } else { - if (!can_dynfifo()) + if (!musb->config->dyn_fifo) status = ep_config_from_hw(musb); else { ERR("reconfigure software for static FIFOs\n"); @@ -1788,7 +1781,8 @@ static void musb_irq_work(struct work_struct *data) */ static struct musb *__init -allocate_instance(struct device *dev, void __iomem *mbase) +allocate_instance(struct device *dev, + struct musb_hdrc_config *config, void __iomem *mbase) { struct musb *musb; struct musb_hw_ep *ep; @@ -1820,8 +1814,9 @@ allocate_instance(struct device *dev, void __iomem *mbase) musb->mregs = mbase; musb->ctrl_base = mbase; musb->nIrq = -ENODEV; + musb->config = config; for (epnum = 0, ep = musb->endpoints; - epnum < MUSB_C_NUM_EPS; + epnum < musb->config->num_eps; epnum++, ep++) { ep->musb = musb; @@ -1929,7 +1924,7 @@ bad_config: } /* allocate */ - musb = allocate_instance(dev, ctrl); + musb = allocate_instance(dev, plat->config, ctrl); if (!musb) return -ENOMEM; @@ -1987,7 +1982,7 @@ bad_config: musb_generic_disable(musb); /* setup musb parts of the core (especially endpoints) */ - status = musb_core_init(plat->multipoint + status = 
musb_core_init(plat->config->multipoint ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC, musb); if (status < 0) diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 90035c12ab5..eade46d8170 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -56,18 +56,6 @@ struct musb_ep; #include "musb_debug.h" #include "musb_dma.h" -#ifdef CONFIG_USB_MUSB_SOC -/* - * Get core configuration from a header converted (by cfg_conv) - * from the Verilog config file generated by the core config utility - * - * For now we assume that header is provided along with other - * arch-specific files. Discrete chips will need a build tweak. - * So will using AHB IDs from silicon that provides them. - */ -#include -#endif - #include "musb_io.h" #include "musb_regs.h" @@ -440,6 +428,8 @@ struct musb { struct usb_gadget_driver *gadget_driver; /* its driver */ #endif + struct musb_hdrc_config *config; + #ifdef MUSB_CONFIG_PROC_FS struct proc_dir_entry *proc_entry; #endif diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h index db6dad0750a..ab8c96286ce 100644 --- a/drivers/usb/musb/tusb6010.h +++ b/drivers/usb/musb/tusb6010.h @@ -230,173 +230,4 @@ extern u8 tusb_get_revision(struct musb *musb); #define TUSB_REV_30 0x30 #define TUSB_REV_31 0x31 -/*----------------------------------------------------------------------------*/ - -#ifdef CONFIG_USB_TUSB6010 - -/* configuration parameters specific to this silicon */ - -/* Number of Tx endpoints. Legal values are 1 - 16 (this value includes EP0) */ -#define MUSB_C_NUM_EPT 16 - -/* Number of Rx endpoints. Legal values are 1 - 16 (this value includes EP0) */ -#define MUSB_C_NUM_EPR 16 - -/* Endpoint 1 to 15 direction types. C_EP1_DEF is defined if either Tx endpoint - * 1 or Rx endpoint 1 are used. - */ -#define MUSB_C_EP1_DEF - -/* C_EP1_TX_DEF is defined if Tx endpoint 1 is used */ -#define MUSB_C_EP1_TX_DEF - -/* C_EP1_RX_DEF is defined if Rx endpoint 1 is used */ -#define MUSB_C_EP1_RX_DEF - -/* C_EP1_TOR_DEF is defined if Tx endpoint 1 and Rx endpoint 1 share a FIFO */ -/* #define C_EP1_TOR_DEF */ - -/* C_EP1_TAR_DEF is defined if both Tx endpoint 1 and Rx endpoint 1 are used - * and do not share a FIFO. - */ -#define MUSB_C_EP1_TAR_DEF - -/* Similarly for all other used endpoints */ -#define MUSB_C_EP2_DEF -#define MUSB_C_EP2_TX_DEF -#define MUSB_C_EP2_RX_DEF -#define MUSB_C_EP2_TAR_DEF -#define MUSB_C_EP3_DEF -#define MUSB_C_EP3_TX_DEF -#define MUSB_C_EP3_RX_DEF -#define MUSB_C_EP3_TAR_DEF -#define MUSB_C_EP4_DEF -#define MUSB_C_EP4_TX_DEF -#define MUSB_C_EP4_RX_DEF -#define MUSB_C_EP4_TAR_DEF - -/* Endpoint 1 to 15 FIFO address bits. Legal values are 3 to 13 - corresponding - * to FIFO sizes of 8 to 8192 bytes. If an Tx endpoint shares a FIFO with an Rx - * endpoint then the Rx FIFO size must be the same as the Tx FIFO size. All - * endpoints 1 to 15 must be defined, unused endpoints should be set to 2. 
- */ -#define MUSB_C_EP1T_BITS 5 -#define MUSB_C_EP1R_BITS 5 -#define MUSB_C_EP2T_BITS 5 -#define MUSB_C_EP2R_BITS 5 -#define MUSB_C_EP3T_BITS 3 -#define MUSB_C_EP3R_BITS 3 -#define MUSB_C_EP4T_BITS 3 -#define MUSB_C_EP4R_BITS 3 - -#define MUSB_C_EP5T_BITS 2 -#define MUSB_C_EP5R_BITS 2 -#define MUSB_C_EP6T_BITS 2 -#define MUSB_C_EP6R_BITS 2 -#define MUSB_C_EP7T_BITS 2 -#define MUSB_C_EP7R_BITS 2 -#define MUSB_C_EP8T_BITS 2 -#define MUSB_C_EP8R_BITS 2 -#define MUSB_C_EP9T_BITS 2 -#define MUSB_C_EP9R_BITS 2 -#define MUSB_C_EP10T_BITS 2 -#define MUSB_C_EP10R_BITS 2 -#define MUSB_C_EP11T_BITS 2 -#define MUSB_C_EP11R_BITS 2 -#define MUSB_C_EP12T_BITS 2 -#define MUSB_C_EP12R_BITS 2 -#define MUSB_C_EP13T_BITS 2 -#define MUSB_C_EP13R_BITS 2 -#define MUSB_C_EP14T_BITS 2 -#define MUSB_C_EP14R_BITS 2 -#define MUSB_C_EP15T_BITS 2 -#define MUSB_C_EP15R_BITS 2 - -/* Define the following constant if the USB2.0 Transceiver Macrocell data width - * is 16-bits. - */ -/* #define C_UTM_16 */ - -/* Define this constant if the CPU uses big-endian byte ordering. */ -/* #define C_BIGEND */ - -/* Define the following constant if any Tx endpoint is required to support - * multiple bulk packets. - */ -/* #define C_MP_TX */ - -/* Define the following constant if any Rx endpoint is required to support - * multiple bulk packets. - */ -/* #define C_MP_RX */ - -/* Define the following constant if any Tx endpoint is required to support high - * bandwidth ISO. - */ -/* #define C_HB_TX */ - -/* Define the following constant if any Rx endpoint is required to support high - * bandwidth ISO. - */ -/* #define C_HB_RX */ - -/* Define the following constant if software connect/disconnect control is - * required. - */ -#define MUSB_C_SOFT_CON - -/* Define the following constant if Vendor Control Registers are required. */ -/* #define C_VEND_REG */ - -/* Vendor control register widths. */ -#define MUSB_C_VCTL_BITS 4 -#define MUSB_C_VSTAT_BITS 8 - -/* Define the following constant to include a DMA controller. */ -/* #define C_DMA */ - -/* Define the following constant if 2 or more DMA channels are required. */ -/* #define C_DMA2 */ - -/* Define the following constant if 3 or more DMA channels are required. */ -/* #define C_DMA3 */ - -/* Define the following constant if 4 or more DMA channels are required. */ -/* #define C_DMA4 */ - -/* Define the following constant if 5 or more DMA channels are required. */ -/* #define C_DMA5 */ - -/* Define the following constant if 6 or more DMA channels are required. */ -/* #define C_DMA6 */ - -/* Define the following constant if 7 or more DMA channels are required. */ -/* #define C_DMA7 */ - -/* Define the following constant if 8 or more DMA channels are required. */ -/* #define C_DMA8 */ - -/* Enable Dynamic FIFO Sizing */ -#define MUSB_C_DYNFIFO_DEF - -/* Derived constants. The following constants are derived from the previous - * configuration constants - */ - -/* Total number of endpoints. Legal values are 2 - 16. This must be equal to - * the larger of C_NUM_EPT, C_NUM_EPR - */ -/* #define MUSB_C_NUM_EPS 5 */ - -/* C_EPMAX_BITS is equal to the largest endpoint FIFO word address bits */ -#define MUSB_C_EPMAX_BITS 11 - -/* C_RAM_BITS is the number of address bits required to address the RAM (32-bit - * addresses). It is defined as log2 of the sum of 2** of all the endpoint FIFO - * dword address bits (rounded up). 
- */ -#define MUSB_C_RAM_BITS 12 - -#endif /* CONFIG_USB_TUSB6010 */ - #endif /* __TUSB6010_H__ */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index d325a0d5bf4..630962c04ca 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -19,6 +19,36 @@ enum musb_mode { struct clk; +struct musb_hdrc_eps_bits { + const char name[16]; + u8 bits; +}; + +struct musb_hdrc_config { + /* MUSB configuration-specific details */ + unsigned multipoint:1; /* multipoint device */ + unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ + unsigned soft_con:1; /* soft connect required */ + unsigned utm_16:1; /* utm data witdh is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HD iso */ + unsigned dma:1; /* supports DMA */ + unsigned vendor_req:1; /* vendor registers required */ + + u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl; /* vendor control reg width */ + u8 vendor_stat; /* vendor status reg witdh */ + u8 dma_req_chan; /* bitmask for required dma channels */ + u8 ram_bits; /* ram address size */ + + struct musb_hdrc_eps_bits *eps_bits; +}; + struct musb_hdrc_platform_data { /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ u8 mode; @@ -38,16 +68,14 @@ struct musb_hdrc_platform_data { /* (HOST or OTG) msec/2 after VBUS on till power good */ u8 potpgt; - /* TBD: chip defaults should probably go someplace else, - * e.g. number of tx/rx endpoints, etc - */ - unsigned multipoint:1; - /* Power the device on or off */ int (*set_power)(int state); /* Turn device clock on or off */ int (*set_clock)(struct clk *clock, int is_on); + + /* MUSB configuration-specific details */ + struct musb_hdrc_config *config; }; -- cgit v1.2.3-70-g09d2 From 0e06b50dda5965e0f8a15b0be14b759ead54fd2a Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Thu, 14 Aug 2008 14:29:57 +0800 Subject: Blackfin arch: cleanup cache lock code - remove cheesy read_iloc() function - move invalidate_entire_icache function to lock.S - export proper prototypes for functions in lock.S - only build lock.S when BFIN_ICACHE_LOCK is enabled Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- arch/blackfin/kernel/setup.c | 2 +- arch/blackfin/mach-common/Makefile | 3 ++- arch/blackfin/mach-common/lock.S | 45 ++++++++++++++++++++++++++------------ include/asm-blackfin/bfin-global.h | 6 ++++- 4 files changed, 39 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 936c06d820d..2ae84fea89e 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -1059,7 +1059,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, BFIN_DLINES); #ifdef CONFIG_BFIN_ICACHE_LOCK - switch (read_iloc()) { + switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) { case WAY0_L: seq_printf(m, "Way0 Locked-Down\n"); break; diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile index 862cd73c950..e6ed57c56d4 100644 --- a/arch/blackfin/mach-common/Makefile +++ b/arch/blackfin/mach-common/Makefile @@ -4,8 +4,9 @@ obj-y := \ cache.o entry.o head.o \ - interrupt.o lock.o irqpanic.o arch_checks.o ints-priority.o + 
interrupt.o irqpanic.o arch_checks.o ints-priority.o +obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o obj-$(CONFIG_PM) += pm.o dpmc_modes.o obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o diff --git a/arch/blackfin/mach-common/lock.S b/arch/blackfin/mach-common/lock.S index 30b887e67dd..9daf01201e9 100644 --- a/arch/blackfin/mach-common/lock.S +++ b/arch/blackfin/mach-common/lock.S @@ -28,13 +28,10 @@ */ #include -#include #include .text -#ifdef CONFIG_BFIN_ICACHE_LOCK - /* When you come here, it is assumed that * R0 - Which way to be locked */ @@ -189,18 +186,38 @@ ENTRY(_cache_lock) RTS; ENDPROC(_cache_lock) -#endif /* BFIN_ICACHE_LOCK */ - -/* Return the ILOC bits of IMEM_CONTROL +/* Invalidate the Entire Instruction cache by + * disabling IMC bit */ +ENTRY(_invalidate_entire_icache) + [--SP] = ( R7:5); -ENTRY(_read_iloc) - P1.H = HI(IMEM_CONTROL); - P1.L = LO(IMEM_CONTROL); - R1 = 0xF; - R0 = [P1]; - R0 = R0 >> 3; - R0 = R0 & R1; + P0.L = LO(IMEM_CONTROL); + P0.H = HI(IMEM_CONTROL); + R7 = [P0]; + + /* Clear the IMC bit , All valid bits in the instruction + * cache are set to the invalid state + */ + BITCLR(R7,IMC_P); + CLI R6; + SSYNC; /* SSYNC required before invalidating cache. */ + .align 8; + [P0] = R7; + SSYNC; + STI R6; + + /* Configures the instruction cache agian */ + R6 = (IMC | ENICPLB); + R7 = R7 | R6; + + CLI R6; + SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */ + .align 8; + [P0] = R7; + SSYNC; + STI R6; + ( R7:5) = [SP++]; RTS; -ENDPROC(_read_iloc) +ENDPROC(_invalidate_entire_icache) diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index 93ae5335e8a..78eb389d200 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h @@ -62,7 +62,6 @@ extern void _cplb_hdr(void); /* Blackfin cache functions */ extern void bfin_icache_init(void); extern void bfin_dcache_init(void); -extern int read_iloc(void); extern int bfin_console_init(void); extern asmlinkage void lower_to_irq14(void); extern asmlinkage void bfin_return_from_exception(void); @@ -126,6 +125,11 @@ extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], /* only used when CONFIG_MTD_UCLINUX */ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; +#ifdef CONFIG_BFIN_ICACHE_LOCK +extern void cache_grab_lock(int way); +extern void cache_lock(int way); +#endif + #endif #endif /* _BLACKFIN_H_ */ -- cgit v1.2.3-70-g09d2 From 7ab37da52db98ea9f272045c837058dfc1870ac3 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Thu, 14 Aug 2008 14:33:05 +0800 Subject: Blackfin arch: delete dead prototypes Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- include/asm-blackfin/bfin-global.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include') diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index 78eb389d200..8918cea3116 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h @@ -58,11 +58,9 @@ extern void dump_bfin_trace_buffer(void); extern int init_arch_irq(void); extern void bfin_reset(void); -extern void _cplb_hdr(void); /* Blackfin cache functions */ extern void bfin_icache_init(void); extern void bfin_dcache_init(void); -extern int bfin_console_init(void); extern asmlinkage void lower_to_irq14(void); extern asmlinkage void bfin_return_from_exception(void); extern void init_exception_vectors(void); @@ -70,7 +68,6 @@ extern void init_dma(void); extern void program_IAR(void); extern void evt14_softirq(void); extern 
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs); -extern void bfin_gpio_interrupt_setup(int irq, int irq_pfx, int type); extern int bfin_internal_set_wake(unsigned int irq, unsigned int state); extern asmlinkage void finish_atomic_sections (struct pt_regs *regs); @@ -109,12 +106,10 @@ extern void *sram_alloc_with_lsl(size_t, unsigned long); extern int sram_free_with_lsl(const void*); extern const char bfin_board_name[]; -extern unsigned long wall_jiffies; extern unsigned long bfin_sic_iwr[]; extern unsigned vr_wakeup; extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */ -extern struct file_operations dpmc_fops; extern unsigned long _ramstart, _ramend, _rambase; extern unsigned long memory_start, memory_end, physical_mem_end; extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], -- cgit v1.2.3-70-g09d2 From 3c012eebf8869dd3118c21a73a2f8cc9e9c694ea Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Thu, 14 Aug 2008 14:36:15 +0800 Subject: Blackfin arch: move fixed code defines into fixed_code.h as very few things actually need to know these details Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- include/asm-blackfin/Kbuild | 2 +- include/asm-blackfin/bfin-global.h | 13 ------------- include/asm-blackfin/fixed_code.h | 24 ++++++++++++++++++++++++ 3 files changed, 25 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/include/asm-blackfin/Kbuild b/include/asm-blackfin/Kbuild index 71f8fe78325..606ecfdcc96 100644 --- a/include/asm-blackfin/Kbuild +++ b/include/asm-blackfin/Kbuild @@ -1,3 +1,3 @@ include include/asm-generic/Kbuild.asm -header-y += fixed_code.h +unifdef-y += fixed_code.h diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index 8918cea3116..e61ffc0162b 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h @@ -70,19 +70,6 @@ extern void evt14_softirq(void); extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs); extern int bfin_internal_set_wake(unsigned int irq, unsigned int state); -extern asmlinkage void finish_atomic_sections (struct pt_regs *regs); -extern char fixed_code_start; -extern char fixed_code_end; -extern int atomic_xchg32(void); -extern int atomic_cas32(void); -extern int atomic_add32(void); -extern int atomic_sub32(void); -extern int atomic_ior32(void); -extern int atomic_and32(void); -extern int atomic_xor32(void); -extern void safe_user_instruction(void); -extern void sigreturn_stub(void); - extern void *l1_data_A_sram_alloc(size_t); extern void *l1_data_B_sram_alloc(size_t); extern void *l1_inst_sram_alloc(size_t); diff --git a/include/asm-blackfin/fixed_code.h b/include/asm-blackfin/fixed_code.h index 37db66c7030..32c4d495d84 100644 --- a/include/asm-blackfin/fixed_code.h +++ b/include/asm-blackfin/fixed_code.h @@ -1,6 +1,28 @@ /* This file defines the fixed addresses where userspace programs can find atomic code sequences. 
*/ +#ifndef __BFIN_ASM_FIXED_CODE_H__ +#define __BFIN_ASM_FIXED_CODE_H__ + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#include +#include +extern asmlinkage void finish_atomic_sections(struct pt_regs *regs); +extern char fixed_code_start; +extern char fixed_code_end; +extern int atomic_xchg32(void); +extern int atomic_cas32(void); +extern int atomic_add32(void); +extern int atomic_sub32(void); +extern int atomic_ior32(void); +extern int atomic_and32(void); +extern int atomic_xor32(void); +extern void safe_user_instruction(void); +extern void sigreturn_stub(void); +#endif +#endif + #define FIXED_CODE_START 0x400 #define SIGRETURN_STUB 0x400 @@ -20,3 +42,5 @@ #define SAFE_USER_INSTRUCTION 0x480 #define FIXED_CODE_END 0x490 + +#endif -- cgit v1.2.3-70-g09d2 From ee32664da9531329b87aa5109e41e7cc73a04121 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Thu, 14 Aug 2008 14:37:32 +0800 Subject: Blackfin arch: shuffle related prototypes together -- no functional changes Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- include/asm-blackfin/bfin-global.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h index e61ffc0162b..7ba70de66f2 100644 --- a/include/asm-blackfin/bfin-global.h +++ b/include/asm-blackfin/bfin-global.h @@ -56,17 +56,17 @@ extern void dump_bfin_process(struct pt_regs *regs); extern void dump_bfin_mem(struct pt_regs *regs); extern void dump_bfin_trace_buffer(void); +/* init functions only */ extern int init_arch_irq(void); -extern void bfin_reset(void); -/* Blackfin cache functions */ extern void bfin_icache_init(void); extern void bfin_dcache_init(void); -extern asmlinkage void lower_to_irq14(void); -extern asmlinkage void bfin_return_from_exception(void); extern void init_exception_vectors(void); -extern void init_dma(void); extern void program_IAR(void); -extern void evt14_softirq(void); + +extern void bfin_reset(void); +extern asmlinkage void lower_to_irq14(void); +extern asmlinkage void bfin_return_from_exception(void); +extern asmlinkage void evt14_softirq(void); extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs); extern int bfin_internal_set_wake(unsigned int irq, unsigned int state); -- cgit v1.2.3-70-g09d2 From b42a9f442c6f9f47a9d63f66fcc67ab8efe7b7fa Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Thu, 14 Aug 2008 15:19:25 +0800 Subject: Blackfin arch: fix missing digit in SCLK range checking Signed-off-by: Mike Frysinger Signed-off-by: Bryan Wu --- include/asm-blackfin/mach-bf533/mem_init.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-blackfin/mach-bf533/mem_init.h b/include/asm-blackfin/mach-bf533/mem_init.h index 995c06b2b1e..ed2034bf10e 100644 --- a/include/asm-blackfin/mach-bf533/mem_init.h +++ b/include/asm-blackfin/mach-bf533/mem_init.h @@ -47,7 +47,7 @@ #define SDRAM_tRCD TRCD_2 #define SDRAM_tWR TWR_2 #endif -#if (CONFIG_SCLK_HZ > 8955223) && (CONFIG_SCLK_HZ <= 104477612) +#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612) #define SDRAM_tRP TRP_2 #define SDRAM_tRP_num 2 #define SDRAM_tRAS TRAS_5 -- cgit v1.2.3-70-g09d2 From a4b7b6d7d3f4f71e741a878bcca6226d8d326a34 Mon Sep 17 00:00:00 2001 From: Bryan Wu Date: Thu, 14 Aug 2008 15:40:19 +0800 Subject: Blackfin arch: hook up some missing new system calls Signed-off-by: Bryan Wu --- arch/blackfin/mach-common/entry.S | 6 ++++++ include/asm-blackfin/unistd.h | 8 +++++++- 2 files changed, 13 
insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 4bd971e81f1..117c01c2c6b 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -1422,6 +1422,12 @@ ENTRY(_sys_call_table) .long _sys_semtimedop .long _sys_timerfd_settime .long _sys_timerfd_gettime + .long _sys_signalfd4 /* 360 */ + .long _sys_eventfd2 + .long _sys_epoll_create1 + .long _sys_dup3 + .long _sys_pipe2 + .long _sys_inotify_init1 /* 365 */ .rept NR_syscalls-(.-_sys_call_table)/4 .long _sys_ni_syscall diff --git a/include/asm-blackfin/unistd.h b/include/asm-blackfin/unistd.h index 42955d0c439..1e57b636e0b 100644 --- a/include/asm-blackfin/unistd.h +++ b/include/asm-blackfin/unistd.h @@ -372,8 +372,14 @@ #define __NR_semtimedop 357 #define __NR_timerfd_settime 358 #define __NR_timerfd_gettime 359 +#define __NR_signalfd4 360 +#define __NR_eventfd2 361 +#define __NR_epoll_create1 362 +#define __NR_dup3 363 +#define __NR_pipe2 364 +#define __NR_inotify_init1 365 -#define __NR_syscall 360 +#define __NR_syscall 366 #define NR_syscalls __NR_syscall /* Old optional stuff no one actually uses */ -- cgit v1.2.3-70-g09d2 From 5cd9c58fbe9ec92b45b27e131719af4f2bd9eb40 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 14 Aug 2008 11:37:28 +0100 Subject: security: Fix setting of PF_SUPERPRIV by __capable() Fix the setting of PF_SUPERPRIV by __capable() as it could corrupt the flags the target process if that is not the current process and it is trying to change its own flags in a different way at the same time. __capable() is using neither atomic ops nor locking to protect t->flags. This patch removes __capable() and introduces has_capability() that doesn't set PF_SUPERPRIV on the process being queried. This patch further splits security_ptrace() in two: (1) security_ptrace_may_access(). This passes judgement on whether one process may access another only (PTRACE_MODE_ATTACH for ptrace() and PTRACE_MODE_READ for /proc), and takes a pointer to the child process. current is the parent. (2) security_ptrace_traceme(). This passes judgement on PTRACE_TRACEME only, and takes only a pointer to the parent process. current is the child. In Smack and commoncap, this uses has_capability() to determine whether the parent will be permitted to use PTRACE_ATTACH if normal checks fail. This does not set PF_SUPERPRIV. Two of the instances of __capable() actually only act on current, and so have been changed to calls to capable(). Of the places that were using __capable(): (1) The OOM killer calls __capable() thrice when weighing the killability of a process. All of these now use has_capability(). (2) cap_ptrace() and smack_ptrace() were using __capable() to check to see whether the parent was allowed to trace any process. As mentioned above, these have been split. For PTRACE_ATTACH and /proc, capable() is now used, and for PTRACE_TRACEME, has_capability() is used. (3) cap_safe_nice() only ever saw current, so now uses capable(). (4) smack_setprocattr() rejected accesses to tasks other than current just after calling __capable(), so the order of these two tests have been switched and capable() is used instead. (5) In smack_file_send_sigiotask(), we need to allow privileged processes to receive SIGIO on files they're manipulating. (6) In smack_task_wait(), we let a process wait for a privileged process, whether or not the process doing the waiting is privileged. I've tested this with the LTP SELinux and syscalls testscripts. 
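For reference, a minimal caller-side sketch of the rule this change establishes (kernel context assumed; the wrapper function below is hypothetical and only illustrates usage): capable() is reserved for checks on current and may set PF_SUPERPRIV there, while has_capability() is used for checks on some other task and never touches that task's flags.

#include <linux/capability.h>
#include <linux/sched.h>

/*
 * Hypothetical helper: decide whether task @p should be treated as
 * privileged for CAP_SYS_ADMIN purposes.  When @p is current we use
 * capable(), which may set PF_SUPERPRIV on current; for any other
 * task we use has_capability(), which performs the same security
 * check but leaves @p's flags alone.
 */
static int example_is_privileged(struct task_struct *p)
{
	if (p == current)
		return capable(CAP_SYS_ADMIN);

	return has_capability(p, CAP_SYS_ADMIN);
}

This mirrors the conversions in the diff below: the OOM killer, which inspects arbitrary victim tasks, switches to has_capability(), while call sites that were already acting on current keep (or switch to) capable().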
Signed-off-by: David Howells Acked-by: Serge Hallyn Acked-by: Casey Schaufler Acked-by: Andrew G. Morgan Acked-by: Al Viro Signed-off-by: James Morris --- include/linux/capability.h | 15 ++++++++++++-- include/linux/security.h | 39 +++++++++++++++++++++++------------- kernel/capability.c | 21 ++++++++++++-------- kernel/ptrace.c | 5 ++--- mm/oom_kill.c | 6 ++++-- security/capability.c | 3 ++- security/commoncap.c | 24 ++++++++++++++++------- security/root_plug.c | 3 ++- security/security.c | 10 +++++++--- security/selinux/hooks.c | 25 ++++++++++++++++------- security/smack/smack_lsm.c | 49 ++++++++++++++++++++++++++++++++-------------- 11 files changed, 137 insertions(+), 63 deletions(-) (limited to 'include') diff --git a/include/linux/capability.h b/include/linux/capability.h index 02673846d20..9d1fe30b6f6 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -503,8 +503,19 @@ extern const kernel_cap_t __cap_init_eff_set; kernel_cap_t cap_set_effective(const kernel_cap_t pE_new); -int capable(int cap); -int __capable(struct task_struct *t, int cap); +/** + * has_capability - Determine if a task has a superior capability available + * @t: The task in question + * @cap: The capability to be tested for + * + * Return true if the specified task has the given superior capability + * currently in effect, false if not. + * + * Note that this does not set PF_SUPERPRIV on the task. + */ +#define has_capability(t, cap) (security_capable((t), (cap)) == 0) + +extern int capable(int cap); #endif /* __KERNEL__ */ diff --git a/include/linux/security.h b/include/linux/security.h index fd96e7f8a6f..2ee5ecfb239 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -46,8 +46,8 @@ struct audit_krule; */ extern int cap_capable(struct task_struct *tsk, int cap); extern int cap_settime(struct timespec *ts, struct timezone *tz); -extern int cap_ptrace(struct task_struct *parent, struct task_struct *child, - unsigned int mode); +extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); +extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -1157,17 +1157,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @alter contains the flag indicating whether changes are to be made. * Return 0 if permission is granted. * - * @ptrace: - * Check permission before allowing the @parent process to trace the + * @ptrace_may_access: + * Check permission before allowing the current process to trace the * @child process. * Security modules may also want to perform a process tracing check * during an execve in the set_security or apply_creds hooks of * binprm_security_ops if the process is being traced and its security * attributes would be changed by the execve. - * @parent contains the task_struct structure for parent process. - * @child contains the task_struct structure for child process. + * @child contains the task_struct structure for the target process. * @mode contains the PTRACE_MODE flags indicating the form of access. * Return 0 if permission is granted. 
+ * @ptrace_traceme: + * Check that the @parent process has sufficient permission to trace the + * current process before allowing the current process to present itself + * to the @parent process for tracing. + * The parent process will still have to undergo the ptrace_may_access + * checks before it is allowed to trace this one. + * @parent contains the task_struct structure for debugger process. + * Return 0 if permission is granted. * @capget: * Get the @effective, @inheritable, and @permitted capability sets for * the @target process. The hook may also perform permission checking to @@ -1287,8 +1294,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) struct security_operations { char name[SECURITY_NAME_MAX + 1]; - int (*ptrace) (struct task_struct *parent, struct task_struct *child, - unsigned int mode); + int (*ptrace_may_access) (struct task_struct *child, unsigned int mode); + int (*ptrace_traceme) (struct task_struct *parent); int (*capget) (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); @@ -1560,8 +1567,8 @@ extern struct dentry *securityfs_create_dir(const char *name, struct dentry *par extern void securityfs_remove(struct dentry *dentry); /* Security operations */ -int security_ptrace(struct task_struct *parent, struct task_struct *child, - unsigned int mode); +int security_ptrace_may_access(struct task_struct *child, unsigned int mode); +int security_ptrace_traceme(struct task_struct *parent); int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, @@ -1742,11 +1749,15 @@ static inline int security_init(void) return 0; } -static inline int security_ptrace(struct task_struct *parent, - struct task_struct *child, - unsigned int mode) +static inline int security_ptrace_may_access(struct task_struct *child, + unsigned int mode) +{ + return cap_ptrace_may_access(child, mode); +} + +static inline int security_ptrace_traceme(struct task_struct *child) { - return cap_ptrace(parent, child, mode); + return cap_ptrace_traceme(parent); } static inline int security_capget(struct task_struct *target, diff --git a/kernel/capability.c b/kernel/capability.c index 0101e847603..33e51e78c2d 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -486,17 +486,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) return ret; } -int __capable(struct task_struct *t, int cap) +/** + * capable - Determine if the current task has a superior capability in effect + * @cap: The capability to be tested for + * + * Return true if the current task has the given superior capability currently + * available for use, false if not. + * + * This sets PF_SUPERPRIV on the task if the capability is available on the + * assumption that it's about to be used. 
+ */ +int capable(int cap) { - if (security_capable(t, cap) == 0) { - t->flags |= PF_SUPERPRIV; + if (has_capability(current, cap)) { + current->flags |= PF_SUPERPRIV; return 1; } return 0; } - -int capable(int cap) -{ - return __capable(current, cap); -} EXPORT_SYMBOL(capable); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 082b3fcb32a..356699a96d5 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) if (!dumpable && !capable(CAP_SYS_PTRACE)) return -EPERM; - return security_ptrace(current, task, mode); + return security_ptrace_may_access(task, mode); } bool ptrace_may_access(struct task_struct *task, unsigned int mode) @@ -499,8 +499,7 @@ repeat: goto repeat; } - ret = security_ptrace(current->parent, current, - PTRACE_MODE_ATTACH); + ret = security_ptrace_traceme(current->parent); /* * Set the ptrace bit in the process ptrace flags. diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 8a5467ee626..64e5b4bcd96 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -26,6 +26,7 @@ #include #include #include +#include int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; @@ -128,7 +129,8 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) * Superuser processes are usually more important, so we make it * less likely that we kill those. */ - if (__capable(p, CAP_SYS_ADMIN) || __capable(p, CAP_SYS_RESOURCE)) + if (has_capability(p, CAP_SYS_ADMIN) || + has_capability(p, CAP_SYS_RESOURCE)) points /= 4; /* @@ -137,7 +139,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) * tend to only have this flag set on applications they think * of as important. */ - if (__capable(p, CAP_SYS_RAWIO)) + if (has_capability(p, CAP_SYS_RAWIO)) points /= 4; /* diff --git a/security/capability.c b/security/capability.c index 63d10da515a..24587481903 100644 --- a/security/capability.c +++ b/security/capability.c @@ -811,7 +811,8 @@ struct security_operations default_security_ops = { void security_fixup_ops(struct security_operations *ops) { - set_to_cap_if_null(ops, ptrace); + set_to_cap_if_null(ops, ptrace_may_access); + set_to_cap_if_null(ops, ptrace_traceme); set_to_cap_if_null(ops, capget); set_to_cap_if_null(ops, capset_check); set_to_cap_if_null(ops, capset_set); diff --git a/security/commoncap.c b/security/commoncap.c index 4afbece37a0..e4c4b3fc0c0 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -63,14 +63,24 @@ int cap_settime(struct timespec *ts, struct timezone *tz) return 0; } -int cap_ptrace (struct task_struct *parent, struct task_struct *child, - unsigned int mode) +int cap_ptrace_may_access(struct task_struct *child, unsigned int mode) { /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */ - if (!cap_issubset(child->cap_permitted, parent->cap_permitted) && - !__capable(parent, CAP_SYS_PTRACE)) - return -EPERM; - return 0; + if (cap_issubset(child->cap_permitted, current->cap_permitted)) + return 0; + if (capable(CAP_SYS_PTRACE)) + return 0; + return -EPERM; +} + +int cap_ptrace_traceme(struct task_struct *parent) +{ + /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. 
*/ + if (cap_issubset(current->cap_permitted, parent->cap_permitted)) + return 0; + if (has_capability(parent, CAP_SYS_PTRACE)) + return 0; + return -EPERM; } int cap_capget (struct task_struct *target, kernel_cap_t *effective, @@ -534,7 +544,7 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, static inline int cap_safe_nice(struct task_struct *p) { if (!cap_issubset(p->cap_permitted, current->cap_permitted) && - !__capable(current, CAP_SYS_NICE)) + !capable(CAP_SYS_NICE)) return -EPERM; return 0; } diff --git a/security/root_plug.c b/security/root_plug.c index be0ebec2580..c3f68b5b372 100644 --- a/security/root_plug.c +++ b/security/root_plug.c @@ -72,7 +72,8 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm) static struct security_operations rootplug_security_ops = { /* Use the capability functions for some of the hooks */ - .ptrace = cap_ptrace, + .ptrace_may_access = cap_ptrace_may_access, + .ptrace_traceme = cap_ptrace_traceme, .capget = cap_capget, .capset_check = cap_capset_check, .capset_set = cap_capset_set, diff --git a/security/security.c b/security/security.c index ff706872775..3a4b4f55b33 100644 --- a/security/security.c +++ b/security/security.c @@ -127,10 +127,14 @@ int register_security(struct security_operations *ops) /* Security operations */ -int security_ptrace(struct task_struct *parent, struct task_struct *child, - unsigned int mode) +int security_ptrace_may_access(struct task_struct *child, unsigned int mode) { - return security_ops->ptrace(parent, child, mode); + return security_ops->ptrace_may_access(child, mode); +} + +int security_ptrace_traceme(struct task_struct *parent) +{ + return security_ops->ptrace_traceme(parent); } int security_capget(struct task_struct *target, diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 3ae9bec5a50..03fc6a81ae3 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1738,24 +1738,34 @@ static inline u32 file_to_av(struct file *file) /* Hook functions begin here. 
*/ -static int selinux_ptrace(struct task_struct *parent, - struct task_struct *child, - unsigned int mode) +static int selinux_ptrace_may_access(struct task_struct *child, + unsigned int mode) { int rc; - rc = secondary_ops->ptrace(parent, child, mode); + rc = secondary_ops->ptrace_may_access(child, mode); if (rc) return rc; if (mode == PTRACE_MODE_READ) { - struct task_security_struct *tsec = parent->security; + struct task_security_struct *tsec = current->security; struct task_security_struct *csec = child->security; return avc_has_perm(tsec->sid, csec->sid, SECCLASS_FILE, FILE__READ, NULL); } - return task_has_perm(parent, child, PROCESS__PTRACE); + return task_has_perm(current, child, PROCESS__PTRACE); +} + +static int selinux_ptrace_traceme(struct task_struct *parent) +{ + int rc; + + rc = secondary_ops->ptrace_traceme(parent); + if (rc) + return rc; + + return task_has_perm(parent, current, PROCESS__PTRACE); } static int selinux_capget(struct task_struct *target, kernel_cap_t *effective, @@ -5346,7 +5356,8 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer) static struct security_operations selinux_ops = { .name = "selinux", - .ptrace = selinux_ptrace, + .ptrace_may_access = selinux_ptrace_may_access, + .ptrace_traceme = selinux_ptrace_traceme, .capget = selinux_capget, .capset_check = selinux_capset_check, .capset_set = selinux_capset_set, diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 1b40e558f98..87d75417ea9 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -87,27 +87,46 @@ struct inode_smack *new_inode_smack(char *smack) */ /** - * smack_ptrace - Smack approval on ptrace - * @ptp: parent task pointer + * smack_ptrace_may_access - Smack approval on PTRACE_ATTACH * @ctp: child task pointer * * Returns 0 if access is OK, an error code otherwise * * Do the capability checks, and require read and write. */ -static int smack_ptrace(struct task_struct *ptp, struct task_struct *ctp, - unsigned int mode) +static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode) { int rc; - rc = cap_ptrace(ptp, ctp, mode); + rc = cap_ptrace_may_access(ctp, mode); if (rc != 0) return rc; - rc = smk_access(ptp->security, ctp->security, MAY_READWRITE); - if (rc != 0 && __capable(ptp, CAP_MAC_OVERRIDE)) + rc = smk_access(current->security, ctp->security, MAY_READWRITE); + if (rc != 0 && capable(CAP_MAC_OVERRIDE)) return 0; + return rc; +} + +/** + * smack_ptrace_traceme - Smack approval on PTRACE_TRACEME + * @ptp: parent task pointer + * + * Returns 0 if access is OK, an error code otherwise + * + * Do the capability checks, and require read and write. + */ +static int smack_ptrace_traceme(struct task_struct *ptp) +{ + int rc; + + rc = cap_ptrace_traceme(ptp); + if (rc != 0) + return rc; + rc = smk_access(ptp->security, current->security, MAY_READWRITE); + if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE)) + return 0; return rc; } @@ -923,7 +942,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk, */ file = container_of(fown, struct file, f_owner); rc = smk_access(file->f_security, tsk->security, MAY_WRITE); - if (rc != 0 && __capable(tsk, CAP_MAC_OVERRIDE)) + if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE)) return 0; return rc; } @@ -1164,12 +1183,12 @@ static int smack_task_wait(struct task_struct *p) * account for the smack labels having gotten to * be different in the first place. 
* - * This breaks the strict subjet/object access + * This breaks the strict subject/object access * control ideal, taking the object's privilege * state into account in the decision as well as * the smack value. */ - if (capable(CAP_MAC_OVERRIDE) || __capable(p, CAP_MAC_OVERRIDE)) + if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE)) return 0; return rc; @@ -2016,9 +2035,6 @@ static int smack_setprocattr(struct task_struct *p, char *name, { char *newsmack; - if (!__capable(p, CAP_MAC_ADMIN)) - return -EPERM; - /* * Changing another process' Smack value is too dangerous * and supports no sane use case. @@ -2026,6 +2042,9 @@ static int smack_setprocattr(struct task_struct *p, char *name, if (p != current) return -EPERM; + if (!capable(CAP_MAC_ADMIN)) + return -EPERM; + if (value == NULL || size == 0 || size >= SMK_LABELLEN) return -EINVAL; @@ -2552,7 +2571,8 @@ static void smack_release_secctx(char *secdata, u32 seclen) struct security_operations smack_ops = { .name = "smack", - .ptrace = smack_ptrace, + .ptrace_may_access = smack_ptrace_may_access, + .ptrace_traceme = smack_ptrace_traceme, .capget = cap_capget, .capset_check = cap_capset_check, .capset_set = cap_capset_set, @@ -2729,4 +2749,3 @@ static __init int smack_init(void) * all processes and objects when they are created. */ security_initcall(smack_init); - -- cgit v1.2.3-70-g09d2 From 3c7db22a194d3b53584047425af82b4e1e03d9f7 Mon Sep 17 00:00:00 2001 From: Bob Moore Date: Mon, 4 Aug 2008 11:13:01 +0800 Subject: ACPICA: Additional error checking for pathname utilities Add error check after all calls to acpi_ns_get_pathname_length. Add status return from acpi_ns_build_external_path and check after all calls. Add parameter validation to acpi_ut_initialize_buffer. Reported by and initial patch by Ingo Molnar. 
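The shape of the fix is a length-then-build pattern: treat a zero pathname length as an error and have the builder return a status that every caller checks. Below is a minimal stand-alone sketch of that pattern in plain C with made-up names; the real ACPICA changes are in the diff that follows.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef int status_t;
#define OK		0
#define BAD_PARAMETER	1

/* Stand-in for acpi_ns_get_pathname_length(): 0 now means "invalid node". */
static size_t path_length(const char *node)
{
	return node ? strlen(node) + 1 : 0;
}

/* Stand-in for acpi_ns_build_external_path(): returns a status, not void. */
static status_t build_path(const char *node, size_t size, char *buf)
{
	if (!node || !buf || size < strlen(node) + 1)
		return BAD_PARAMETER;
	memcpy(buf, node, strlen(node) + 1);
	return OK;
}

/* Stand-in for acpi_ns_get_external_pathname(): checks both steps. */
static char *get_pathname(const char *node)
{
	size_t size = path_length(node);
	char *buf;

	if (!size)				/* zero length is a failure   */
		return NULL;

	buf = calloc(1, size);			/* validate/allocate/clear    */
	if (!buf)
		return NULL;

	if (build_path(node, size, buf) != OK) {	/* check the status   */
		free(buf);
		return NULL;
	}
	return buf;
}

int main(void)
{
	char *p = get_pathname("\\_SB.PCI0");

	printf("%s\n", p ? p : "(error)");
	free(p);
	return 0;
}

The point is the same as in the patch: a void builder gave callers no way to notice a bad node or an undersized buffer, so the length check and the status return have to be propagated at every call site.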
http://lkml.org/lkml/2008/7/21/176 Signed-off-by: Bob Moore Signed-off-by: Lin Ming Signed-off-by: Andi Kleen --- drivers/acpi/namespace/nsnames.c | 34 ++++++++++++++++++++++------------ drivers/acpi/resources/rscalc.c | 3 +++ drivers/acpi/utilities/utalloc.c | 8 +++++--- drivers/acpi/utilities/utobject.c | 13 +++++++++---- include/acpi/acnamesp.h | 2 +- 5 files changed, 40 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c index 549db42f16c..bd577387800 100644 --- a/drivers/acpi/namespace/nsnames.c +++ b/drivers/acpi/namespace/nsnames.c @@ -56,13 +56,14 @@ ACPI_MODULE_NAME("nsnames") * Size - Size of the pathname * *name_buffer - Where to return the pathname * - * RETURN: Places the pathname into the name_buffer, in external format + * RETURN: Status + * Places the pathname into the name_buffer, in external format * (name segments separated by path separators) * * DESCRIPTION: Generate a full pathaname * ******************************************************************************/ -void +acpi_status acpi_ns_build_external_path(struct acpi_namespace_node *node, acpi_size size, char *name_buffer) { @@ -77,7 +78,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node, if (index < ACPI_NAME_SIZE) { name_buffer[0] = AML_ROOT_PREFIX; name_buffer[1] = 0; - return; + return (AE_OK); } /* Store terminator byte, then build name backwards */ @@ -105,11 +106,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node, if (index != 0) { ACPI_ERROR((AE_INFO, - "Could not construct pathname; index=%X, size=%X, Path=%s", + "Could not construct external pathname; index=%X, size=%X, Path=%s", (u32) index, (u32) size, &name_buffer[size])); + + return (AE_BAD_PARAMETER); } - return; + return (AE_OK); } #ifdef ACPI_DEBUG_OUTPUT @@ -129,6 +132,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node, char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { + acpi_status status; char *name_buffer; acpi_size size; @@ -138,8 +142,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) size = acpi_ns_get_pathname_length(node); if (!size) { - ACPI_ERROR((AE_INFO, "Invalid node failure")); - return_PTR(NULL); + return (NULL); } /* Allocate a buffer to be returned to caller */ @@ -152,7 +155,11 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) /* Build the path in the allocated buffer */ - acpi_ns_build_external_path(node, size, name_buffer); + status = acpi_ns_build_external_path(node, size, name_buffer); + if (ACPI_FAILURE(status)) { + return (NULL); + } + return_PTR(name_buffer); } #endif @@ -186,7 +193,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node) while (next_node && (next_node != acpi_gbl_root_node)) { if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) { ACPI_ERROR((AE_INFO, - "Invalid NS Node (%p) while traversing path", + "Invalid Namespace Node (%p) while traversing namespace", next_node)); return 0; } @@ -234,8 +241,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle, required_size = acpi_ns_get_pathname_length(node); if (!required_size) { - ACPI_ERROR((AE_INFO, "Invalid node failure")); - return_ACPI_STATUS(AE_ERROR); + return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Validate/Allocate/Clear caller buffer */ @@ -247,7 +253,11 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle, /* Build the path in the caller buffer */ - acpi_ns_build_external_path(node, required_size, buffer->pointer); + 
status = + acpi_ns_build_external_path(node, required_size, buffer->pointer); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n", (char *)buffer->pointer, (u32) required_size)); diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c index f61ebc679e6..d9063ea414e 100644 --- a/drivers/acpi/resources/rscalc.c +++ b/drivers/acpi/resources/rscalc.c @@ -587,6 +587,9 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object, } else { temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node); + if (!temp_size_needed) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } } } else { /* diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c index e7bf34a7b1d..7dcb67e0b21 100644 --- a/drivers/acpi/utilities/utalloc.c +++ b/drivers/acpi/utilities/utalloc.c @@ -242,10 +242,12 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer, { acpi_status status = AE_OK; - if (!required_length) { - WARN_ON(1); - return AE_ERROR; + /* Parameter validation */ + + if (!buffer || !required_length) { + return (AE_BAD_PARAMETER); } + switch (buffer->length) { case ACPI_NO_BUFFER: diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c index e25484495e6..916eff399eb 100644 --- a/drivers/acpi/utilities/utobject.c +++ b/drivers/acpi/utilities/utobject.c @@ -425,6 +425,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, acpi_size * obj_length) { acpi_size length; + acpi_size size; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object); @@ -484,10 +485,14 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, * Get the actual length of the full pathname to this object. 
* The reference will be converted to the pathname to the object */ - length += - ACPI_ROUND_UP_TO_NATIVE_WORD - (acpi_ns_get_pathname_length - (internal_object->reference.node)); + size = + acpi_ns_get_pathname_length(internal_object-> + reference.node); + if (!size) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + length += ACPI_ROUND_UP_TO_NATIVE_WORD(size); break; default: diff --git a/include/acpi/acnamesp.h b/include/acpi/acnamesp.h index 9ed70a05058..c34008507b6 100644 --- a/include/acpi/acnamesp.h +++ b/include/acpi/acnamesp.h @@ -182,7 +182,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info); */ u32 acpi_ns_opens_scope(acpi_object_type type); -void +acpi_status acpi_ns_build_external_path(struct acpi_namespace_node *node, acpi_size size, char *name_buffer); -- cgit v1.2.3-70-g09d2 From 519c31bacf78a969efa8d2e55ed8862848f28590 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 14 Aug 2008 19:55:15 +0200 Subject: x86, AMD IOMMU: use status bit instead of memory write-back for completion wait Signed-off-by: Joerg Roedel Signed-off-by: Ingo Molnar --- arch/x86/kernel/amd_iommu.c | 17 ++++++++++------- include/asm-x86/amd_iommu_types.h | 4 ++++ 2 files changed, 14 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 22d7d050905..028e945c68a 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -101,16 +101,13 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) */ static int iommu_completion_wait(struct amd_iommu *iommu) { - int ret; + int ret, ready = 0; + unsigned status = 0; struct iommu_cmd cmd; - volatile u64 ready = 0; - unsigned long ready_phys = virt_to_phys(&ready); unsigned long i = 0; memset(&cmd, 0, sizeof(cmd)); - cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK; - cmd.data[1] = upper_32_bits(ready_phys); - cmd.data[2] = 1; /* value written to 'ready' */ + cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); iommu->need_sync = 0; @@ -122,9 +119,15 @@ static int iommu_completion_wait(struct amd_iommu *iommu) while (!ready && (i < EXIT_LOOP_COUNT)) { ++i; - cpu_relax(); + /* wait for the bit to become one */ + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; } + /* set bit back to zero */ + status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; + writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); + if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h index 22aa58ca199..32543229db7 100644 --- a/include/asm-x86/amd_iommu_types.h +++ b/include/asm-x86/amd_iommu_types.h @@ -69,6 +69,9 @@ #define MMIO_EVT_TAIL_OFFSET 0x2018 #define MMIO_STATUS_OFFSET 0x2020 +/* MMIO status bits */ +#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 + /* feature control bits */ #define CONTROL_IOMMU_EN 0x00ULL #define CONTROL_HT_TUN_EN 0x01ULL @@ -89,6 +92,7 @@ #define CMD_INV_IOMMU_PAGES 0x03 #define CMD_COMPL_WAIT_STORE_MASK 0x01 +#define CMD_COMPL_WAIT_INT_MASK 0x02 #define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 #define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 -- cgit v1.2.3-70-g09d2 From 9f5f5fb35d2934fe7dc0cb019854a030efd10cd7 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 14 Aug 2008 19:55:16 +0200 Subject: x86, AMD IOMMU: initialize device table properly This patch adds device table initializations which forbids memory 
accesses for devices per default and disables all page faults. Signed-off-by: Joerg Roedel Signed-off-by: Ingo Molnar --- arch/x86/kernel/amd_iommu_init.c | 18 ++++++++++++++++++ include/asm-x86/amd_iommu_types.h | 1 + 2 files changed, 19 insertions(+) (limited to 'include') diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index d9a9da597e7..ceba3381153 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -800,6 +800,21 @@ static int __init init_memory_definitions(struct acpi_table_header *table) return 0; } +/* + * Init the device table to not allow DMA access for devices and + * suppress all page faults + */ +static void init_device_table(void) +{ + u16 devid; + + for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { + set_dev_entry_bit(devid, DEV_ENTRY_VALID); + set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); + set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT); + } +} + /* * This function finally enables all IOMMUs found in the system after * they have been initialized @@ -931,6 +946,9 @@ int __init amd_iommu_init(void) if (amd_iommu_pd_alloc_bitmap == NULL) goto free; + /* init the device table */ + init_device_table(); + /* * let all alias entries point to itself */ diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h index 32543229db7..f0beca73e36 100644 --- a/include/asm-x86/amd_iommu_types.h +++ b/include/asm-x86/amd_iommu_types.h @@ -103,6 +103,7 @@ #define DEV_ENTRY_TRANSLATION 0x01 #define DEV_ENTRY_IR 0x3d #define DEV_ENTRY_IW 0x3e +#define DEV_ENTRY_NO_PAGE_FAULT 0x62 #define DEV_ENTRY_EX 0x67 #define DEV_ENTRY_SYSMGT1 0x68 #define DEV_ENTRY_SYSMGT2 0x69 -- cgit v1.2.3-70-g09d2 From 8a456695c5020d6317f9c7af190999e9414b0d3e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 14 Aug 2008 19:55:17 +0200 Subject: x86m AMD IOMMU: cleanup: replace LOW_U32 macro with generic lower_32_bits Signed-off-by: Joerg Roedel Signed-off-by: Ingo Molnar --- arch/x86/kernel/amd_iommu.c | 2 +- include/asm-x86/amd_iommu_types.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 028e945c68a..de39e1f2ede 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -164,7 +164,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, address &= PAGE_MASK; CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES); cmd.data[1] |= domid; - cmd.data[2] = LOW_U32(address); + cmd.data[2] = lower_32_bits(address); cmd.data[3] = upper_32_bits(address); if (s) /* size bit - we flush more than one 4kb page */ cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h index f0beca73e36..dcc81206739 100644 --- a/include/asm-x86/amd_iommu_types.h +++ b/include/asm-x86/amd_iommu_types.h @@ -31,9 +31,6 @@ #define ALIAS_TABLE_ENTRY_SIZE 2 #define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) -/* helper macros */ -#define LOW_U32(x) ((x) & ((1ULL << 32)-1)) - /* Length of the MMIO region for the AMD IOMMU */ #define MMIO_REGION_LENGTH 0x4000 -- cgit v1.2.3-70-g09d2 From 394a15051c33f2b18e72f42283b36a9388fa414b Mon Sep 17 00:00:00 2001 From: Mark Langsdorf Date: Thu, 14 Aug 2008 09:11:26 -0500 Subject: x86: invalidate caches before going into suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a CPU core is shut down, all of its caches need to be flushed to prevent stale data from causing 
errors if the core is resumed. Current Linux suspend code performs an assignment after the flush, which can add dirty data back to the cache.  On some AMD platforms, additional speculative reads have caused crashes on resume because of this dirty data. Relocate the cache flush to be the very last thing done before halting.  Tie into an assembly line so the compile will not reorder it.  Add some documentation explaining what is going on and why we're doing this. Signed-off-by: Mark Langsdorf Acked-by: Mark Borden Acked-by: Michael Hohmuth Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_32.c | 5 ++--- arch/x86/kernel/process_64.c | 5 ++--- include/asm-x86/processor.h | 23 +++++++++++++++++++++++ 3 files changed, 27 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 53bc653ed5c..3b7a1ddcc0b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -95,7 +95,6 @@ static inline void play_dead(void) { /* This must be done before dead CPU ack */ cpu_exit_clear(); - wbinvd(); mb(); /* Ack it */ __get_cpu_var(cpu_state) = CPU_DEAD; @@ -104,8 +103,8 @@ static inline void play_dead(void) * With physical CPU hotplug, we should halt the cpu */ local_irq_disable(); - while (1) - halt(); + /* mask all interrupts, flush any and all caches, and halt */ + wbinvd_halt(); } #else static inline void play_dead(void) diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3fb62a7d9a1..71553b664e2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -93,14 +93,13 @@ DECLARE_PER_CPU(int, cpu_state); static inline void play_dead(void) { idle_task_exit(); - wbinvd(); mb(); /* Ack it */ __get_cpu_var(cpu_state) = CPU_DEAD; local_irq_disable(); - while (1) - halt(); + /* mask all interrupts, flush any and all caches, and halt */ + wbinvd_halt(); } #else static inline void play_dead(void) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 5f58da401b4..4df3e2f6fb5 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -728,6 +728,29 @@ extern unsigned long boot_option_idle_override; extern unsigned long idle_halt; extern unsigned long idle_nomwait; +/* + * on systems with caches, caches must be flashed as the absolute + * last instruction before going into a suspended halt. Otherwise, + * dirty data can linger in the cache and become stale on resume, + * leading to strange errors. + * + * perform a variety of operations to guarantee that the compiler + * will not reorder instructions. wbinvd itself is serializing + * so the processor will not reorder. + * + * Systems without cache can just go into halt. 
+ */ +static inline void wbinvd_halt(void) +{ + mb(); + /* check for clflush to determine if wbinvd is legal */ + if (cpu_has_clflush) + asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); + else + while (1) + halt(); +} + extern void enable_sep_cpu(void); extern int sysenter_setup(void); -- cgit v1.2.3-70-g09d2 From 1c5b0eb66d74683e2be5da0c53e33c1f4ca982fd Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Wed, 13 Aug 2008 21:07:07 +0200 Subject: x86: fix readb() et al compile error with gcc-3.2.3 Building 2.6.27-rc1 on x86 with gcc-3.2.3 fails with: In file included from include/asm/dma.h:12, from include/linux/bootmem.h:8, from init/main.c:26: include/asm/io.h: In function `readb': include/asm/io.h:32: syntax error before string constant include/asm/io.h: In function `readw': include/asm/io.h:33: syntax error before string constant include/asm/io.h: In function `readl': include/asm/io.h:34: syntax error before string constant include/asm/io.h: In function `__readb': include/asm/io.h:36: syntax error before string constant include/asm/io.h: In function `__readw': include/asm/io.h:37: syntax error before string constant include/asm/io.h: In function `__readl': include/asm/io.h:38: syntax error before string constant make[1]: *** [init/main.o] Error 1 make: *** [init] Error 2 Starting with 2.6.27-rc1 readb() et al are generated by a build_mmio_read() macro, which generates asm() statements with output register constraints like "=" "q", i.e. as two adjacent string literals. This doesn't work with gcc-3.2.3. Fixed by moving the "=" part into the callers' reg parameter (as suggested by Ingo). Build and boot-tested with gcc-3.2.3 on 32 and 64-bit x86. Fixes . Signed-off-by: Mikael Pettersson Signed-off-by: Ingo Molnar --- include/asm-x86/io.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h index bf5d629b3a3..0f954dc89cb 100644 --- a/include/asm-x86/io.h +++ b/include/asm-x86/io.h @@ -21,7 +21,7 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); #define build_mmio_read(name, size, type, reg, barrier) \ static inline type name(const volatile void __iomem *addr) \ -{ type ret; asm volatile("mov" size " %1,%0":"=" reg (ret) \ +{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ :"m" (*(volatile type __force *)addr) barrier); return ret; } #define build_mmio_write(name, size, type, reg, barrier) \ @@ -29,13 +29,13 @@ static inline void name(type val, volatile void __iomem *addr) \ { asm volatile("mov" size " %0,%1": :reg (val), \ "m" (*(volatile type __force *)addr) barrier); } -build_mmio_read(readb, "b", unsigned char, "q", :"memory") -build_mmio_read(readw, "w", unsigned short, "r", :"memory") -build_mmio_read(readl, "l", unsigned int, "r", :"memory") +build_mmio_read(readb, "b", unsigned char, "=q", :"memory") +build_mmio_read(readw, "w", unsigned short, "=r", :"memory") +build_mmio_read(readl, "l", unsigned int, "=r", :"memory") -build_mmio_read(__readb, "b", unsigned char, "q", ) -build_mmio_read(__readw, "w", unsigned short, "r", ) -build_mmio_read(__readl, "l", unsigned int, "r", ) +build_mmio_read(__readb, "b", unsigned char, "=q", ) +build_mmio_read(__readw, "w", unsigned short, "=r", ) +build_mmio_read(__readl, "l", unsigned int, "=r", ) build_mmio_write(writeb, "b", unsigned char, "q", :"memory") build_mmio_write(writew, "w", unsigned short, "r", :"memory") @@ -59,8 +59,8 @@ build_mmio_write(__writel, "l", unsigned int, "r", ) #define mmiowb() 
barrier() #ifdef CONFIG_X86_64 -build_mmio_read(readq, "q", unsigned long, "r", :"memory") -build_mmio_read(__readq, "q", unsigned long, "r", ) +build_mmio_read(readq, "q", unsigned long, "=r", :"memory") +build_mmio_read(__readq, "q", unsigned long, "=r", ) build_mmio_write(writeq, "q", unsigned long, "r", :"memory") build_mmio_write(__writeq, "q", unsigned long, "r", ) -- cgit v1.2.3-70-g09d2 From 7bc069c6bc4ede519a7116be1b9e149a1dbf787a Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 4 Aug 2008 14:38:54 +0100 Subject: x86: fix spin_is_contended() The masked difference is what needs to be compared against 1, rather than the difference of masked values (which can be negative). Signed-off-by: Jan Beulich Acked-by: Nick Piggin Cc: Signed-off-by: Ingo Molnar --- include/asm-x86/spinlock.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h index 4f9a9861799..e39c790dbfd 100644 --- a/include/asm-x86/spinlock.h +++ b/include/asm-x86/spinlock.h @@ -65,7 +65,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); - return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1; + return (((tmp >> 8) - tmp) & 0xff) > 1; } static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) @@ -127,7 +127,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); - return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1; + return (((tmp >> 16) - tmp) & 0xffff) > 1; } static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) -- cgit v1.2.3-70-g09d2 From 0d5cdc97e242a5589e5dca23277675f4b4482490 Mon Sep 17 00:00:00 2001 From: Jens Rottmann Date: Mon, 4 Aug 2008 14:40:16 +0200 Subject: x86, geode-mfgpt: check IRQ before using MFGPT as clocksource Adds a simple IRQ autodetection to the AMD Geode MFGPT driver, and more importantly, adds some checks, if IRQs can actually be received on the chosen line. This fixes cases where MFGPT is selected as clocksource though not producing any ticks, so the kernel simply starves during boot. Signed-off-by: Jens Rottmann Cc: Andres Salomon Cc: linux-geode@bombadil.infradead.org Cc: Jordan Crouse Signed-off-by: Ingo Molnar --- arch/x86/kernel/mfgpt_32.c | 52 +++++++++++++++++++++++++++++++++------------- include/asm-x86/geode.h | 3 ++- 2 files changed, 39 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 07c0f828f48..3b599518c32 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c @@ -33,6 +33,8 @@ #include #include +#define MFGPT_DEFAULT_IRQ 7 + static struct mfgpt_timer_t { unsigned int avail:1; } mfgpt_timers[MFGPT_MAX_TIMERS]; @@ -157,29 +159,48 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable) } EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event); -int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable) +int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable) { - u32 val, dummy; - int offset; + u32 zsel, lpc, dummy; + int shift; if (timer < 0 || timer >= MFGPT_MAX_TIMERS) return -EIO; - if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) + /* + * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA + * is using the same CMP of the timer's Siamese twin, the IRQ is set to + * 2, and we mustn't use nor change it. + * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the + * IRQ of the 1st. 
This can only happen if forcing an IRQ, calling this + * with *irq==0 is safe. Currently there _are_ no 2 drivers. + */ + rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); + shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4; + if (((zsel >> shift) & 0xF) == 2) return -EIO; - rdmsr(MSR_PIC_ZSEL_LOW, val, dummy); + /* Choose IRQ: if none supplied, keep IRQ already set or use default */ + if (!*irq) + *irq = (zsel >> shift) & 0xF; + if (!*irq) + *irq = MFGPT_DEFAULT_IRQ; - offset = (timer % 4) * 4; - - val &= ~((0xF << offset) | (0xF << (offset + 16))); + /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */ + if (*irq < 1 || *irq == 2 || *irq > 15) + return -EIO; + rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy); + if (lpc & (1 << *irq)) + return -EIO; + /* All chosen and checked - go for it */ + if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable)) + return -EIO; if (enable) { - val |= (irq & 0x0F) << (offset); - val |= (irq & 0x0F) << (offset + 16); + zsel = (zsel & ~(0xF << shift)) | (*irq << shift); + wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy); } - wrmsr(MSR_PIC_ZSEL_LOW, val, dummy); return 0; } @@ -242,7 +263,7 @@ EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer); static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN; static u16 mfgpt_event_clock; -static int irq = 7; +static int irq; static int __init mfgpt_setup(char *str) { get_option(&str, &irq); @@ -346,7 +367,7 @@ int __init mfgpt_timer_setup(void) mfgpt_event_clock = timer; /* Set up the IRQ on the MFGPT side */ - if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) { + if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) { printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq); return -EIO; } @@ -374,13 +395,14 @@ int __init mfgpt_timer_setup(void) &mfgpt_clockevent); printk(KERN_INFO - "mfgpt-timer: registering the MFGPT timer as a clock event.\n"); + "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n", + timer, irq); clockevents_register_device(&mfgpt_clockevent); return 0; err: - geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq); + geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq); printk(KERN_ERR "mfgpt-timer: Unable to set up the MFGPT clock source\n"); return -EIO; diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index bb06027fc83..2c1cda0b8a8 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h @@ -50,6 +50,7 @@ extern int geode_get_dev_base(unsigned int dev); #define MSR_PIC_YSEL_HIGH 0x51400021 #define MSR_PIC_ZSEL_LOW 0x51400022 #define MSR_PIC_ZSEL_HIGH 0x51400023 +#define MSR_PIC_IRQM_LPC 0x51400025 #define MSR_MFGPT_IRQ 0x51400028 #define MSR_MFGPT_NR 0x51400029 @@ -237,7 +238,7 @@ static inline u16 geode_mfgpt_read(int timer, u16 reg) } extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable); -extern int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable); +extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable); extern int geode_mfgpt_alloc_timer(int timer, int domain); #define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1) -- cgit v1.2.3-70-g09d2 From 66d4bdf22b8652cda215e2653c8bbec7a767ed57 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 31 Jul 2008 16:48:31 +0100 Subject: x86-64: fix overlap of modules and fixmap areas Plus add a build time check so this doesn't go unnoticed again. 
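The build-time check added here leans on the BUILD_BUG_ON() idea: encode a layout assumption so the compiler refuses to build once it stops holding. A rough user-space illustration follows; the fixmap start below is a made-up placeholder and the macro is a simplified stand-in for the kernel's.

#include <stdio.h>

#define MODULES_VADDR	0xffffffffa0000000UL
#define MODULES_END	0xffffffffff000000UL
#define FIXADDR_START	0xffffffffff400000UL	/* placeholder value only */

/*
 * Simplified stand-in for the kernel's BUILD_BUG_ON(): when the condition
 * is true the array gets a negative size and compilation fails.
 */
#define BUILD_BUG_ON(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	/* Refuse to build if the modules area reaches into the fixmap. */
	BUILD_BUG_ON(MODULES_END > FIXADDR_START);

	printf("modules: %#lx-%#lx, fixmap starts at %#lx\n",
	       MODULES_VADDR, MODULES_END, FIXADDR_START);
	return 0;
}

If the modules area is ever grown past the fixmap again, the overlap shows up as a compile error instead of as silently clobbered mappings at run time.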
Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 1 + include/asm-x86/pgtable_64.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 1b318e903bf..9bfc4d72fb2 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -88,6 +88,7 @@ void __init x86_64_start_kernel(char * real_mode_data) BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK))); + BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END); /* clear bss before set_intr_gate with early_idt_handler */ clear_bss(); diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index ac5fff4cc58..549144d03d9 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -151,7 +151,7 @@ static inline void native_pgd_clear(pgd_t *pgd) #define VMALLOC_END _AC(0xffffe1ffffffffff, UL) #define VMEMMAP_START _AC(0xffffe20000000000, UL) #define MODULES_VADDR _AC(0xffffffffa0000000, UL) -#define MODULES_END _AC(0xfffffffffff00000, UL) +#define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) #ifndef __ASSEMBLY__ -- cgit v1.2.3-70-g09d2 From ce289e89726948b50a58c9e8f4e81174a8c9c254 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Fri, 15 Aug 2008 00:40:19 -0700 Subject: suspend: fix section mismatch warning - register_nosave_region WARNING: vmlinux.o(.text+0xe684): Section mismatch in reference from the function register_nosave_region() to the function .init.text:__register_nosave_region() The function register_nosave_region() references the function __init __register_nosave_region(). This is often because register_nosave_region lacks a __init annotation or the annotation of __register_nosave_region is wrong. register_nosave_region calls __init function and is called only from __init functions Signed-off-by: Marcin Slusarz Acked-by: Rafael J. Wysocki Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/suspend.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index c6343509597..2ce8207686e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -217,11 +217,11 @@ struct platform_hibernation_ops { #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ extern void __register_nosave_region(unsigned long b, unsigned long e, int km); -static inline void register_nosave_region(unsigned long b, unsigned long e) +static inline void __init register_nosave_region(unsigned long b, unsigned long e) { __register_nosave_region(b, e, 0); } -static inline void register_nosave_region_late(unsigned long b, unsigned long e) +static inline void __init register_nosave_region_late(unsigned long b, unsigned long e) { __register_nosave_region(b, e, 1); } -- cgit v1.2.3-70-g09d2 From 163f6876f5c3ff8215e900b93779e960a56b3694 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 15 Aug 2008 00:40:22 -0700 Subject: kexec jump: rename KEXEC_CONTROL_CODE_SIZE to KEXEC_CONTROL_PAGE_SIZE Rename KEXEC_CONTROL_CODE_SIZE to KEXEC_CONTROL_PAGE_SIZE, because control page is used for not only code on some platform. For example in kexec jump, it is used for data and stack too. [akpm@linux-foundation.org: unbreak powerpc and arm, finish conversion] Signed-off-by: Huang Ying Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: "Eric W. 
Biederman" Cc: Vivek Goyal Cc: Ingo Molnar Cc: Russell King Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/kexec.h | 2 +- arch/arm/kernel/machine_kexec.c | 2 +- arch/ia64/include/asm/kexec.h | 2 +- arch/powerpc/include/asm/kexec.h | 2 +- arch/powerpc/kernel/machine_kexec_32.c | 2 +- arch/s390/include/asm/kexec.h | 2 +- arch/sh/include/asm/kexec.h | 2 +- arch/x86/kernel/machine_kexec_32.c | 2 +- include/asm-mips/kexec.h | 2 +- include/asm-x86/kexec.h | 4 ++-- include/linux/kexec.h | 4 ++-- kernel/kexec.c | 6 +++--- 12 files changed, 16 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index c8986bb99ed..df15a0dc228 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h @@ -10,7 +10,7 @@ /* Maximum address we can use for the control code buffer */ #define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) -#define KEXEC_CONTROL_CODE_SIZE 4096 +#define KEXEC_CONTROL_PAGE_SIZE 4096 #define KEXEC_ARCH KEXEC_ARCH_ARM diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index db8f54a3451..fae5beb3c3d 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -71,7 +71,7 @@ void machine_kexec(struct kimage *image) flush_icache_range((unsigned long) reboot_code_buffer, - (unsigned long) reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); + (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); cpu_proc_fin(); diff --git a/arch/ia64/include/asm/kexec.h b/arch/ia64/include/asm/kexec.h index 541be835fc5..e1d58f819d7 100644 --- a/arch/ia64/include/asm/kexec.h +++ b/arch/ia64/include/asm/kexec.h @@ -9,7 +9,7 @@ /* Maximum address we can use for the control code buffer */ #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE -#define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096) +#define KEXEC_CONTROL_PAGE_SIZE (8192 + 8192 + 4096) /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_IA_64 diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index acdcdc66f1b..3736d9b3328 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -22,7 +22,7 @@ #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE #endif -#define KEXEC_CONTROL_CODE_SIZE 4096 +#define KEXEC_CONTROL_PAGE_SIZE 4096 /* The native architecture */ #ifdef __powerpc64__ diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c index cbaa3419679..ae63a964b85 100644 --- a/arch/powerpc/kernel/machine_kexec_32.c +++ b/arch/powerpc/kernel/machine_kexec_32.c @@ -51,7 +51,7 @@ void default_machine_kexec(struct kimage *image) relocate_new_kernel_size); flush_icache_range(reboot_code_buffer, - reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); + reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); /* now call it */ diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index f219c6411e0..bb729b84a21 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -31,7 +31,7 @@ #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) /* Allocate one page for the pdp and the second for the code */ -#define KEXEC_CONTROL_CODE_SIZE 4096 +#define KEXEC_CONTROL_PAGE_SIZE 4096 /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_S390 diff --git a/arch/sh/include/asm/kexec.h b/arch/sh/include/asm/kexec.h index 00f4260ef09..765a5e1660f 100644 --- a/arch/sh/include/asm/kexec.h +++ b/arch/sh/include/asm/kexec.h 
@@ -21,7 +21,7 @@ /* Maximum address we can use for the control code buffer */ #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE -#define KEXEC_CONTROL_CODE_SIZE 4096 +#define KEXEC_CONTROL_PAGE_SIZE 4096 /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_SH diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 9fe478d9840..466450167de 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -78,7 +78,7 @@ static void load_segments(void) /* * A architecture hook called to validate the * proposed image and prepare the control pages - * as needed. The pages for KEXEC_CONTROL_CODE_SIZE + * as needed. The pages for KEXEC_CONTROL_PAGE_SIZE * have been allocated, but the segments have yet * been copied into the kernel. * diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h index cdbab43b7d3..4314892aaeb 100644 --- a/include/asm-mips/kexec.h +++ b/include/asm-mips/kexec.h @@ -16,7 +16,7 @@ /* Maximum address we can use for the control code buffer */ #define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) -#define KEXEC_CONTROL_CODE_SIZE 4096 +#define KEXEC_CONTROL_PAGE_SIZE 4096 /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_MIPS diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index c0e52a14fd4..f6fb3d21883 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h @@ -63,7 +63,7 @@ /* Maximum address we can use for the control code buffer */ # define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE -# define KEXEC_CONTROL_CODE_SIZE 4096 +# define KEXEC_CONTROL_PAGE_SIZE 4096 /* The native architecture */ # define KEXEC_ARCH KEXEC_ARCH_386 @@ -79,7 +79,7 @@ # define KEXEC_CONTROL_MEMORY_LIMIT (0xFFFFFFFFFFUL) /* Allocate one page for the pdp and the second for the code */ -# define KEXEC_CONTROL_CODE_SIZE (4096UL + 4096UL) +# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL) /* The native architecture */ # define KEXEC_ARCH KEXEC_ARCH_X86_64 diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 32110cede64..17f76fc0517 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -25,8 +25,8 @@ #error KEXEC_CONTROL_MEMORY_LIMIT not defined #endif -#ifndef KEXEC_CONTROL_CODE_SIZE -#error KEXEC_CONTROL_CODE_SIZE not defined +#ifndef KEXEC_CONTROL_PAGE_SIZE +#error KEXEC_CONTROL_PAGE_SIZE not defined #endif #ifndef KEXEC_ARCH diff --git a/kernel/kexec.c b/kernel/kexec.c index bfbbd120623..2810558802b 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -77,7 +77,7 @@ int kexec_should_crash(struct task_struct *p) * * The code for the transition from the current kernel to the * the new kernel is placed in the control_code_buffer, whose size - * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single + * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single * page of memory is necessary, but some architectures require more. 
* Because this memory must be identity mapped in the transition from * virtual to physical addresses it must live in the range @@ -242,7 +242,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, */ result = -ENOMEM; image->control_code_page = kimage_alloc_control_pages(image, - get_order(KEXEC_CONTROL_CODE_SIZE)); + get_order(KEXEC_CONTROL_PAGE_SIZE)); if (!image->control_code_page) { printk(KERN_ERR "Could not allocate control_code_buffer\n"); goto out; @@ -317,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, */ result = -ENOMEM; image->control_code_page = kimage_alloc_control_pages(image, - get_order(KEXEC_CONTROL_CODE_SIZE)); + get_order(KEXEC_CONTROL_PAGE_SIZE)); if (!image->control_code_page) { printk(KERN_ERR "Could not allocate control_code_buffer\n"); goto out; -- cgit v1.2.3-70-g09d2 From fb45daa69d287b394eca1619b3fadff7c0215c71 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 15 Aug 2008 00:40:23 -0700 Subject: kexec jump: check code size in control page Kexec/Kexec-jump require code size in control page is less than PAGE_SIZE/2. This patch add link-time checking for this. ASSERT() of ld link script is used as the link-time checking mechanism. [akpm@linux-foundation.org: build fix] Signed-off-by: Huang Ying Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: "Eric W. Biederman" Acked-by: Vivek Goyal Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/machine_kexec_32.c | 2 +- arch/x86/kernel/relocate_kernel_32.S | 10 +++++++--- arch/x86/kernel/vmlinux_32.lds.S | 8 ++++++++ include/asm-x86/kexec.h | 4 ++++ 4 files changed, 20 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 466450167de..5c8e7735c89 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -138,7 +138,7 @@ void machine_kexec(struct kimage *image) } control_page = page_address(image->control_code_page); - memcpy(control_page, relocate_kernel, PAGE_SIZE/2); + memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); relocate_kernel_ptr = control_page; page_list[PA_CONTROL_PAGE] = __pa(control_page); diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index 703310a9902..6f50664b2ba 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -20,10 +20,11 @@ #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define PAE_PGD_ATTR (_PAGE_PRESENT) -/* control_page + PAGE_SIZE/2 ~ control_page + PAGE_SIZE * 3/4 are - * used to save some data for jumping back +/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE + * ~ control_page + PAGE_SIZE are used as data storage and stack for + * jumping back */ -#define DATA(offset) (PAGE_SIZE/2+(offset)) +#define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset)) /* Minimal CPU state */ #define ESP DATA(0x0) @@ -376,3 +377,6 @@ swap_pages: popl %ebx popl %ebp ret + + .globl kexec_control_code_size +.set kexec_control_code_size, . 
- relocate_kernel diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index cdb2363697d..af5bdad8460 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -209,3 +209,11 @@ SECTIONS DWARF_DEBUG } + +#ifdef CONFIG_KEXEC +/* Link time checks */ +#include + +ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, + "kexec control code size is too big") +#endif diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h index f6fb3d21883..4246ab7dc98 100644 --- a/include/asm-x86/kexec.h +++ b/include/asm-x86/kexec.h @@ -41,6 +41,10 @@ # define PAGES_NR 17 #endif +#ifdef CONFIG_X86_32 +# define KEXEC_CONTROL_CODE_MAX_SIZE 2048 +#endif + #ifndef __ASSEMBLY__ #include -- cgit v1.2.3-70-g09d2 From ca195b7f6da3d5dde0bb85a7c322d7de73352653 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 15 Aug 2008 00:40:24 -0700 Subject: kexec jump: remove duplication of kexec_restart_prepare() Call kernel_restart_prepare() in kernel_kexec() instead of duplicating the code. Signed-off-by: Huang Ying Acked-by: Pavel Machek Acked-by: Vivek Goyal Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: "Eric W. Biederman" Cc: Vivek Goyal Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/reboot.h | 1 + kernel/kexec.c | 6 +----- kernel/sys.c | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/reboot.h b/include/linux/reboot.h index b93b541cf11..988e55fe649 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -59,6 +59,7 @@ extern void machine_crash_shutdown(struct pt_regs *); * Architecture independent implemenations of sys_reboot commands. */ +extern void kernel_restart_prepare(char *cmd); extern void kernel_restart(char *cmd); extern void kernel_halt(void); extern void kernel_power_off(void); diff --git a/kernel/kexec.c b/kernel/kexec.c index 2810558802b..b81682312dc 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1472,11 +1472,7 @@ int kernel_kexec(void) } else #endif { - blocking_notifier_call_chain(&reboot_notifier_list, - SYS_RESTART, NULL); - system_state = SYSTEM_RESTART; - device_shutdown(); - sysdev_shutdown(); + kernel_restart_prepare(NULL); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); } diff --git a/kernel/sys.c b/kernel/sys.c index c01858090a9..3dacb00a7f7 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -274,7 +274,7 @@ void emergency_restart(void) } EXPORT_SYMBOL_GPL(emergency_restart); -static void kernel_restart_prepare(char *cmd) +void kernel_restart_prepare(char *cmd) { blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; -- cgit v1.2.3-70-g09d2 From 9bdeb7b5d34f197dea7859d24475943395ffea5e Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 15 Aug 2008 00:40:25 -0700 Subject: kexec jump: __ftrace_enabled_save/restore Add __ftrace_enabled_save/restore, used to disable ftrace for a while. Now, this is used by kexec jump, which need a version without lock, for general situation, a locked version should be used. Signed-off-by: Huang Ying Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: "Eric W. 
Biederman" Cc: Vivek Goyal Cc: Ingo Molnar Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ftrace.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'include') diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f368d041e02..bb384068272 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -98,6 +98,27 @@ static inline void tracer_disable(void) #endif } +/* Ftrace disable/restore without lock. Some synchronization mechanism + * must be used to prevent ftrace_enabled to be changed between + * disable/restore. */ +static inline int __ftrace_enabled_save(void) +{ +#ifdef CONFIG_FTRACE + int saved_ftrace_enabled = ftrace_enabled; + ftrace_enabled = 0; + return saved_ftrace_enabled; +#else + return 0; +#endif +} + +static inline void __ftrace_enabled_restore(int enabled) +{ +#ifdef CONFIG_FTRACE + ftrace_enabled = enabled; +#endif +} + #ifdef CONFIG_FRAME_POINTER /* TODO: need to fix this for ARM */ # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -- cgit v1.2.3-70-g09d2 From be4de35263f59ca1f4740edfffbfb02cc3f2189e Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 15 Aug 2008 00:40:44 -0700 Subject: completions: uninline try_wait_for_completion and completion_done m68k fails to build with these functions inlined in completion.h. Move them out of line into sched.c and export them to avoid this problem. Signed-off-by: Dave Chinner Cc: Geert Uytterhoeven Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/completion.h | 46 ++-------------------------------------------- kernel/sched.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 44 deletions(-) (limited to 'include') diff --git a/include/linux/completion.h b/include/linux/completion.h index 57faa60de9b..02ef8835999 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -49,6 +49,8 @@ extern unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); extern unsigned long wait_for_completion_interruptible_timeout( struct completion *x, unsigned long timeout); +extern bool try_wait_for_completion(struct completion *x); +extern bool completion_done(struct completion *x); extern void complete(struct completion *); extern void complete_all(struct completion *); @@ -56,48 +58,4 @@ extern void complete_all(struct completion *); #define INIT_COMPLETION(x) ((x).done = 0) -/** - * try_wait_for_completion - try to decrement a completion without blocking - * @x: completion structure - * - * Returns: 0 if a decrement cannot be done without blocking - * 1 if a decrement succeeded. - * - * If a completion is being used as a counting completion, - * attempt to decrement the counter without blocking. This - * enables us to avoid waiting if the resource the completion - * is protecting is not available. - */ -static inline bool try_wait_for_completion(struct completion *x) -{ - int ret = 1; - - spin_lock_irq(&x->wait.lock); - if (!x->done) - ret = 0; - else - x->done--; - spin_unlock_irq(&x->wait.lock); - return ret; -} - -/** - * completion_done - Test to see if a completion has any waiters - * @x: completion structure - * - * Returns: 0 if there are waiters (wait_for_completion() in progress) - * 1 if there are no waiters. 
- * - */ -static inline bool completion_done(struct completion *x) -{ - int ret = 1; - - spin_lock_irq(&x->wait.lock); - if (!x->done) - ret = 0; - spin_unlock_irq(&x->wait.lock); - return ret; -} - #endif diff --git a/kernel/sched.c b/kernel/sched.c index d601fb0406c..95e6ad3c231 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4669,6 +4669,52 @@ int __sched wait_for_completion_killable(struct completion *x) } EXPORT_SYMBOL(wait_for_completion_killable); +/** + * try_wait_for_completion - try to decrement a completion without blocking + * @x: completion structure + * + * Returns: 0 if a decrement cannot be done without blocking + * 1 if a decrement succeeded. + * + * If a completion is being used as a counting completion, + * attempt to decrement the counter without blocking. This + * enables us to avoid waiting if the resource the completion + * is protecting is not available. + */ +bool try_wait_for_completion(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + else + x->done--; + spin_unlock_irq(&x->wait.lock); + return ret; +} +EXPORT_SYMBOL(try_wait_for_completion); + +/** + * completion_done - Test to see if a completion has any waiters + * @x: completion structure + * + * Returns: 0 if there are waiters (wait_for_completion() in progress) + * 1 if there are no waiters. + * + */ +bool completion_done(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + spin_unlock_irq(&x->wait.lock); + return ret; +} +EXPORT_SYMBOL(completion_done); + static long __sched sleep_on_common(wait_queue_head_t *q, int state, long timeout) { -- cgit v1.2.3-70-g09d2 From 024b246ed24492d6c2ee14c34d742b137fce1b94 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 15 Aug 2008 09:19:40 -0700 Subject: alpha: move include/asm-alpha to arch/alpha/include/asm Sam Ravnborg did the build-test that the direct header file move works, I'm just committing it. This is a pure move: mkdir arch/alpha/include git mv include/asm-alpha arch/alpha/include/asm with no other changes. 
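For readability, the move described above is shown below with one command per line. The two commands are exactly the ones quoted in the message; the trailing status check is only an assumed way to confirm that git recorded pure renames, and is not part of the original commit.

    mkdir arch/alpha/include
    git mv include/asm-alpha arch/alpha/include/asm

    # assumed verification step: every entry should appear as a rename (R),
    # with no content changes
    git status --short
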
Requested-and-tested-by: Sam Ravnborg Cc: Richard Henderson Cc: Ivan Kokshaysky Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/8253pit.h | 10 + arch/alpha/include/asm/Kbuild | 11 + arch/alpha/include/asm/a.out-core.h | 80 +++ arch/alpha/include/asm/a.out.h | 102 ++++ arch/alpha/include/asm/agp.h | 22 + arch/alpha/include/asm/agp_backend.h | 42 ++ arch/alpha/include/asm/atomic.h | 267 +++++++++ arch/alpha/include/asm/auxvec.h | 24 + arch/alpha/include/asm/barrier.h | 33 ++ arch/alpha/include/asm/bitops.h | 466 ++++++++++++++++ arch/alpha/include/asm/bug.h | 28 + arch/alpha/include/asm/bugs.h | 20 + arch/alpha/include/asm/byteorder.h | 47 ++ arch/alpha/include/asm/cache.h | 23 + arch/alpha/include/asm/cacheflush.h | 74 +++ arch/alpha/include/asm/checksum.h | 75 +++ arch/alpha/include/asm/compiler.h | 130 +++++ arch/alpha/include/asm/console.h | 75 +++ arch/alpha/include/asm/core_apecs.h | 517 +++++++++++++++++ arch/alpha/include/asm/core_cia.h | 500 +++++++++++++++++ arch/alpha/include/asm/core_irongate.h | 232 ++++++++ arch/alpha/include/asm/core_lca.h | 361 ++++++++++++ arch/alpha/include/asm/core_marvel.h | 378 +++++++++++++ arch/alpha/include/asm/core_mcpcia.h | 381 +++++++++++++ arch/alpha/include/asm/core_polaris.h | 110 ++++ arch/alpha/include/asm/core_t2.h | 633 +++++++++++++++++++++ arch/alpha/include/asm/core_titan.h | 410 ++++++++++++++ arch/alpha/include/asm/core_tsunami.h | 335 +++++++++++ arch/alpha/include/asm/core_wildfire.h | 318 +++++++++++ arch/alpha/include/asm/cputime.h | 6 + arch/alpha/include/asm/current.h | 9 + arch/alpha/include/asm/delay.h | 10 + arch/alpha/include/asm/device.h | 7 + arch/alpha/include/asm/div64.h | 1 + arch/alpha/include/asm/dma-mapping.h | 69 +++ arch/alpha/include/asm/dma.h | 376 +++++++++++++ arch/alpha/include/asm/elf.h | 165 ++++++ arch/alpha/include/asm/emergency-restart.h | 6 + arch/alpha/include/asm/err_common.h | 118 ++++ arch/alpha/include/asm/err_ev6.h | 6 + arch/alpha/include/asm/err_ev7.h | 202 +++++++ arch/alpha/include/asm/errno.h | 123 +++++ arch/alpha/include/asm/fb.h | 13 + arch/alpha/include/asm/fcntl.h | 43 ++ arch/alpha/include/asm/floppy.h | 115 ++++ arch/alpha/include/asm/fpu.h | 193 +++++++ arch/alpha/include/asm/futex.h | 6 + arch/alpha/include/asm/gct.h | 58 ++ arch/alpha/include/asm/gentrap.h | 37 ++ arch/alpha/include/asm/hardirq.h | 30 + arch/alpha/include/asm/hw_irq.h | 13 + arch/alpha/include/asm/hwrpb.h | 220 ++++++++ arch/alpha/include/asm/io.h | 577 +++++++++++++++++++ arch/alpha/include/asm/io_trivial.h | 131 +++++ arch/alpha/include/asm/ioctl.h | 66 +++ arch/alpha/include/asm/ioctls.h | 112 ++++ arch/alpha/include/asm/ipcbuf.h | 28 + arch/alpha/include/asm/irq.h | 91 +++ arch/alpha/include/asm/irq_regs.h | 1 + arch/alpha/include/asm/jensen.h | 346 ++++++++++++ arch/alpha/include/asm/kdebug.h | 1 + arch/alpha/include/asm/kmap_types.h | 32 ++ arch/alpha/include/asm/linkage.h | 6 + arch/alpha/include/asm/local.h | 118 ++++ arch/alpha/include/asm/machvec.h | 134 +++++ arch/alpha/include/asm/mc146818rtc.h | 27 + arch/alpha/include/asm/md.h | 13 + arch/alpha/include/asm/mman.h | 54 ++ arch/alpha/include/asm/mmu.h | 7 + arch/alpha/include/asm/mmu_context.h | 260 +++++++++ arch/alpha/include/asm/mmzone.h | 115 ++++ arch/alpha/include/asm/module.h | 23 + arch/alpha/include/asm/msgbuf.h | 27 + arch/alpha/include/asm/mutex.h | 9 + arch/alpha/include/asm/page.h | 98 ++++ arch/alpha/include/asm/pal.h | 51 ++ arch/alpha/include/asm/param.h | 27 + arch/alpha/include/asm/parport.h | 18 + arch/alpha/include/asm/pci.h | 
276 ++++++++++ arch/alpha/include/asm/percpu.h | 78 +++ arch/alpha/include/asm/pgalloc.h | 83 +++ arch/alpha/include/asm/pgtable.h | 380 +++++++++++++ arch/alpha/include/asm/poll.h | 1 + arch/alpha/include/asm/posix_types.h | 123 +++++ arch/alpha/include/asm/processor.h | 93 ++++ arch/alpha/include/asm/ptrace.h | 83 +++ arch/alpha/include/asm/reg.h | 52 ++ arch/alpha/include/asm/regdef.h | 44 ++ arch/alpha/include/asm/resource.h | 22 + arch/alpha/include/asm/rtc.h | 10 + arch/alpha/include/asm/rwsem.h | 259 +++++++++ arch/alpha/include/asm/scatterlist.h | 25 + arch/alpha/include/asm/sections.h | 7 + arch/alpha/include/asm/segment.h | 6 + arch/alpha/include/asm/sembuf.h | 22 + arch/alpha/include/asm/serial.h | 29 + arch/alpha/include/asm/setup.h | 6 + arch/alpha/include/asm/sfp-machine.h | 82 +++ arch/alpha/include/asm/shmbuf.h | 38 ++ arch/alpha/include/asm/shmparam.h | 6 + arch/alpha/include/asm/sigcontext.h | 34 ++ arch/alpha/include/asm/siginfo.h | 9 + arch/alpha/include/asm/signal.h | 172 ++++++ arch/alpha/include/asm/smp.h | 62 +++ arch/alpha/include/asm/socket.h | 70 +++ arch/alpha/include/asm/sockios.h | 16 + arch/alpha/include/asm/spinlock.h | 173 ++++++ arch/alpha/include/asm/spinlock_types.h | 20 + arch/alpha/include/asm/stat.h | 48 ++ arch/alpha/include/asm/statfs.h | 6 + arch/alpha/include/asm/string.h | 66 +++ arch/alpha/include/asm/suspend.h | 6 + arch/alpha/include/asm/sysinfo.h | 39 ++ arch/alpha/include/asm/system.h | 829 ++++++++++++++++++++++++++++ arch/alpha/include/asm/termbits.h | 200 +++++++ arch/alpha/include/asm/termios.h | 146 +++++ arch/alpha/include/asm/thread_info.h | 114 ++++ arch/alpha/include/asm/timex.h | 31 ++ arch/alpha/include/asm/tlb.h | 15 + arch/alpha/include/asm/tlbflush.h | 151 +++++ arch/alpha/include/asm/topology.h | 47 ++ arch/alpha/include/asm/types.h | 33 ++ arch/alpha/include/asm/uaccess.h | 511 +++++++++++++++++ arch/alpha/include/asm/ucontext.h | 13 + arch/alpha/include/asm/unaligned.h | 11 + arch/alpha/include/asm/unistd.h | 464 ++++++++++++++++ arch/alpha/include/asm/user.h | 53 ++ arch/alpha/include/asm/vga.h | 82 +++ arch/alpha/include/asm/xor.h | 855 +++++++++++++++++++++++++++++ include/asm-alpha/8253pit.h | 10 - include/asm-alpha/Kbuild | 11 - include/asm-alpha/a.out-core.h | 80 --- include/asm-alpha/a.out.h | 102 ---- include/asm-alpha/agp.h | 22 - include/asm-alpha/agp_backend.h | 42 -- include/asm-alpha/atomic.h | 267 --------- include/asm-alpha/auxvec.h | 24 - include/asm-alpha/barrier.h | 33 -- include/asm-alpha/bitops.h | 466 ---------------- include/asm-alpha/bug.h | 28 - include/asm-alpha/bugs.h | 20 - include/asm-alpha/byteorder.h | 47 -- include/asm-alpha/cache.h | 23 - include/asm-alpha/cacheflush.h | 74 --- include/asm-alpha/checksum.h | 75 --- include/asm-alpha/compiler.h | 130 ----- include/asm-alpha/console.h | 75 --- include/asm-alpha/core_apecs.h | 517 ----------------- include/asm-alpha/core_cia.h | 500 ----------------- include/asm-alpha/core_irongate.h | 232 -------- include/asm-alpha/core_lca.h | 361 ------------ include/asm-alpha/core_marvel.h | 378 ------------- include/asm-alpha/core_mcpcia.h | 381 ------------- include/asm-alpha/core_polaris.h | 110 ---- include/asm-alpha/core_t2.h | 633 --------------------- include/asm-alpha/core_titan.h | 410 -------------- include/asm-alpha/core_tsunami.h | 335 ----------- include/asm-alpha/core_wildfire.h | 318 ----------- include/asm-alpha/cputime.h | 6 - include/asm-alpha/current.h | 9 - include/asm-alpha/delay.h | 10 - include/asm-alpha/device.h | 7 - 
include/asm-alpha/div64.h | 1 - include/asm-alpha/dma-mapping.h | 69 --- include/asm-alpha/dma.h | 376 ------------- include/asm-alpha/elf.h | 165 ------ include/asm-alpha/emergency-restart.h | 6 - include/asm-alpha/err_common.h | 118 ---- include/asm-alpha/err_ev6.h | 6 - include/asm-alpha/err_ev7.h | 202 ------- include/asm-alpha/errno.h | 123 ----- include/asm-alpha/fb.h | 13 - include/asm-alpha/fcntl.h | 43 -- include/asm-alpha/floppy.h | 115 ---- include/asm-alpha/fpu.h | 193 ------- include/asm-alpha/futex.h | 6 - include/asm-alpha/gct.h | 58 -- include/asm-alpha/gentrap.h | 37 -- include/asm-alpha/hardirq.h | 30 - include/asm-alpha/hw_irq.h | 13 - include/asm-alpha/hwrpb.h | 220 -------- include/asm-alpha/io.h | 577 ------------------- include/asm-alpha/io_trivial.h | 131 ----- include/asm-alpha/ioctl.h | 66 --- include/asm-alpha/ioctls.h | 112 ---- include/asm-alpha/ipcbuf.h | 28 - include/asm-alpha/irq.h | 91 --- include/asm-alpha/irq_regs.h | 1 - include/asm-alpha/jensen.h | 346 ------------ include/asm-alpha/kdebug.h | 1 - include/asm-alpha/kmap_types.h | 32 -- include/asm-alpha/linkage.h | 6 - include/asm-alpha/local.h | 118 ---- include/asm-alpha/machvec.h | 134 ----- include/asm-alpha/mc146818rtc.h | 27 - include/asm-alpha/md.h | 13 - include/asm-alpha/mman.h | 54 -- include/asm-alpha/mmu.h | 7 - include/asm-alpha/mmu_context.h | 260 --------- include/asm-alpha/mmzone.h | 115 ---- include/asm-alpha/module.h | 23 - include/asm-alpha/msgbuf.h | 27 - include/asm-alpha/mutex.h | 9 - include/asm-alpha/page.h | 98 ---- include/asm-alpha/pal.h | 51 -- include/asm-alpha/param.h | 27 - include/asm-alpha/parport.h | 18 - include/asm-alpha/pci.h | 276 ---------- include/asm-alpha/percpu.h | 78 --- include/asm-alpha/pgalloc.h | 83 --- include/asm-alpha/pgtable.h | 380 ------------- include/asm-alpha/poll.h | 1 - include/asm-alpha/posix_types.h | 123 ----- include/asm-alpha/processor.h | 93 ---- include/asm-alpha/ptrace.h | 83 --- include/asm-alpha/reg.h | 52 -- include/asm-alpha/regdef.h | 44 -- include/asm-alpha/resource.h | 22 - include/asm-alpha/rtc.h | 10 - include/asm-alpha/rwsem.h | 259 --------- include/asm-alpha/scatterlist.h | 25 - include/asm-alpha/sections.h | 7 - include/asm-alpha/segment.h | 6 - include/asm-alpha/sembuf.h | 22 - include/asm-alpha/serial.h | 29 - include/asm-alpha/setup.h | 6 - include/asm-alpha/sfp-machine.h | 82 --- include/asm-alpha/shmbuf.h | 38 -- include/asm-alpha/shmparam.h | 6 - include/asm-alpha/sigcontext.h | 34 -- include/asm-alpha/siginfo.h | 9 - include/asm-alpha/signal.h | 172 ------ include/asm-alpha/smp.h | 62 --- include/asm-alpha/socket.h | 70 --- include/asm-alpha/sockios.h | 16 - include/asm-alpha/spinlock.h | 173 ------ include/asm-alpha/spinlock_types.h | 20 - include/asm-alpha/stat.h | 48 -- include/asm-alpha/statfs.h | 6 - include/asm-alpha/string.h | 66 --- include/asm-alpha/suspend.h | 6 - include/asm-alpha/sysinfo.h | 39 -- include/asm-alpha/system.h | 829 ---------------------------- include/asm-alpha/termbits.h | 200 ------- include/asm-alpha/termios.h | 146 ----- include/asm-alpha/thread_info.h | 114 ---- include/asm-alpha/timex.h | 31 -- include/asm-alpha/tlb.h | 15 - include/asm-alpha/tlbflush.h | 151 ----- include/asm-alpha/topology.h | 47 -- include/asm-alpha/types.h | 33 -- include/asm-alpha/uaccess.h | 511 ----------------- include/asm-alpha/ucontext.h | 13 - include/asm-alpha/unaligned.h | 11 - include/asm-alpha/unistd.h | 464 ---------------- include/asm-alpha/user.h | 53 -- include/asm-alpha/vga.h | 82 --- 
include/asm-alpha/xor.h | 855 ----------------------------- 258 files changed, 15903 insertions(+), 15903 deletions(-) create mode 100644 arch/alpha/include/asm/8253pit.h create mode 100644 arch/alpha/include/asm/Kbuild create mode 100644 arch/alpha/include/asm/a.out-core.h create mode 100644 arch/alpha/include/asm/a.out.h create mode 100644 arch/alpha/include/asm/agp.h create mode 100644 arch/alpha/include/asm/agp_backend.h create mode 100644 arch/alpha/include/asm/atomic.h create mode 100644 arch/alpha/include/asm/auxvec.h create mode 100644 arch/alpha/include/asm/barrier.h create mode 100644 arch/alpha/include/asm/bitops.h create mode 100644 arch/alpha/include/asm/bug.h create mode 100644 arch/alpha/include/asm/bugs.h create mode 100644 arch/alpha/include/asm/byteorder.h create mode 100644 arch/alpha/include/asm/cache.h create mode 100644 arch/alpha/include/asm/cacheflush.h create mode 100644 arch/alpha/include/asm/checksum.h create mode 100644 arch/alpha/include/asm/compiler.h create mode 100644 arch/alpha/include/asm/console.h create mode 100644 arch/alpha/include/asm/core_apecs.h create mode 100644 arch/alpha/include/asm/core_cia.h create mode 100644 arch/alpha/include/asm/core_irongate.h create mode 100644 arch/alpha/include/asm/core_lca.h create mode 100644 arch/alpha/include/asm/core_marvel.h create mode 100644 arch/alpha/include/asm/core_mcpcia.h create mode 100644 arch/alpha/include/asm/core_polaris.h create mode 100644 arch/alpha/include/asm/core_t2.h create mode 100644 arch/alpha/include/asm/core_titan.h create mode 100644 arch/alpha/include/asm/core_tsunami.h create mode 100644 arch/alpha/include/asm/core_wildfire.h create mode 100644 arch/alpha/include/asm/cputime.h create mode 100644 arch/alpha/include/asm/current.h create mode 100644 arch/alpha/include/asm/delay.h create mode 100644 arch/alpha/include/asm/device.h create mode 100644 arch/alpha/include/asm/div64.h create mode 100644 arch/alpha/include/asm/dma-mapping.h create mode 100644 arch/alpha/include/asm/dma.h create mode 100644 arch/alpha/include/asm/elf.h create mode 100644 arch/alpha/include/asm/emergency-restart.h create mode 100644 arch/alpha/include/asm/err_common.h create mode 100644 arch/alpha/include/asm/err_ev6.h create mode 100644 arch/alpha/include/asm/err_ev7.h create mode 100644 arch/alpha/include/asm/errno.h create mode 100644 arch/alpha/include/asm/fb.h create mode 100644 arch/alpha/include/asm/fcntl.h create mode 100644 arch/alpha/include/asm/floppy.h create mode 100644 arch/alpha/include/asm/fpu.h create mode 100644 arch/alpha/include/asm/futex.h create mode 100644 arch/alpha/include/asm/gct.h create mode 100644 arch/alpha/include/asm/gentrap.h create mode 100644 arch/alpha/include/asm/hardirq.h create mode 100644 arch/alpha/include/asm/hw_irq.h create mode 100644 arch/alpha/include/asm/hwrpb.h create mode 100644 arch/alpha/include/asm/io.h create mode 100644 arch/alpha/include/asm/io_trivial.h create mode 100644 arch/alpha/include/asm/ioctl.h create mode 100644 arch/alpha/include/asm/ioctls.h create mode 100644 arch/alpha/include/asm/ipcbuf.h create mode 100644 arch/alpha/include/asm/irq.h create mode 100644 arch/alpha/include/asm/irq_regs.h create mode 100644 arch/alpha/include/asm/jensen.h create mode 100644 arch/alpha/include/asm/kdebug.h create mode 100644 arch/alpha/include/asm/kmap_types.h create mode 100644 arch/alpha/include/asm/linkage.h create mode 100644 arch/alpha/include/asm/local.h create mode 100644 arch/alpha/include/asm/machvec.h create mode 100644 
arch/alpha/include/asm/mc146818rtc.h create mode 100644 arch/alpha/include/asm/md.h create mode 100644 arch/alpha/include/asm/mman.h create mode 100644 arch/alpha/include/asm/mmu.h create mode 100644 arch/alpha/include/asm/mmu_context.h create mode 100644 arch/alpha/include/asm/mmzone.h create mode 100644 arch/alpha/include/asm/module.h create mode 100644 arch/alpha/include/asm/msgbuf.h create mode 100644 arch/alpha/include/asm/mutex.h create mode 100644 arch/alpha/include/asm/page.h create mode 100644 arch/alpha/include/asm/pal.h create mode 100644 arch/alpha/include/asm/param.h create mode 100644 arch/alpha/include/asm/parport.h create mode 100644 arch/alpha/include/asm/pci.h create mode 100644 arch/alpha/include/asm/percpu.h create mode 100644 arch/alpha/include/asm/pgalloc.h create mode 100644 arch/alpha/include/asm/pgtable.h create mode 100644 arch/alpha/include/asm/poll.h create mode 100644 arch/alpha/include/asm/posix_types.h create mode 100644 arch/alpha/include/asm/processor.h create mode 100644 arch/alpha/include/asm/ptrace.h create mode 100644 arch/alpha/include/asm/reg.h create mode 100644 arch/alpha/include/asm/regdef.h create mode 100644 arch/alpha/include/asm/resource.h create mode 100644 arch/alpha/include/asm/rtc.h create mode 100644 arch/alpha/include/asm/rwsem.h create mode 100644 arch/alpha/include/asm/scatterlist.h create mode 100644 arch/alpha/include/asm/sections.h create mode 100644 arch/alpha/include/asm/segment.h create mode 100644 arch/alpha/include/asm/sembuf.h create mode 100644 arch/alpha/include/asm/serial.h create mode 100644 arch/alpha/include/asm/setup.h create mode 100644 arch/alpha/include/asm/sfp-machine.h create mode 100644 arch/alpha/include/asm/shmbuf.h create mode 100644 arch/alpha/include/asm/shmparam.h create mode 100644 arch/alpha/include/asm/sigcontext.h create mode 100644 arch/alpha/include/asm/siginfo.h create mode 100644 arch/alpha/include/asm/signal.h create mode 100644 arch/alpha/include/asm/smp.h create mode 100644 arch/alpha/include/asm/socket.h create mode 100644 arch/alpha/include/asm/sockios.h create mode 100644 arch/alpha/include/asm/spinlock.h create mode 100644 arch/alpha/include/asm/spinlock_types.h create mode 100644 arch/alpha/include/asm/stat.h create mode 100644 arch/alpha/include/asm/statfs.h create mode 100644 arch/alpha/include/asm/string.h create mode 100644 arch/alpha/include/asm/suspend.h create mode 100644 arch/alpha/include/asm/sysinfo.h create mode 100644 arch/alpha/include/asm/system.h create mode 100644 arch/alpha/include/asm/termbits.h create mode 100644 arch/alpha/include/asm/termios.h create mode 100644 arch/alpha/include/asm/thread_info.h create mode 100644 arch/alpha/include/asm/timex.h create mode 100644 arch/alpha/include/asm/tlb.h create mode 100644 arch/alpha/include/asm/tlbflush.h create mode 100644 arch/alpha/include/asm/topology.h create mode 100644 arch/alpha/include/asm/types.h create mode 100644 arch/alpha/include/asm/uaccess.h create mode 100644 arch/alpha/include/asm/ucontext.h create mode 100644 arch/alpha/include/asm/unaligned.h create mode 100644 arch/alpha/include/asm/unistd.h create mode 100644 arch/alpha/include/asm/user.h create mode 100644 arch/alpha/include/asm/vga.h create mode 100644 arch/alpha/include/asm/xor.h delete mode 100644 include/asm-alpha/8253pit.h delete mode 100644 include/asm-alpha/Kbuild delete mode 100644 include/asm-alpha/a.out-core.h delete mode 100644 include/asm-alpha/a.out.h delete mode 100644 include/asm-alpha/agp.h delete mode 100644 include/asm-alpha/agp_backend.h 
delete mode 100644 include/asm-alpha/atomic.h delete mode 100644 include/asm-alpha/auxvec.h delete mode 100644 include/asm-alpha/barrier.h delete mode 100644 include/asm-alpha/bitops.h delete mode 100644 include/asm-alpha/bug.h delete mode 100644 include/asm-alpha/bugs.h delete mode 100644 include/asm-alpha/byteorder.h delete mode 100644 include/asm-alpha/cache.h delete mode 100644 include/asm-alpha/cacheflush.h delete mode 100644 include/asm-alpha/checksum.h delete mode 100644 include/asm-alpha/compiler.h delete mode 100644 include/asm-alpha/console.h delete mode 100644 include/asm-alpha/core_apecs.h delete mode 100644 include/asm-alpha/core_cia.h delete mode 100644 include/asm-alpha/core_irongate.h delete mode 100644 include/asm-alpha/core_lca.h delete mode 100644 include/asm-alpha/core_marvel.h delete mode 100644 include/asm-alpha/core_mcpcia.h delete mode 100644 include/asm-alpha/core_polaris.h delete mode 100644 include/asm-alpha/core_t2.h delete mode 100644 include/asm-alpha/core_titan.h delete mode 100644 include/asm-alpha/core_tsunami.h delete mode 100644 include/asm-alpha/core_wildfire.h delete mode 100644 include/asm-alpha/cputime.h delete mode 100644 include/asm-alpha/current.h delete mode 100644 include/asm-alpha/delay.h delete mode 100644 include/asm-alpha/device.h delete mode 100644 include/asm-alpha/div64.h delete mode 100644 include/asm-alpha/dma-mapping.h delete mode 100644 include/asm-alpha/dma.h delete mode 100644 include/asm-alpha/elf.h delete mode 100644 include/asm-alpha/emergency-restart.h delete mode 100644 include/asm-alpha/err_common.h delete mode 100644 include/asm-alpha/err_ev6.h delete mode 100644 include/asm-alpha/err_ev7.h delete mode 100644 include/asm-alpha/errno.h delete mode 100644 include/asm-alpha/fb.h delete mode 100644 include/asm-alpha/fcntl.h delete mode 100644 include/asm-alpha/floppy.h delete mode 100644 include/asm-alpha/fpu.h delete mode 100644 include/asm-alpha/futex.h delete mode 100644 include/asm-alpha/gct.h delete mode 100644 include/asm-alpha/gentrap.h delete mode 100644 include/asm-alpha/hardirq.h delete mode 100644 include/asm-alpha/hw_irq.h delete mode 100644 include/asm-alpha/hwrpb.h delete mode 100644 include/asm-alpha/io.h delete mode 100644 include/asm-alpha/io_trivial.h delete mode 100644 include/asm-alpha/ioctl.h delete mode 100644 include/asm-alpha/ioctls.h delete mode 100644 include/asm-alpha/ipcbuf.h delete mode 100644 include/asm-alpha/irq.h delete mode 100644 include/asm-alpha/irq_regs.h delete mode 100644 include/asm-alpha/jensen.h delete mode 100644 include/asm-alpha/kdebug.h delete mode 100644 include/asm-alpha/kmap_types.h delete mode 100644 include/asm-alpha/linkage.h delete mode 100644 include/asm-alpha/local.h delete mode 100644 include/asm-alpha/machvec.h delete mode 100644 include/asm-alpha/mc146818rtc.h delete mode 100644 include/asm-alpha/md.h delete mode 100644 include/asm-alpha/mman.h delete mode 100644 include/asm-alpha/mmu.h delete mode 100644 include/asm-alpha/mmu_context.h delete mode 100644 include/asm-alpha/mmzone.h delete mode 100644 include/asm-alpha/module.h delete mode 100644 include/asm-alpha/msgbuf.h delete mode 100644 include/asm-alpha/mutex.h delete mode 100644 include/asm-alpha/page.h delete mode 100644 include/asm-alpha/pal.h delete mode 100644 include/asm-alpha/param.h delete mode 100644 include/asm-alpha/parport.h delete mode 100644 include/asm-alpha/pci.h delete mode 100644 include/asm-alpha/percpu.h delete mode 100644 include/asm-alpha/pgalloc.h delete mode 100644 include/asm-alpha/pgtable.h 
delete mode 100644 include/asm-alpha/poll.h delete mode 100644 include/asm-alpha/posix_types.h delete mode 100644 include/asm-alpha/processor.h delete mode 100644 include/asm-alpha/ptrace.h delete mode 100644 include/asm-alpha/reg.h delete mode 100644 include/asm-alpha/regdef.h delete mode 100644 include/asm-alpha/resource.h delete mode 100644 include/asm-alpha/rtc.h delete mode 100644 include/asm-alpha/rwsem.h delete mode 100644 include/asm-alpha/scatterlist.h delete mode 100644 include/asm-alpha/sections.h delete mode 100644 include/asm-alpha/segment.h delete mode 100644 include/asm-alpha/sembuf.h delete mode 100644 include/asm-alpha/serial.h delete mode 100644 include/asm-alpha/setup.h delete mode 100644 include/asm-alpha/sfp-machine.h delete mode 100644 include/asm-alpha/shmbuf.h delete mode 100644 include/asm-alpha/shmparam.h delete mode 100644 include/asm-alpha/sigcontext.h delete mode 100644 include/asm-alpha/siginfo.h delete mode 100644 include/asm-alpha/signal.h delete mode 100644 include/asm-alpha/smp.h delete mode 100644 include/asm-alpha/socket.h delete mode 100644 include/asm-alpha/sockios.h delete mode 100644 include/asm-alpha/spinlock.h delete mode 100644 include/asm-alpha/spinlock_types.h delete mode 100644 include/asm-alpha/stat.h delete mode 100644 include/asm-alpha/statfs.h delete mode 100644 include/asm-alpha/string.h delete mode 100644 include/asm-alpha/suspend.h delete mode 100644 include/asm-alpha/sysinfo.h delete mode 100644 include/asm-alpha/system.h delete mode 100644 include/asm-alpha/termbits.h delete mode 100644 include/asm-alpha/termios.h delete mode 100644 include/asm-alpha/thread_info.h delete mode 100644 include/asm-alpha/timex.h delete mode 100644 include/asm-alpha/tlb.h delete mode 100644 include/asm-alpha/tlbflush.h delete mode 100644 include/asm-alpha/topology.h delete mode 100644 include/asm-alpha/types.h delete mode 100644 include/asm-alpha/uaccess.h delete mode 100644 include/asm-alpha/ucontext.h delete mode 100644 include/asm-alpha/unaligned.h delete mode 100644 include/asm-alpha/unistd.h delete mode 100644 include/asm-alpha/user.h delete mode 100644 include/asm-alpha/vga.h delete mode 100644 include/asm-alpha/xor.h (limited to 'include') diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h new file mode 100644 index 00000000000..fef5c1450e4 --- /dev/null +++ b/arch/alpha/include/asm/8253pit.h @@ -0,0 +1,10 @@ +/* + * 8253/8254 Programmable Interval Timer + */ + +#ifndef _8253PIT_H +#define _8253PIT_H + +#define PIT_TICK_RATE 1193180UL + +#endif diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild new file mode 100644 index 00000000000..b7c8f188b31 --- /dev/null +++ b/arch/alpha/include/asm/Kbuild @@ -0,0 +1,11 @@ +include include/asm-generic/Kbuild.asm + +header-y += gentrap.h +header-y += regdef.h +header-y += pal.h +header-y += reg.h + +unifdef-y += console.h +unifdef-y += fpu.h +unifdef-y += sysinfo.h +unifdef-y += compiler.h diff --git a/arch/alpha/include/asm/a.out-core.h b/arch/alpha/include/asm/a.out-core.h new file mode 100644 index 00000000000..9e33e92e524 --- /dev/null +++ b/arch/alpha/include/asm/a.out-core.h @@ -0,0 +1,80 @@ +/* a.out coredump register dumper + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _ASM_A_OUT_CORE_H +#define _ASM_A_OUT_CORE_H + +#ifdef __KERNEL__ + +#include + +/* + * Fill in the user structure for an ECOFF core dump. + */ +static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump) +{ + /* switch stack follows right below pt_regs: */ + struct switch_stack * sw = ((struct switch_stack *) pt) - 1; + + dump->magic = CMAGIC; + dump->start_code = current->mm->start_code; + dump->start_data = current->mm->start_data; + dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); + dump->u_tsize = ((current->mm->end_code - dump->start_code) + >> PAGE_SHIFT); + dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) + >> PAGE_SHIFT); + dump->u_ssize = (current->mm->start_stack - dump->start_stack + + PAGE_SIZE-1) >> PAGE_SHIFT; + + /* + * We store the registers in an order/format that is + * compatible with DEC Unix/OSF/1 as this makes life easier + * for gdb. + */ + dump->regs[EF_V0] = pt->r0; + dump->regs[EF_T0] = pt->r1; + dump->regs[EF_T1] = pt->r2; + dump->regs[EF_T2] = pt->r3; + dump->regs[EF_T3] = pt->r4; + dump->regs[EF_T4] = pt->r5; + dump->regs[EF_T5] = pt->r6; + dump->regs[EF_T6] = pt->r7; + dump->regs[EF_T7] = pt->r8; + dump->regs[EF_S0] = sw->r9; + dump->regs[EF_S1] = sw->r10; + dump->regs[EF_S2] = sw->r11; + dump->regs[EF_S3] = sw->r12; + dump->regs[EF_S4] = sw->r13; + dump->regs[EF_S5] = sw->r14; + dump->regs[EF_S6] = sw->r15; + dump->regs[EF_A3] = pt->r19; + dump->regs[EF_A4] = pt->r20; + dump->regs[EF_A5] = pt->r21; + dump->regs[EF_T8] = pt->r22; + dump->regs[EF_T9] = pt->r23; + dump->regs[EF_T10] = pt->r24; + dump->regs[EF_T11] = pt->r25; + dump->regs[EF_RA] = pt->r26; + dump->regs[EF_T12] = pt->r27; + dump->regs[EF_AT] = pt->r28; + dump->regs[EF_SP] = rdusp(); + dump->regs[EF_PS] = pt->ps; + dump->regs[EF_PC] = pt->pc; + dump->regs[EF_GP] = pt->gp; + dump->regs[EF_A0] = pt->r16; + dump->regs[EF_A1] = pt->r17; + dump->regs[EF_A2] = pt->r18; + memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/alpha/include/asm/a.out.h b/arch/alpha/include/asm/a.out.h new file mode 100644 index 00000000000..02ce8473870 --- /dev/null +++ b/arch/alpha/include/asm/a.out.h @@ -0,0 +1,102 @@ +#ifndef __ALPHA_A_OUT_H__ +#define __ALPHA_A_OUT_H__ + +#include + +/* + * OSF/1 ECOFF header structs. ECOFF files consist of: + * - a file header (struct filehdr), + * - an a.out header (struct aouthdr), + * - one or more section headers (struct scnhdr). + * The filhdr's "f_nscns" field contains the + * number of section headers. + */ + +struct filehdr +{ + /* OSF/1 "file" header */ + __u16 f_magic, f_nscns; + __u32 f_timdat; + __u64 f_symptr; + __u32 f_nsyms; + __u16 f_opthdr, f_flags; +}; + +struct aouthdr +{ + __u64 info; /* after that it looks quite normal.. 
*/ + __u64 tsize; + __u64 dsize; + __u64 bsize; + __u64 entry; + __u64 text_start; /* with a few additions that actually make sense */ + __u64 data_start; + __u64 bss_start; + __u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */ + __u64 gpvalue; +}; + +struct scnhdr +{ + char s_name[8]; + __u64 s_paddr; + __u64 s_vaddr; + __u64 s_size; + __u64 s_scnptr; + __u64 s_relptr; + __u64 s_lnnoptr; + __u16 s_nreloc; + __u16 s_nlnno; + __u32 s_flags; +}; + +struct exec +{ + /* OSF/1 "file" header */ + struct filehdr fh; + struct aouthdr ah; +}; + +/* + * Define's so that the kernel exec code can access the a.out header + * fields... + */ +#define a_info ah.info +#define a_text ah.tsize +#define a_data ah.dsize +#define a_bss ah.bsize +#define a_entry ah.entry +#define a_textstart ah.text_start +#define a_datastart ah.data_start +#define a_bssstart ah.bss_start +#define a_gprmask ah.gprmask +#define a_fprmask ah.fprmask +#define a_gpvalue ah.gpvalue + +#define N_TXTADDR(x) ((x).a_textstart) +#define N_DATADDR(x) ((x).a_datastart) +#define N_BSSADDR(x) ((x).a_bssstart) +#define N_DRSIZE(x) 0 +#define N_TRSIZE(x) 0 +#define N_SYMSIZE(x) 0 + +#define AOUTHSZ sizeof(struct aouthdr) +#define SCNHSZ sizeof(struct scnhdr) +#define SCNROUND 16 + +#define N_TXTOFF(x) \ + ((long) N_MAGIC(x) == ZMAGIC ? 0 : \ + (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1)) + +#ifdef __KERNEL__ + +/* Assume that start addresses below 4G belong to a TASO application. + Unfortunately, there is no proper bit in the exec header to check. + Worse, we have to notice the start address before swapping to use + /sbin/loader, which of course is _not_ a TASO application. */ +#define SET_AOUT_PERSONALITY(BFPM, EX) \ + set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000L \ + ? ADDR_LIMIT_32BIT : 0) | PER_OSF4)) + +#endif /* __KERNEL__ */ +#endif /* __A_OUT_GNU_H__ */ diff --git a/arch/alpha/include/asm/agp.h b/arch/alpha/include/asm/agp.h new file mode 100644 index 00000000000..26c17913529 --- /dev/null +++ b/arch/alpha/include/asm/agp.h @@ -0,0 +1,22 @@ +#ifndef AGP_H +#define AGP_H 1 + +#include + +/* dummy for now */ + +#define map_page_into_agp(page) +#define unmap_page_from_agp(page) +#define flush_agp_cache() mb() + +/* Convert a physical address to an address suitable for the GART. */ +#define phys_to_gart(x) (x) +#define gart_to_phys(x) (x) + +/* GATT allocation. Returns/accepts GATT kernel virtual address. 
*/ +#define alloc_gatt_pages(order) \ + ((char *)__get_free_pages(GFP_KERNEL, (order))) +#define free_gatt_pages(table, order) \ + free_pages((unsigned long)(table), (order)) + +#endif diff --git a/arch/alpha/include/asm/agp_backend.h b/arch/alpha/include/asm/agp_backend.h new file mode 100644 index 00000000000..55dd44a2cea --- /dev/null +++ b/arch/alpha/include/asm/agp_backend.h @@ -0,0 +1,42 @@ +#ifndef _ALPHA_AGP_BACKEND_H +#define _ALPHA_AGP_BACKEND_H 1 + +typedef union _alpha_agp_mode { + struct { + u32 rate : 3; + u32 reserved0 : 1; + u32 fw : 1; + u32 fourgb : 1; + u32 reserved1 : 2; + u32 enable : 1; + u32 sba : 1; + u32 reserved2 : 14; + u32 rq : 8; + } bits; + u32 lw; +} alpha_agp_mode; + +typedef struct _alpha_agp_info { + struct pci_controller *hose; + struct { + dma_addr_t bus_base; + unsigned long size; + void *sysdata; + } aperture; + alpha_agp_mode capability; + alpha_agp_mode mode; + void *private; + struct alpha_agp_ops *ops; +} alpha_agp_info; + +struct alpha_agp_ops { + int (*setup)(alpha_agp_info *); + void (*cleanup)(alpha_agp_info *); + int (*configure)(alpha_agp_info *); + int (*bind)(alpha_agp_info *, off_t, struct agp_memory *); + int (*unbind)(alpha_agp_info *, off_t, struct agp_memory *); + unsigned long (*translate)(alpha_agp_info *, dma_addr_t); +}; + + +#endif /* _ALPHA_AGP_BACKEND_H */ diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h new file mode 100644 index 00000000000..ca88e54dec9 --- /dev/null +++ b/arch/alpha/include/asm/atomic.h @@ -0,0 +1,267 @@ +#ifndef _ALPHA_ATOMIC_H +#define _ALPHA_ATOMIC_H + +#include +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc... + * + * But use these as seldom as possible since they are much slower + * than regular operations. + */ + + +/* + * Counter is volatile to make sure gcc doesn't try to be clever + * and move things around on us. We need to use _exactly_ the address + * the user gave us, not some alias that contains the same information. + */ +typedef struct { volatile int counter; } atomic_t; +typedef struct { volatile long counter; } atomic64_t; + +#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) +#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) + +#define atomic_read(v) ((v)->counter + 0) +#define atomic64_read(v) ((v)->counter + 0) + +#define atomic_set(v,i) ((v)->counter = (i)) +#define atomic64_set(v,i) ((v)->counter = (i)) + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. 
+ */ + +static __inline__ void atomic_add(int i, atomic_t * v) +{ + unsigned long temp; + __asm__ __volatile__( + "1: ldl_l %0,%1\n" + " addl %0,%2,%0\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter) + :"Ir" (i), "m" (v->counter)); +} + +static __inline__ void atomic64_add(long i, atomic64_t * v) +{ + unsigned long temp; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%2,%0\n" + " stq_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter) + :"Ir" (i), "m" (v->counter)); +} + +static __inline__ void atomic_sub(int i, atomic_t * v) +{ + unsigned long temp; + __asm__ __volatile__( + "1: ldl_l %0,%1\n" + " subl %0,%2,%0\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter) + :"Ir" (i), "m" (v->counter)); +} + +static __inline__ void atomic64_sub(long i, atomic64_t * v) +{ + unsigned long temp; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " subq %0,%2,%0\n" + " stq_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter) + :"Ir" (i), "m" (v->counter)); +} + + +/* + * Same as above, but return the result value + */ +static inline int atomic_add_return(int i, atomic_t *v) +{ + long temp, result; + smp_mb(); + __asm__ __volatile__( + "1: ldl_l %0,%1\n" + " addl %0,%3,%2\n" + " addl %0,%3,%0\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter), "=&r" (result) + :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); + return result; +} + +static __inline__ long atomic64_add_return(long i, atomic64_t * v) +{ + long temp, result; + smp_mb(); + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%3,%2\n" + " addq %0,%3,%0\n" + " stq_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter), "=&r" (result) + :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); + return result; +} + +static __inline__ long atomic_sub_return(int i, atomic_t * v) +{ + long temp, result; + smp_mb(); + __asm__ __volatile__( + "1: ldl_l %0,%1\n" + " subl %0,%3,%2\n" + " subl %0,%3,%0\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter), "=&r" (result) + :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); + return result; +} + +static __inline__ long atomic64_sub_return(long i, atomic64_t * v) +{ + long temp, result; + smp_mb(); + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " subq %0,%3,%2\n" + " subq %0,%3,%0\n" + " stq_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (v->counter), "=&r" (result) + :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); + return result; +} + +#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) +#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) + +#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) + +/** + * atomic_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. 
+ */ +static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +{ + int c, old; + c = atomic_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + +#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +/** + * atomic64_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns non-zero if @v was not @u, and zero otherwise. + */ +static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) +{ + long c, old; + c = atomic64_read(v); + for (;;) { + if (unlikely(c == (u))) + break; + old = atomic64_cmpxchg((v), c, c + (a)); + if (likely(old == c)) + break; + c = old; + } + return c != (u); +} + +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) + +#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) +#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) + +#define atomic_dec_return(v) atomic_sub_return(1,(v)) +#define atomic64_dec_return(v) atomic64_sub_return(1,(v)) + +#define atomic_inc_return(v) atomic_add_return(1,(v)) +#define atomic64_inc_return(v) atomic64_add_return(1,(v)) + +#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) +#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) + +#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) +#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0) + +#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) +#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) + +#define atomic_inc(v) atomic_add(1,(v)) +#define atomic64_inc(v) atomic64_add(1,(v)) + +#define atomic_dec(v) atomic_sub(1,(v)) +#define atomic64_dec(v) atomic64_sub(1,(v)) + +#define smp_mb__before_atomic_dec() smp_mb() +#define smp_mb__after_atomic_dec() smp_mb() +#define smp_mb__before_atomic_inc() smp_mb() +#define smp_mb__after_atomic_inc() smp_mb() + +#include +#endif /* _ALPHA_ATOMIC_H */ diff --git a/arch/alpha/include/asm/auxvec.h b/arch/alpha/include/asm/auxvec.h new file mode 100644 index 00000000000..e96fe880e31 --- /dev/null +++ b/arch/alpha/include/asm/auxvec.h @@ -0,0 +1,24 @@ +#ifndef __ASM_ALPHA_AUXVEC_H +#define __ASM_ALPHA_AUXVEC_H + +/* Reserve these numbers for any future use of a VDSO. */ +#if 0 +#define AT_SYSINFO 32 +#define AT_SYSINFO_EHDR 33 +#endif + +/* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the + value is -1, then the cache doesn't exist. Otherwise: + + bit 0-3: Cache set-associativity; 0 means fully associative. + bit 4-7: Log2 of cacheline size. + bit 8-31: Size of the entire cache >> 8. + bit 32-63: Reserved. 
+*/ + +#define AT_L1I_CACHESHAPE 34 +#define AT_L1D_CACHESHAPE 35 +#define AT_L2_CACHESHAPE 36 +#define AT_L3_CACHESHAPE 37 + +#endif /* __ASM_ALPHA_AUXVEC_H */ diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h new file mode 100644 index 00000000000..ac78eba909b --- /dev/null +++ b/arch/alpha/include/asm/barrier.h @@ -0,0 +1,33 @@ +#ifndef __BARRIER_H +#define __BARRIER_H + +#include + +#define mb() \ +__asm__ __volatile__("mb": : :"memory") + +#define rmb() \ +__asm__ __volatile__("mb": : :"memory") + +#define wmb() \ +__asm__ __volatile__("wmb": : :"memory") + +#define read_barrier_depends() \ +__asm__ __volatile__("mb": : :"memory") + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() do { } while (0) +#endif + +#define set_mb(var, value) \ +do { var = value; mb(); } while (0) + +#endif /* __BARRIER_H */ diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h new file mode 100644 index 00000000000..15f3ae25c51 --- /dev/null +++ b/arch/alpha/include/asm/bitops.h @@ -0,0 +1,466 @@ +#ifndef _ALPHA_BITOPS_H +#define _ALPHA_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include + +/* + * Copyright 1994, Linus Torvalds. + */ + +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + * + * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). + */ + +static inline void +set_bit(unsigned long nr, volatile void * addr) +{ + unsigned long temp; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + "1: ldl_l %0,%3\n" + " bis %0,%2,%0\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*m) + :"Ir" (1UL << (nr & 31)), "m" (*m)); +} + +/* + * WARNING: non atomic version. + */ +static inline void +__set_bit(unsigned long nr, volatile void * addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m |= 1 << (nr & 31); +} + +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +static inline void +clear_bit(unsigned long nr, volatile void * addr) +{ + unsigned long temp; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + "1: ldl_l %0,%3\n" + " bic %0,%2,%0\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*m) + :"Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +clear_bit_unlock(unsigned long nr, volatile void * addr) +{ + smp_mb(); + clear_bit(nr, addr); +} + +/* + * WARNING: non atomic version. 
+ */ +static __inline__ void +__clear_bit(unsigned long nr, volatile void * addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +static inline void +__clear_bit_unlock(unsigned long nr, volatile void * addr) +{ + smp_mb(); + __clear_bit(nr, addr); +} + +static inline void +change_bit(unsigned long nr, volatile void * addr) +{ + unsigned long temp; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + "1: ldl_l %0,%3\n" + " xor %0,%2,%0\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*m) + :"Ir" (1UL << (nr & 31)), "m" (*m)); +} + +/* + * WARNING: non atomic version. + */ +static __inline__ void +__change_bit(unsigned long nr, volatile void * addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m ^= 1 << (nr & 31); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( +#ifdef CONFIG_SMP + " mb\n" +#endif + "1: ldl_l %0,%4\n" + " and %0,%3,%2\n" + " bne %2,2f\n" + " xor %0,%3,%0\n" + " stl_c %0,%1\n" + " beq %0,3f\n" + "2:\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "3: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*m), "=&r" (oldbit) + :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + "1: ldl_l %0,%4\n" + " and %0,%3,%2\n" + " bne %2,2f\n" + " xor %0,%3,%0\n" + " stl_c %0,%1\n" + " beq %0,3f\n" + "2:\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "3: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*m), "=&r" (oldbit) + :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +/* + * WARNING: non atomic version. + */ +static inline int +__test_and_set_bit(unsigned long nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old | mask; + return (old & mask) != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void * addr) +{ + unsigned long oldbit; + unsigned long temp; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( +#ifdef CONFIG_SMP + " mb\n" +#endif + "1: ldl_l %0,%4\n" + " and %0,%3,%2\n" + " beq %2,2f\n" + " xor %0,%3,%0\n" + " stl_c %0,%1\n" + " beq %0,3f\n" + "2:\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "3: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*m), "=&r" (oldbit) + :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +/* + * WARNING: non atomic version. 
+ */ +static inline int +__test_and_clear_bit(unsigned long nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old & ~mask; + return (old & mask) != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void * addr) +{ + unsigned long oldbit; + unsigned long temp; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( +#ifdef CONFIG_SMP + " mb\n" +#endif + "1: ldl_l %0,%4\n" + " and %0,%3,%2\n" + " xor %0,%3,%0\n" + " stl_c %0,%1\n" + " beq %0,3f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "3: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (*m), "=&r" (oldbit) + :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +/* + * WARNING: non atomic version. + */ +static __inline__ int +__test_and_change_bit(unsigned long nr, volatile void * addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old ^ mask; + return (old & mask) != 0; +} + +static inline int +test_bit(int nr, const volatile void * addr) +{ + return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; +} + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + * + * Do a binary search on the bits. Due to the nature of large + * constants on the alpha, it is worthwhile to split the search. + */ +static inline unsigned long ffz_b(unsigned long x) +{ + unsigned long sum, x1, x2, x4; + + x = ~x & -~x; /* set first 0 bit, clear others */ + x1 = x & 0xAA; + x2 = x & 0xCC; + x4 = x & 0xF0; + sum = x2 ? 2 : 0; + sum += (x4 != 0) * 4; + sum += (x1 != 0); + + return sum; +} + +static inline unsigned long ffz(unsigned long word) +{ +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) + /* Whee. EV67 can calculate it directly. */ + return __kernel_cttz(~word); +#else + unsigned long bits, qofs, bofs; + + bits = __kernel_cmpbge(word, ~0UL); + qofs = ffz_b(bits); + bits = __kernel_extbl(word, qofs); + bofs = ffz_b(bits); + + return qofs*8 + bofs; +#endif +} + +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) + /* Whee. EV67 can calculate it directly. */ + return __kernel_cttz(word); +#else + unsigned long bits, qofs, bofs; + + bits = __kernel_cmpbge(0, word); + qofs = ffz_b(bits); + bits = __kernel_extbl(word, qofs); + bofs = ffz_b(~bits); + + return qofs*8 + bofs; +#endif +} + +#ifdef __KERNEL__ + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above __ffs. + */ + +static inline int ffs(int word) +{ + int result = __ffs(word) + 1; + return word ? result : 0; +} + +/* + * fls: find last bit set. 
+ */ +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) +static inline int fls64(unsigned long word) +{ + return 64 - __kernel_ctlz(word); +} +#else +extern const unsigned char __flsm1_tab[256]; + +static inline int fls64(unsigned long x) +{ + unsigned long t, a, r; + + t = __kernel_cmpbge (x, 0x0101010101010101UL); + a = __flsm1_tab[t]; + t = __kernel_extbl (x, a); + r = a*8 + __flsm1_tab[t] + (x != 0); + + return r; +} +#endif + +static inline unsigned long __fls(unsigned long x) +{ + return fls64(x) - 1; +} + +static inline int fls(int x) +{ + return fls64((unsigned int) x); +} + +/* + * hweightN: returns the hamming weight (i.e. the number + * of bits set) of a N-bit word + */ + +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) +/* Whee. EV67 can calculate it directly. */ +static inline unsigned long hweight64(unsigned long w) +{ + return __kernel_ctpop(w); +} + +static inline unsigned int hweight32(unsigned int w) +{ + return hweight64(w); +} + +static inline unsigned int hweight16(unsigned int w) +{ + return hweight64(w & 0xffff); +} + +static inline unsigned int hweight8(unsigned int w) +{ + return hweight64(w & 0xff); +} +#else +#include +#endif + +#endif /* __KERNEL__ */ + +#include + +#ifdef __KERNEL__ + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 140-bit bitmap where the first 100 bits are + * unlikely to be set. It's guaranteed that at least one of the 140 + * bits is set. + */ +static inline unsigned long +sched_find_first_bit(unsigned long b[3]) +{ + unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; + unsigned long ofs; + + ofs = (b1 ? 64 : 128); + b1 = (b1 ? b1 : b2); + ofs = (b0 ? 0 : ofs); + b0 = (b0 ? b0 : b1); + + return __ffs(b0) + ofs; +} + +#include + +#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) +#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) + +#include + +#endif /* __KERNEL__ */ + +#endif /* _ALPHA_BITOPS_H */ diff --git a/arch/alpha/include/asm/bug.h b/arch/alpha/include/asm/bug.h new file mode 100644 index 00000000000..695a5ee4b5d --- /dev/null +++ b/arch/alpha/include/asm/bug.h @@ -0,0 +1,28 @@ +#ifndef _ALPHA_BUG_H +#define _ALPHA_BUG_H + +#include + +#ifdef CONFIG_BUG +#include + +/* ??? Would be nice to use .gprel32 here, but we can't be sure that the + function loaded the GP, so this could fail in modules. */ +static inline void ATTRIB_NORET __BUG(const char *file, int line) +{ + __asm__ __volatile__( + "call_pal %0 # bugchk\n\t" + ".long %1\n\t.8byte %2" + : : "i" (PAL_bugchk), "i"(line), "i"(file)); + for ( ; ; ) + ; +} + +#define BUG() __BUG(__FILE__, __LINE__) + +#define HAVE_ARCH_BUG +#endif + +#include + +#endif diff --git a/arch/alpha/include/asm/bugs.h b/arch/alpha/include/asm/bugs.h new file mode 100644 index 00000000000..78030d1c7e7 --- /dev/null +++ b/arch/alpha/include/asm/bugs.h @@ -0,0 +1,20 @@ +/* + * include/asm-alpha/bugs.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +/* + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Needs: + * void check_bugs(void); + */ + +/* + * I don't know of any alpha bugs yet.. 
Nice chip + */ + +static void check_bugs(void) +{ +} diff --git a/arch/alpha/include/asm/byteorder.h b/arch/alpha/include/asm/byteorder.h new file mode 100644 index 00000000000..58e958fc7f1 --- /dev/null +++ b/arch/alpha/include/asm/byteorder.h @@ -0,0 +1,47 @@ +#ifndef _ALPHA_BYTEORDER_H +#define _ALPHA_BYTEORDER_H + +#include +#include +#include + +#ifdef __GNUC__ + +static inline __attribute_const__ __u32 __arch__swab32(__u32 x) +{ + /* + * Unfortunately, we can't use the 6 instruction sequence + * on ev6 since the latency of the UNPKBW is 3, which is + * pretty hard to hide. Just in case a future implementation + * has a lower latency, here's the sequence (also by Mike Burrows) + * + * UNPKBW a0, v0 v0: 00AA00BB00CC00DD + * SLL v0, 24, a0 a0: BB00CC00DD000000 + * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD + * EXTWL a0, 6, v0 v0: 000000000000BBAA + * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000 + * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA + */ + + __u64 t0, t1, t2, t3; + + t0 = __kernel_inslh(x, 7); /* t0 : 0000000000AABBCC */ + t1 = __kernel_inswl(x, 3); /* t1 : 000000CCDD000000 */ + t1 |= t0; /* t1 : 000000CCDDAABBCC */ + t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */ + t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */ + t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */ + t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */ + + return t1; +} + +#define __arch__swab32 __arch__swab32 + +#endif /* __GNUC__ */ + +#define __BYTEORDER_HAS_U64__ + +#include + +#endif /* _ALPHA_BYTEORDER_H */ diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h new file mode 100644 index 00000000000..f199e69a5d0 --- /dev/null +++ b/arch/alpha/include/asm/cache.h @@ -0,0 +1,23 @@ +/* + * include/asm-alpha/cache.h + */ +#ifndef __ARCH_ALPHA_CACHE_H +#define __ARCH_ALPHA_CACHE_H + + +/* Bytes per L1 (data) cache line. */ +#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) +# define L1_CACHE_BYTES 64 +# define L1_CACHE_SHIFT 6 +#else +/* Both EV4 and EV5 are write-through, read-allocate, + direct-mapped, physical. +*/ +# define L1_CACHE_BYTES 32 +# define L1_CACHE_SHIFT 5 +#endif + +#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#endif diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h new file mode 100644 index 00000000000..b686cc7fc44 --- /dev/null +++ b/arch/alpha/include/asm/cacheflush.h @@ -0,0 +1,74 @@ +#ifndef _ALPHA_CACHEFLUSH_H +#define _ALPHA_CACHEFLUSH_H + +#include + +/* Caches aren't brain-dead on the Alpha. */ +#define flush_cache_all() do { } while (0) +#define flush_cache_mm(mm) do { } while (0) +#define flush_cache_dup_mm(mm) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define flush_dcache_page(page) do { } while (0) +#define flush_dcache_mmap_lock(mapping) do { } while (0) +#define flush_dcache_mmap_unlock(mapping) do { } while (0) +#define flush_cache_vmap(start, end) do { } while (0) +#define flush_cache_vunmap(start, end) do { } while (0) + +/* Note that the following two definitions are _highly_ dependent + on the contexts in which they are used in the kernel. I personally + think it is criminal how loosely defined these macros are. */ + +/* We need to flush the kernel's icache after loading modules. The + only other use of this macro is in load_aout_interp which is not + used on Alpha. + + Note that this definition should *not* be used for userspace + icache flushing. 
While functional, it is _way_ overkill. The + icache is tagged with ASNs and it suffices to allocate a new ASN + for the process. */ +#ifndef CONFIG_SMP +#define flush_icache_range(start, end) imb() +#else +#define flush_icache_range(start, end) smp_imb() +extern void smp_imb(void); +#endif + +/* We need to flush the userspace icache after setting breakpoints in + ptrace. + + Instead of indiscriminately using imb, take advantage of the fact + that icache entries are tagged with the ASN and load a new mm context. */ +/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ + +#ifndef CONFIG_SMP +extern void __load_new_mm_context(struct mm_struct *); +static inline void +flush_icache_user_range(struct vm_area_struct *vma, struct page *page, + unsigned long addr, int len) +{ + if (vma->vm_flags & VM_EXEC) { + struct mm_struct *mm = vma->vm_mm; + if (current->active_mm == mm) + __load_new_mm_context(mm); + else + mm->context[smp_processor_id()] = 0; + } +} +#else +extern void flush_icache_user_range(struct vm_area_struct *vma, + struct page *page, unsigned long addr, int len); +#endif + +/* This is used only in do_no_page and do_swap_page. */ +#define flush_icache_page(vma, page) \ + flush_icache_user_range((vma), (page), 0, 0) + +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ +do { memcpy(dst, src, len); \ + flush_icache_user_range(vma, page, vaddr, len); \ +} while (0) +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ + memcpy(dst, src, len) + +#endif /* _ALPHA_CACHEFLUSH_H */ diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h new file mode 100644 index 00000000000..d3854bbf0a9 --- /dev/null +++ b/arch/alpha/include/asm/checksum.h @@ -0,0 +1,75 @@ +#ifndef _ALPHA_CHECKSUM_H +#define _ALPHA_CHECKSUM_H + +#include + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
+ */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, unsigned short proto, + __wsum sum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp); + +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); + + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern __sum16 ip_compute_csum(const void *buff, int len); + +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} + +#define _HAVE_ARCH_IPV6_CSUM +extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum); +#endif diff --git a/arch/alpha/include/asm/compiler.h b/arch/alpha/include/asm/compiler.h new file mode 100644 index 00000000000..da6bb199839 --- /dev/null +++ b/arch/alpha/include/asm/compiler.h @@ -0,0 +1,130 @@ +#ifndef __ALPHA_COMPILER_H +#define __ALPHA_COMPILER_H + +/* + * Herein are macros we use when describing various patterns we want to GCC. + * In all cases we can get better schedules out of the compiler if we hide + * as little as possible inside inline assembly. However, we want to be + * able to know what we'll get out before giving up inline assembly. Thus + * these tests and macros. 
+ */ + +#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 +# define __kernel_insbl(val, shift) __builtin_alpha_insbl(val, shift) +# define __kernel_inswl(val, shift) __builtin_alpha_inswl(val, shift) +# define __kernel_insql(val, shift) __builtin_alpha_insql(val, shift) +# define __kernel_inslh(val, shift) __builtin_alpha_inslh(val, shift) +# define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift) +# define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift) +# define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b) +#else +# define __kernel_insbl(val, shift) \ + ({ unsigned long __kir; \ + __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ + __kir; }) +# define __kernel_inswl(val, shift) \ + ({ unsigned long __kir; \ + __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ + __kir; }) +# define __kernel_insql(val, shift) \ + ({ unsigned long __kir; \ + __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ + __kir; }) +# define __kernel_inslh(val, shift) \ + ({ unsigned long __kir; \ + __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ + __kir; }) +# define __kernel_extbl(val, shift) \ + ({ unsigned long __kir; \ + __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ + __kir; }) +# define __kernel_extwl(val, shift) \ + ({ unsigned long __kir; \ + __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ + __kir; }) +# define __kernel_cmpbge(a, b) \ + ({ unsigned long __kir; \ + __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ + __kir; }) +#endif + +#ifdef __alpha_cix__ +# if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 +# define __kernel_cttz(x) __builtin_ctzl(x) +# define __kernel_ctlz(x) __builtin_clzl(x) +# define __kernel_ctpop(x) __builtin_popcountl(x) +# else +# define __kernel_cttz(x) \ + ({ unsigned long __kir; \ + __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# define __kernel_ctlz(x) \ + ({ unsigned long __kir; \ + __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# define __kernel_ctpop(x) \ + ({ unsigned long __kir; \ + __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# endif +#else +# define __kernel_cttz(x) \ + ({ unsigned long __kir; \ + __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# define __kernel_ctlz(x) \ + ({ unsigned long __kir; \ + __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +# define __kernel_ctpop(x) \ + ({ unsigned long __kir; \ + __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ + __kir; }) +#endif + + +/* + * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX + * extension is enabled. Previous versions did not define anything + * we could test during compilation -- too bad, so sad. 
+ */ + +#if defined(__alpha_bwx__) +#define __kernel_ldbu(mem) (mem) +#define __kernel_ldwu(mem) (mem) +#define __kernel_stb(val,mem) ((mem) = (val)) +#define __kernel_stw(val,mem) ((mem) = (val)) +#else +#define __kernel_ldbu(mem) \ + ({ unsigned char __kir; \ + __asm__(".arch ev56; \ + ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \ + __kir; }) +#define __kernel_ldwu(mem) \ + ({ unsigned short __kir; \ + __asm__(".arch ev56; \ + ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \ + __kir; }) +#define __kernel_stb(val,mem) \ + __asm__(".arch ev56; \ + stb %1,%0" : "=m"(mem) : "r"(val)) +#define __kernel_stw(val,mem) \ + __asm__(".arch ev56; \ + stw %1,%0" : "=m"(mem) : "r"(val)) +#endif + +#ifdef __KERNEL__ +/* Some idiots over in thought inline should imply + always_inline. This breaks stuff. We'll include this file whenever + we run into such problems. */ + +#include +#undef inline +#undef __inline__ +#undef __inline +#undef __always_inline +#define __always_inline inline __attribute__((always_inline)) + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_COMPILER_H */ diff --git a/arch/alpha/include/asm/console.h b/arch/alpha/include/asm/console.h new file mode 100644 index 00000000000..a3ce4e62249 --- /dev/null +++ b/arch/alpha/include/asm/console.h @@ -0,0 +1,75 @@ +#ifndef __AXP_CONSOLE_H +#define __AXP_CONSOLE_H + +/* + * Console callback routine numbers + */ +#define CCB_GETC 0x01 +#define CCB_PUTS 0x02 +#define CCB_RESET_TERM 0x03 +#define CCB_SET_TERM_INT 0x04 +#define CCB_SET_TERM_CTL 0x05 +#define CCB_PROCESS_KEYCODE 0x06 +#define CCB_OPEN_CONSOLE 0x07 +#define CCB_CLOSE_CONSOLE 0x08 + +#define CCB_OPEN 0x10 +#define CCB_CLOSE 0x11 +#define CCB_IOCTL 0x12 +#define CCB_READ 0x13 +#define CCB_WRITE 0x14 + +#define CCB_SET_ENV 0x20 +#define CCB_RESET_ENV 0x21 +#define CCB_GET_ENV 0x22 +#define CCB_SAVE_ENV 0x23 + +#define CCB_PSWITCH 0x30 +#define CCB_BIOS_EMUL 0x32 + +/* + * Environment variable numbers + */ +#define ENV_AUTO_ACTION 0x01 +#define ENV_BOOT_DEV 0x02 +#define ENV_BOOTDEF_DEV 0x03 +#define ENV_BOOTED_DEV 0x04 +#define ENV_BOOT_FILE 0x05 +#define ENV_BOOTED_FILE 0x06 +#define ENV_BOOT_OSFLAGS 0x07 +#define ENV_BOOTED_OSFLAGS 0x08 +#define ENV_BOOT_RESET 0x09 +#define ENV_DUMP_DEV 0x0A +#define ENV_ENABLE_AUDIT 0x0B +#define ENV_LICENSE 0x0C +#define ENV_CHAR_SET 0x0D +#define ENV_LANGUAGE 0x0E +#define ENV_TTY_DEV 0x0F + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +extern long callback_puts(long unit, const char *s, long length); +extern long callback_getc(long unit); +extern long callback_open_console(void); +extern long callback_close_console(void); +extern long callback_open(const char *device, long length); +extern long callback_close(long unit); +extern long callback_read(long channel, long count, const char *buf, long lbn); +extern long callback_getenv(long id, const char *buf, unsigned long buf_size); +extern long callback_setenv(long id, const char *buf, unsigned long buf_size); +extern long callback_save_env(void); + +extern int srm_fixup(unsigned long new_callback_addr, + unsigned long new_hwrpb_addr); +extern long srm_puts(const char *, long); +extern long srm_printk(const char *, ...) 
+ __attribute__ ((format (printf, 1, 2))); + +struct crb_struct; +struct hwrpb_struct; +extern int callback_init_done; +extern void * callback_init(void *); +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* __AXP_CONSOLE_H */ diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h new file mode 100644 index 00000000000..6785ff7e02b --- /dev/null +++ b/arch/alpha/include/asm/core_apecs.h @@ -0,0 +1,517 @@ +#ifndef __ALPHA_APECS__H__ +#define __ALPHA_APECS__H__ + +#include +#include + +/* + * APECS is the internal name for the 2107x chipset which provides + * memory controller and PCI access for the 21064 chip based systems. + * + * This file is based on: + * + * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets + * Data Sheet + * + * EC-N0648-72 + * + * + * david.rusling@reo.mts.dec.com Initial Version. + * + */ + +/* + An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address + that get passed through the PCI<->ISA bridge chip. So we've gotta use + both windows to max out the physical memory we can DMA to. Sigh... + + If we try a window at 0 for 1GB as a work-around, we run into conflicts + with ISA/PCI bus memory which can't be relocated, like VGA aperture and + BIOS ROMs. So we must put the windows high enough to avoid these areas. + + We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1, + and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1. + Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually + be used for that range (via virt_to_bus()). + + Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb, + to keep virt_to_bus() from returning an address in the first window, for + a data area that goes beyond the 64Mb first DMA window. Sigh... + The fudge factor MUST match with MAX_DMA_ADDRESS, but + we can't just use that here, because of header file looping... :-( + + Window 1 will be used for all DMA from the ISA bus; yes, that does + limit what memory an ISA floppy or sound card or Ethernet can touch, but + it's also a known limitation on other platforms as well. We use the + same technique that is used on INTEL platforms with similar limitation: + set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init(). + We trust that any ISA bus device drivers will *always* ask for DMAable + memory explicitly via kmalloc()/get_free_pages() flags arguments. + + Note that most PCI bus devices' drivers do *not* explicitly ask for + DMAable memory; they count on being able to DMA to any memory they + get from kmalloc()/get_free_pages(). They will also use window 1 for + any physical memory accesses below 64Mb; the rest will be handled by + window 2, maxing out at 1Gb of memory. I trust this is enough... :-) + + We hope that the area before the first window is large enough so that + there will be no overlap at the top end (64Mb). We *must* locate the + PCI cards' memory just below window 1, so that there's still the + possibility of being able to access it via SPARSE space. This is + important for cards such as the Matrox Millennium, whose Xserver + wants to access memory-mapped registers in byte and short lengths. + + Note that the XL is treated differently from the AVANTI, even though + for most other things they are identical. It didn't seem reasonable to + make the AVANTI support pay for the limitations of the XL. It is true, + however, that an XL kernel will run on an AVANTI without problems. 
+ + %%% All of this should be obviated by the ability to route + everything through the iommu. +*/ + +/* + * 21071-DA Control and Status registers. + * These are used for PCI memory access. + */ +#define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL) +#define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL) +#define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL) +#define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL) +#define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL) +#define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL) + +#define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL) +#define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL) + +#define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL) +#define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL) + +#define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL) +#define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL) + +#define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL) +#define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL) +#define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL) + +#define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL) + +#define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL) +#define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL) +#define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL) +#define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL) +#define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL) +#define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL) +#define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL) +#define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL) + +#define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL) +#define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL) +#define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL) +#define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL) +#define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL) +#define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL) +#define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL) +#define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL) + +#define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL) + + +/* + * 21071-CA Control and Status registers. + * These are used to program memory timing, + * configure memory and initialise the B-Cache. 
+ */ +#define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL) +#define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL) +#define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL) +#define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL) +#define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL) +#define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL) +#define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL) +#define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL) +#define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL) +#define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL) +#define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL) +#define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL) +#define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL) + +/* Bank x Base Address Register */ +#define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL) +#define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL) +#define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL) +#define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL) +#define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL) +#define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL) +#define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL) +#define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL) +#define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL) + +/* Bank x Configuration Register */ +#define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL) +#define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL) +#define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL) +#define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL) +#define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL) +#define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL) +#define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL) +#define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL) +#define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL) + +/* Bank x Timing Register A */ +#define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL) +#define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL) +#define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL) +#define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL) +#define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL) +#define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL) +#define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL) +#define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL) +#define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL) + +/* Bank x Timing Register B */ +#define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL) +#define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL) +#define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL) +#define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL) +#define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL) +#define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL) +#define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL) +#define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL) +#define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL) + + +/* + * Memory spaces: + */ +#define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL) +#define APECS_CONF (IDENT_ADDR + 0x1e0000000UL) +#define APECS_IO (IDENT_ADDR + 0x1c0000000UL) +#define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL) +#define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL) + + +/* + * Bit definitions for I/O Controller status register 0: + */ +#define APECS_IOC_STAT0_CMD 0xf +#define APECS_IOC_STAT0_ERR (1<<4) +#define APECS_IOC_STAT0_LOST (1<<5) +#define APECS_IOC_STAT0_THIT (1<<6) +#define APECS_IOC_STAT0_TREF (1<<7) +#define APECS_IOC_STAT0_CODE_SHIFT 8 +#define APECS_IOC_STAT0_CODE_MASK 0x7 +#define APECS_IOC_STAT0_P_NBR_SHIFT 13 +#define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff + +#define APECS_HAE_ADDRESS APECS_IOC_HAXR1 + + +/* + * Data 
structure for handling APECS machine checks: + */ + +struct el_apecs_mikasa_sysdata_mcheck +{ + unsigned long coma_gcr; + unsigned long coma_edsr; + unsigned long coma_ter; + unsigned long coma_elar; + unsigned long coma_ehar; + unsigned long coma_ldlr; + unsigned long coma_ldhr; + unsigned long coma_base0; + unsigned long coma_base1; + unsigned long coma_base2; + unsigned long coma_base3; + unsigned long coma_cnfg0; + unsigned long coma_cnfg1; + unsigned long coma_cnfg2; + unsigned long coma_cnfg3; + unsigned long epic_dcsr; + unsigned long epic_pear; + unsigned long epic_sear; + unsigned long epic_tbr1; + unsigned long epic_tbr2; + unsigned long epic_pbr1; + unsigned long epic_pbr2; + unsigned long epic_pmr1; + unsigned long epic_pmr2; + unsigned long epic_harx1; + unsigned long epic_harx2; + unsigned long epic_pmlt; + unsigned long epic_tag0; + unsigned long epic_tag1; + unsigned long epic_tag2; + unsigned long epic_tag3; + unsigned long epic_tag4; + unsigned long epic_tag5; + unsigned long epic_tag6; + unsigned long epic_tag7; + unsigned long epic_data0; + unsigned long epic_data1; + unsigned long epic_data2; + unsigned long epic_data3; + unsigned long epic_data4; + unsigned long epic_data5; + unsigned long epic_data6; + unsigned long epic_data7; + + unsigned long pceb_vid; + unsigned long pceb_did; + unsigned long pceb_revision; + unsigned long pceb_command; + unsigned long pceb_status; + unsigned long pceb_latency; + unsigned long pceb_control; + unsigned long pceb_arbcon; + unsigned long pceb_arbpri; + + unsigned long esc_id; + unsigned long esc_revision; + unsigned long esc_int0; + unsigned long esc_int1; + unsigned long esc_elcr0; + unsigned long esc_elcr1; + unsigned long esc_last_eisa; + unsigned long esc_nmi_stat; + + unsigned long pci_ir; + unsigned long pci_imr; + unsigned long svr_mgr; +}; + +/* This for the normal APECS machines. */ +struct el_apecs_sysdata_mcheck +{ + unsigned long coma_gcr; + unsigned long coma_edsr; + unsigned long coma_ter; + unsigned long coma_elar; + unsigned long coma_ehar; + unsigned long coma_ldlr; + unsigned long coma_ldhr; + unsigned long coma_base0; + unsigned long coma_base1; + unsigned long coma_base2; + unsigned long coma_cnfg0; + unsigned long coma_cnfg1; + unsigned long coma_cnfg2; + unsigned long epic_dcsr; + unsigned long epic_pear; + unsigned long epic_sear; + unsigned long epic_tbr1; + unsigned long epic_tbr2; + unsigned long epic_pbr1; + unsigned long epic_pbr2; + unsigned long epic_pmr1; + unsigned long epic_pmr2; + unsigned long epic_harx1; + unsigned long epic_harx2; + unsigned long epic_pmlt; + unsigned long epic_tag0; + unsigned long epic_tag1; + unsigned long epic_tag2; + unsigned long epic_tag3; + unsigned long epic_tag4; + unsigned long epic_tag5; + unsigned long epic_tag6; + unsigned long epic_tag7; + unsigned long epic_data0; + unsigned long epic_data1; + unsigned long epic_data2; + unsigned long epic_data3; + unsigned long epic_data4; + unsigned long epic_data5; + unsigned long epic_data6; + unsigned long epic_data7; +}; + +struct el_apecs_procdata +{ + unsigned long paltemp[32]; /* PAL TEMP REGS. */ + /* EV4-specific fields */ + unsigned long exc_addr; /* Address of excepting instruction. */ + unsigned long exc_sum; /* Summary of arithmetic traps. */ + unsigned long exc_mask; /* Exception mask (from exc_sum). */ + unsigned long iccsr; /* IBox hardware enables. */ + unsigned long pal_base; /* Base address for PALcode. */ + unsigned long hier; /* Hardware Interrupt Enable. 
*/ + unsigned long hirr; /* Hardware Interrupt Request. */ + unsigned long csr; /* D-stream fault info. */ + unsigned long dc_stat; /* D-cache status (ECC/Parity Err). */ + unsigned long dc_addr; /* EV3 Phys Addr for ECC/DPERR. */ + unsigned long abox_ctl; /* ABox Control Register. */ + unsigned long biu_stat; /* BIU Status. */ + unsigned long biu_addr; /* BUI Address. */ + unsigned long biu_ctl; /* BIU Control. */ + unsigned long fill_syndrome;/* For correcting ECC errors. */ + unsigned long fill_addr; /* Cache block which was being read */ + unsigned long va; /* Effective VA of fault or miss. */ + unsigned long bc_tag; /* Backup Cache Tag Probe Results.*/ +}; + + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * Unlike Jensen, the APECS machines have no concept of local + * I/O---everything goes over the PCI bus. + * + * There is plenty room for optimization here. In particular, + * the Alpha's insb/insw/extb/extw should be useful in moving + * data to/from the right byte-lanes. + */ + +#define vip volatile int __force * +#define vuip volatile unsigned int __force * +#define vulp volatile unsigned long __force * + +#define APECS_SET_HAE \ + do { \ + if (addr >= (1UL << 24)) { \ + unsigned long msb = addr & 0xf8000000; \ + addr -= msb; \ + set_hae(msb); \ + } \ + } while (0) + +__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x00; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x00; + } + + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extbl(result, addr & 3); +} + +__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x00; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x00; + } + + w = __kernel_insbl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; +} + +__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x08; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x08; + } + + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extwl(result, addr & 3); +} + +__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= APECS_DENSE_MEM) { + addr -= APECS_DENSE_MEM; + APECS_SET_HAE; + base_and_type = APECS_SPARSE_MEM + 0x08; + } else { + addr -= APECS_IO; + base_and_type = APECS_IO + 0x08; + } + + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; +} + +__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr < APECS_DENSE_MEM) + addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; + return *(vuip)addr; +} + +__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr < APECS_DENSE_MEM) + addr = ((addr - APECS_IO) << 5) + 
APECS_IO + 0x18; + *(vuip)addr = b; +} + +__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + APECS_IO); +} + +__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + APECS_DENSE_MEM); +} + +__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr) +{ + return addr >= IDENT_ADDR + 0x180000000UL; +} + +__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= APECS_DENSE_MEM; +} + +#undef APECS_SET_HAE + +#undef vip +#undef vuip +#undef vulp + +#undef __IO_PREFIX +#define __IO_PREFIX apecs +#define apecs_trivial_io_bw 0 +#define apecs_trivial_io_lq 0 +#define apecs_trivial_rw_bw 2 +#define apecs_trivial_rw_lq 1 +#define apecs_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_APECS__H__ */ diff --git a/arch/alpha/include/asm/core_cia.h b/arch/alpha/include/asm/core_cia.h new file mode 100644 index 00000000000..9e0516c0ca2 --- /dev/null +++ b/arch/alpha/include/asm/core_cia.h @@ -0,0 +1,500 @@ +#ifndef __ALPHA_CIA__H__ +#define __ALPHA_CIA__H__ + +/* Define to experiment with fitting everything into one 512MB HAE window. */ +#define CIA_ONE_HAE_WINDOW 1 + +#include +#include + +/* + * CIA is the internal name for the 21171 chipset which provides + * memory controller and PCI access for the 21164 chip based systems. + * Also supported here is the 21172 (CIA-2) and 21174 (PYXIS). + * + * The lineage is a bit confused, since the 21174 was reportedly started + * from the 21171 Pass 1 mask, and so is missing bug fixes that appear + * in 21171 Pass 2 and 21172, but it also contains additional features. + * + * This file is based on: + * + * DECchip 21171 Core Logic Chipset + * Technical Reference Manual + * + * EC-QE18B-TE + * + * david.rusling@reo.mts.dec.com Initial Version. + * + */ + +/* + * CIA ADDRESS BIT DEFINITIONS + * + * 3333 3333 3322 2222 2222 1111 1111 11 + * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210 + * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- + * 1 000 + * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- + * | |\| + * | Byte Enable --+ | + * | Transfer Length --+ + * +-- IO space, not cached + * + * Byte Transfer + * Enable Length Transfer Byte Address + * adr<6:5> adr<4:3> Length Enable Adder + * --------------------------------------------- + * 00 00 Byte 1110 0x000 + * 01 00 Byte 1101 0x020 + * 10 00 Byte 1011 0x040 + * 11 00 Byte 0111 0x060 + * + * 00 01 Word 1100 0x008 + * 01 01 Word 1001 0x028 <= Not supported in this code. + * 10 01 Word 0011 0x048 + * + * 00 10 Tribyte 1000 0x010 + * 01 10 Tribyte 0001 0x030 + * + * 10 11 Longword 0000 0x058 + * + * Note that byte enables are asserted low. 
+ * + */ + +#define CIA_MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */ +#define CIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */ +#define CIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */ + +/* + * 21171-CA Control and Status Registers + */ +#define CIA_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL) +# define CIA_REV_MASK 0xff +#define CIA_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL) +#define CIA_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL) +# define CIA_CTRL_PCI_EN (1 << 0) +# define CIA_CTRL_PCI_LOCK_EN (1 << 1) +# define CIA_CTRL_PCI_LOOP_EN (1 << 2) +# define CIA_CTRL_FST_BB_EN (1 << 3) +# define CIA_CTRL_PCI_MST_EN (1 << 4) +# define CIA_CTRL_PCI_MEM_EN (1 << 5) +# define CIA_CTRL_PCI_REQ64_EN (1 << 6) +# define CIA_CTRL_PCI_ACK64_EN (1 << 7) +# define CIA_CTRL_ADDR_PE_EN (1 << 8) +# define CIA_CTRL_PERR_EN (1 << 9) +# define CIA_CTRL_FILL_ERR_EN (1 << 10) +# define CIA_CTRL_MCHK_ERR_EN (1 << 11) +# define CIA_CTRL_ECC_CHK_EN (1 << 12) +# define CIA_CTRL_ASSERT_IDLE_BC (1 << 13) +# define CIA_CTRL_COM_IDLE_BC (1 << 14) +# define CIA_CTRL_CSR_IOA_BYPASS (1 << 15) +# define CIA_CTRL_IO_FLUSHREQ_EN (1 << 16) +# define CIA_CTRL_CPU_FLUSHREQ_EN (1 << 17) +# define CIA_CTRL_ARB_CPU_EN (1 << 18) +# define CIA_CTRL_EN_ARB_LINK (1 << 19) +# define CIA_CTRL_RD_TYPE_SHIFT 20 +# define CIA_CTRL_RL_TYPE_SHIFT 24 +# define CIA_CTRL_RM_TYPE_SHIFT 28 +# define CIA_CTRL_EN_DMA_RD_PERF (1 << 31) +#define CIA_IOC_CIA_CNFG (IDENT_ADDR + 0x8740000140UL) +# define CIA_CNFG_IOA_BWEN (1 << 0) +# define CIA_CNFG_PCI_MWEN (1 << 4) +# define CIA_CNFG_PCI_DWEN (1 << 5) +# define CIA_CNFG_PCI_WLEN (1 << 8) +#define CIA_IOC_FLASH_CTRL (IDENT_ADDR + 0x8740000200UL) +#define CIA_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL) +#define CIA_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL) +#define CIA_IOC_CFG (IDENT_ADDR + 0x8740000480UL) +#define CIA_IOC_CACK_EN (IDENT_ADDR + 0x8740000600UL) +# define CIA_CACK_EN_LOCK_EN (1 << 0) +# define CIA_CACK_EN_MB_EN (1 << 1) +# define CIA_CACK_EN_SET_DIRTY_EN (1 << 2) +# define CIA_CACK_EN_BC_VICTIM_EN (1 << 3) + + +/* + * 21171-CA Diagnostic Registers + */ +#define CIA_IOC_CIA_DIAG (IDENT_ADDR + 0x8740002000UL) +#define CIA_IOC_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL) + +/* + * 21171-CA Performance Monitor registers + */ +#define CIA_IOC_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL) +#define CIA_IOC_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL) + +/* + * 21171-CA Error registers + */ +#define CIA_IOC_CPU_ERR0 (IDENT_ADDR + 0x8740008000UL) +#define CIA_IOC_CPU_ERR1 (IDENT_ADDR + 0x8740008040UL) +#define CIA_IOC_CIA_ERR (IDENT_ADDR + 0x8740008200UL) +# define CIA_ERR_COR_ERR (1 << 0) +# define CIA_ERR_UN_COR_ERR (1 << 1) +# define CIA_ERR_CPU_PE (1 << 2) +# define CIA_ERR_MEM_NEM (1 << 3) +# define CIA_ERR_PCI_SERR (1 << 4) +# define CIA_ERR_PERR (1 << 5) +# define CIA_ERR_PCI_ADDR_PE (1 << 6) +# define CIA_ERR_RCVD_MAS_ABT (1 << 7) +# define CIA_ERR_RCVD_TAR_ABT (1 << 8) +# define CIA_ERR_PA_PTE_INV (1 << 9) +# define CIA_ERR_FROM_WRT_ERR (1 << 10) +# define CIA_ERR_IOA_TIMEOUT (1 << 11) +# define CIA_ERR_LOST_CORR_ERR (1 << 16) +# define CIA_ERR_LOST_UN_CORR_ERR (1 << 17) +# define CIA_ERR_LOST_CPU_PE (1 << 18) +# define CIA_ERR_LOST_MEM_NEM (1 << 19) +# define CIA_ERR_LOST_PERR (1 << 21) +# define CIA_ERR_LOST_PCI_ADDR_PE (1 << 22) +# define CIA_ERR_LOST_RCVD_MAS_ABT (1 << 23) +# define CIA_ERR_LOST_RCVD_TAR_ABT (1 << 24) +# define CIA_ERR_LOST_PA_PTE_INV (1 << 25) +# define CIA_ERR_LOST_FROM_WRT_ERR (1 << 26) +# define CIA_ERR_LOST_IOA_TIMEOUT (1 
<< 27) +# define CIA_ERR_VALID (1 << 31) +#define CIA_IOC_CIA_STAT (IDENT_ADDR + 0x8740008240UL) +#define CIA_IOC_ERR_MASK (IDENT_ADDR + 0x8740008280UL) +#define CIA_IOC_CIA_SYN (IDENT_ADDR + 0x8740008300UL) +#define CIA_IOC_MEM_ERR0 (IDENT_ADDR + 0x8740008400UL) +#define CIA_IOC_MEM_ERR1 (IDENT_ADDR + 0x8740008440UL) +#define CIA_IOC_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL) +#define CIA_IOC_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL) +#define CIA_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL) + +/* + * 21171-CA System configuration registers + */ +#define CIA_IOC_MCR (IDENT_ADDR + 0x8750000000UL) +#define CIA_IOC_MBA0 (IDENT_ADDR + 0x8750000600UL) +#define CIA_IOC_MBA2 (IDENT_ADDR + 0x8750000680UL) +#define CIA_IOC_MBA4 (IDENT_ADDR + 0x8750000700UL) +#define CIA_IOC_MBA6 (IDENT_ADDR + 0x8750000780UL) +#define CIA_IOC_MBA8 (IDENT_ADDR + 0x8750000800UL) +#define CIA_IOC_MBAA (IDENT_ADDR + 0x8750000880UL) +#define CIA_IOC_MBAC (IDENT_ADDR + 0x8750000900UL) +#define CIA_IOC_MBAE (IDENT_ADDR + 0x8750000980UL) +#define CIA_IOC_TMG0 (IDENT_ADDR + 0x8750000B00UL) +#define CIA_IOC_TMG1 (IDENT_ADDR + 0x8750000B40UL) +#define CIA_IOC_TMG2 (IDENT_ADDR + 0x8750000B80UL) + +/* + * 2117A-CA PCI Address and Scatter-Gather Registers. + */ +#define CIA_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL) + +#define CIA_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL) +#define CIA_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL) +#define CIA_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL) + +#define CIA_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL) +#define CIA_IOC_PCI_W1_MASK (IDENT_ADDR + 0x8760000540UL) +#define CIA_IOC_PCI_T1_BASE (IDENT_ADDR + 0x8760000580UL) + +#define CIA_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL) +#define CIA_IOC_PCI_W2_MASK (IDENT_ADDR + 0x8760000640UL) +#define CIA_IOC_PCI_T2_BASE (IDENT_ADDR + 0x8760000680UL) + +#define CIA_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL) +#define CIA_IOC_PCI_W3_MASK (IDENT_ADDR + 0x8760000740UL) +#define CIA_IOC_PCI_T3_BASE (IDENT_ADDR + 0x8760000780UL) + +#define CIA_IOC_PCI_Wn_BASE(N) (IDENT_ADDR + 0x8760000400UL + (N)*0x100) +#define CIA_IOC_PCI_Wn_MASK(N) (IDENT_ADDR + 0x8760000440UL + (N)*0x100) +#define CIA_IOC_PCI_Tn_BASE(N) (IDENT_ADDR + 0x8760000480UL + (N)*0x100) + +#define CIA_IOC_PCI_W_DAC (IDENT_ADDR + 0x87600007C0UL) + +/* + * 2117A-CA Address Translation Registers. + */ + +/* 8 tag registers, the first 4 of which are lockable. */ +#define CIA_IOC_TB_TAGn(n) \ + (IDENT_ADDR + 0x8760000800UL + (n)*0x40) + +/* 4 page registers per tag register. 
*/ +#define CIA_IOC_TBn_PAGEm(n,m) \ + (IDENT_ADDR + 0x8760001000UL + (n)*0x100 + (m)*0x40) + +/* + * Memory spaces: + */ +#define CIA_IACK_SC (IDENT_ADDR + 0x8720000000UL) +#define CIA_CONF (IDENT_ADDR + 0x8700000000UL) +#define CIA_IO (IDENT_ADDR + 0x8580000000UL) +#define CIA_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL) +#define CIA_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL) +#define CIA_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL) +#define CIA_DENSE_MEM (IDENT_ADDR + 0x8600000000UL) +#define CIA_BW_MEM (IDENT_ADDR + 0x8800000000UL) +#define CIA_BW_IO (IDENT_ADDR + 0x8900000000UL) +#define CIA_BW_CFG_0 (IDENT_ADDR + 0x8a00000000UL) +#define CIA_BW_CFG_1 (IDENT_ADDR + 0x8b00000000UL) + +/* + * ALCOR's GRU ASIC registers + */ +#define GRU_INT_REQ (IDENT_ADDR + 0x8780000000UL) +#define GRU_INT_MASK (IDENT_ADDR + 0x8780000040UL) +#define GRU_INT_EDGE (IDENT_ADDR + 0x8780000080UL) +#define GRU_INT_HILO (IDENT_ADDR + 0x87800000C0UL) +#define GRU_INT_CLEAR (IDENT_ADDR + 0x8780000100UL) + +#define GRU_CACHE_CNFG (IDENT_ADDR + 0x8780000200UL) +#define GRU_SCR (IDENT_ADDR + 0x8780000300UL) +#define GRU_LED (IDENT_ADDR + 0x8780000800UL) +#define GRU_RESET (IDENT_ADDR + 0x8780000900UL) + +#define ALCOR_GRU_INT_REQ_BITS 0x800fffffUL +#define XLT_GRU_INT_REQ_BITS 0x80003fffUL +#define GRU_INT_REQ_BITS (alpha_mv.sys.cia.gru_int_req_bits+0) + +/* + * PYXIS interrupt control registers + */ +#define PYXIS_INT_REQ (IDENT_ADDR + 0x87A0000000UL) +#define PYXIS_INT_MASK (IDENT_ADDR + 0x87A0000040UL) +#define PYXIS_INT_HILO (IDENT_ADDR + 0x87A00000C0UL) +#define PYXIS_INT_ROUTE (IDENT_ADDR + 0x87A0000140UL) +#define PYXIS_GPO (IDENT_ADDR + 0x87A0000180UL) +#define PYXIS_INT_CNFG (IDENT_ADDR + 0x87A00001C0UL) +#define PYXIS_RT_COUNT (IDENT_ADDR + 0x87A0000200UL) +#define PYXIS_INT_TIME (IDENT_ADDR + 0x87A0000240UL) +#define PYXIS_IIC_CTRL (IDENT_ADDR + 0x87A00002C0UL) +#define PYXIS_RESET (IDENT_ADDR + 0x8780000900UL) + +/* Offset between ram physical addresses and pci64 DAC bus addresses. */ +#define PYXIS_DAC_OFFSET (1UL << 40) + +/* + * Data structure for handling CIA machine checks. + */ + +/* System-specific info. */ +struct el_CIA_sysdata_mcheck { + unsigned long cpu_err0; + unsigned long cpu_err1; + unsigned long cia_err; + unsigned long cia_stat; + unsigned long err_mask; + unsigned long cia_syn; + unsigned long mem_err0; + unsigned long mem_err1; + unsigned long pci_err0; + unsigned long pci_err1; + unsigned long pci_err2; +}; + + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +/* Do not touch, this should *NOT* be static inline */ +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * CIA (the 2117x PCI/memory support chipset for the EV5 (21164) + * series of processors uses a sparse address mapping scheme to + * get at PCI memory and I/O. + */ + +/* + * Memory functions. 64-bit and 32-bit accesses are done through + * dense memory space, everything else through sparse space. + * + * For reading and writing 8 and 16 bit quantities we need to + * go through one of the three sparse address mapping regions + * and use the HAE_MEM CSR to provide some bits of the address. + * The following few routines use only sparse address region 1 + * which gives 1Gbyte of accessible space which relates exactly + * to the amount of PCI memory mapping *into* system address space. 
+ * See p 6-17 of the specification but it looks something like this: + * + * 21164 Address: + * + * 3 2 1 + * 9876543210987654321098765432109876543210 + * 1ZZZZ0.PCI.QW.Address............BBLL + * + * ZZ = SBZ + * BB = Byte offset + * LL = Transfer length + * + * PCI Address: + * + * 3 2 1 + * 10987654321098765432109876543210 + * HHH....PCI.QW.Address........ 00 + * + * HHH = 31:29 HAE_MEM CSR + * + */ + +#define vip volatile int __force * +#define vuip volatile unsigned int __force * +#define vulp volatile unsigned long __force * + +__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x00; + else + base_and_type = CIA_IO + 0x00; + + /* We can use CIA_MEM_R1_MASK for io ports too, since it is large + enough to cover all io ports, and smaller than CIA_IO. */ + addr &= CIA_MEM_R1_MASK; + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extbl(result, addr & 3); +} + +__EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x00; + else + base_and_type = CIA_IO + 0x00; + + addr &= CIA_MEM_R1_MASK; + w = __kernel_insbl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; +} + +__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x08; + else + base_and_type = CIA_IO + 0x08; + + addr &= CIA_MEM_R1_MASK; + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extwl(result, addr & 3); +} + +__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= CIA_DENSE_MEM) + base_and_type = CIA_SPARSE_MEM + 0x08; + else + base_and_type = CIA_IO + 0x08; + + addr &= CIA_MEM_R1_MASK; + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; +} + +__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr < CIA_DENSE_MEM) + addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; + return *(vuip)addr; +} + +__EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr < CIA_DENSE_MEM) + addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; + *(vuip)addr = b; +} + +__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + CIA_IO); +} + +__EXTERN_INLINE void __iomem *cia_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + CIA_DENSE_MEM); +} + +__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr) +{ + return addr >= IDENT_ADDR + 0x8000000000UL; +} + +__EXTERN_INLINE int cia_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= CIA_DENSE_MEM; +} + +__EXTERN_INLINE void __iomem *cia_bwx_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + CIA_BW_IO); +} + +__EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + CIA_BW_MEM); +} + +__EXTERN_INLINE int cia_bwx_is_ioaddr(unsigned long addr) +{ + return addr >= IDENT_ADDR + 0x8000000000UL; +} + +__EXTERN_INLINE int cia_bwx_is_mmio(const volatile void __iomem 
*addr) +{ + return (unsigned long)addr < CIA_BW_IO; +} + +#undef vip +#undef vuip +#undef vulp + +#undef __IO_PREFIX +#define __IO_PREFIX cia +#define cia_trivial_rw_bw 2 +#define cia_trivial_rw_lq 1 +#define cia_trivial_io_bw 0 +#define cia_trivial_io_lq 0 +#define cia_trivial_iounmap 1 +#include + +#undef __IO_PREFIX +#define __IO_PREFIX cia_bwx +#define cia_bwx_trivial_rw_bw 1 +#define cia_bwx_trivial_rw_lq 1 +#define cia_bwx_trivial_io_bw 1 +#define cia_bwx_trivial_io_lq 1 +#define cia_bwx_trivial_iounmap 1 +#include + +#undef __IO_PREFIX +#ifdef CONFIG_ALPHA_PYXIS +#define __IO_PREFIX cia_bwx +#else +#define __IO_PREFIX cia +#endif + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_CIA__H__ */ diff --git a/arch/alpha/include/asm/core_irongate.h b/arch/alpha/include/asm/core_irongate.h new file mode 100644 index 00000000000..24b2db54150 --- /dev/null +++ b/arch/alpha/include/asm/core_irongate.h @@ -0,0 +1,232 @@ +#ifndef __ALPHA_IRONGATE__H__ +#define __ALPHA_IRONGATE__H__ + +#include +#include + +/* + * IRONGATE is the internal name for the AMD-751 K7 core logic chipset + * which provides memory controller and PCI access for NAUTILUS-based + * EV6 (21264) systems. + * + * This file is based on: + * + * IronGate management library, (c) 1999 Alpha Processor, Inc. + * Copyright (C) 1999 Alpha Processor, Inc., + * (David Daniel, Stig Telfer, Soohoon Lee) + */ + +/* + * The 21264 supports, and internally recognizes, a 44-bit physical + * address space that is divided equally between memory address space + * and I/O address space. Memory address space resides in the lower + * half of the physical address space (PA[43]=0) and I/O address space + * resides in the upper half of the physical address space (PA[43]=1). + */ + +/* + * Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access + * through the routines given is 32-bit. + * + * The first 0x40 bytes are standard as per the PCI spec. + */ + +typedef volatile __u32 igcsr32; + +typedef struct { + igcsr32 dev_vendor; /* 0x00 - device ID, vendor ID */ + igcsr32 stat_cmd; /* 0x04 - status, command */ + igcsr32 class; /* 0x08 - class code, rev ID */ + igcsr32 latency; /* 0x0C - header type, PCI latency */ + igcsr32 bar0; /* 0x10 - BAR0 - AGP */ + igcsr32 bar1; /* 0x14 - BAR1 - GART */ + igcsr32 bar2; /* 0x18 - Power Management reg block */ + + igcsr32 rsrvd0[6]; /* 0x1C-0x33 reserved */ + + igcsr32 capptr; /* 0x34 - Capabilities pointer */ + + igcsr32 rsrvd1[2]; /* 0x38-0x3F reserved */ + + igcsr32 bacsr10; /* 0x40 - base address chip selects */ + igcsr32 bacsr32; /* 0x44 - base address chip selects */ + igcsr32 bacsr54_eccms761; /* 0x48 - 751: base addr. 
chip selects + 761: ECC, mode/status */ + + igcsr32 rsrvd2[1]; /* 0x4C-0x4F reserved */ + + igcsr32 drammap; /* 0x50 - address mapping control */ + igcsr32 dramtm; /* 0x54 - timing, driver strength */ + igcsr32 dramms; /* 0x58 - DRAM mode/status */ + + igcsr32 rsrvd3[1]; /* 0x5C-0x5F reserved */ + + igcsr32 biu0; /* 0x60 - bus interface unit */ + igcsr32 biusip; /* 0x64 - Serial initialisation pkt */ + + igcsr32 rsrvd4[2]; /* 0x68-0x6F reserved */ + + igcsr32 mro; /* 0x70 - memory request optimiser */ + + igcsr32 rsrvd5[3]; /* 0x74-0x7F reserved */ + + igcsr32 whami; /* 0x80 - who am I */ + igcsr32 pciarb; /* 0x84 - PCI arbitration control */ + igcsr32 pcicfg; /* 0x88 - PCI config status */ + + igcsr32 rsrvd6[4]; /* 0x8C-0x9B reserved */ + + igcsr32 pci_mem; /* 0x9C - PCI top of memory, + 761 only */ + + /* AGP (bus 1) control registers */ + igcsr32 agpcap; /* 0xA0 - AGP Capability Identifier */ + igcsr32 agpstat; /* 0xA4 - AGP status register */ + igcsr32 agpcmd; /* 0xA8 - AGP control register */ + igcsr32 agpva; /* 0xAC - AGP Virtual Address Space */ + igcsr32 agpmode; /* 0xB0 - AGP/GART mode control */ +} Irongate0; + + +typedef struct { + + igcsr32 dev_vendor; /* 0x00 - Device and Vendor IDs */ + igcsr32 stat_cmd; /* 0x04 - Status and Command regs */ + igcsr32 class; /* 0x08 - subclass, baseclass etc */ + igcsr32 htype; /* 0x0C - header type (at 0x0E) */ + igcsr32 rsrvd0[2]; /* 0x10-0x17 reserved */ + igcsr32 busnos; /* 0x18 - Primary, secondary bus nos */ + igcsr32 io_baselim_regs; /* 0x1C - IO base, IO lim, AGP status */ + igcsr32 mem_baselim; /* 0x20 - memory base, memory lim */ + igcsr32 pfmem_baselim; /* 0x24 - prefetchable base, lim */ + igcsr32 rsrvd1[2]; /* 0x28-0x2F reserved */ + igcsr32 io_baselim; /* 0x30 - IO base, IO limit */ + igcsr32 rsrvd2[2]; /* 0x34-0x3B - reserved */ + igcsr32 interrupt; /* 0x3C - interrupt, PCI bridge ctrl */ + +} Irongate1; + +extern igcsr32 *IronECC; + +/* + * Memory spaces: + */ + +/* Irongate is consistent with a subset of the Tsunami memory map */ +#ifdef USE_48_BIT_KSEG +#define IRONGATE_BIAS 0x80000000000UL +#else +#define IRONGATE_BIAS 0x10000000000UL +#endif + + +#define IRONGATE_MEM (IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL) +#define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL) +#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL) +#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL) + +/* + * PCI Configuration space accesses are formed like so: + * + * 0x1FE << 24 | : 2 2 2 2 1 1 1 1 : 1 1 1 1 1 1 0 0 : 0 0 0 0 0 0 0 0 : + * : 3 2 1 0 9 8 7 6 : 5 4 3 2 1 0 9 8 : 7 6 5 4 3 2 1 0 : + * ---bus numer--- -device-- -fun- ---register---- + */ + +#define IGCSR(dev,fun,reg) ( IRONGATE_CONF | \ + ((dev)<<11) | \ + ((fun)<<8) | \ + (reg) ) + +#define IRONGATE0 ((Irongate0 *) IGCSR(0, 0, 0)) +#define IRONGATE1 ((Irongate1 *) IGCSR(1, 0, 0)) + +/* + * Data structure for handling IRONGATE machine checks: + * This is the standard OSF logout frame + */ + +#define SCB_Q_SYSERR 0x620 /* OSF definitions */ +#define SCB_Q_PROCERR 0x630 +#define SCB_Q_SYSMCHK 0x660 +#define SCB_Q_PROCMCHK 0x670 + +struct el_IRONGATE_sysdata_mcheck { + __u32 FrameSize; /* Bytes, including this field */ + __u32 FrameFlags; /* <31> = Retry, <30> = Second Error */ + __u32 CpuOffset; /* Offset to CPU-specific into */ + __u32 SystemOffset; /* Offset to system-specific info */ + __u32 MCHK_Code; + __u32 MCHK_Frame_Rev; + __u64 I_STAT; + __u64 DC_STAT; + __u64 C_ADDR; + __u64 DC1_SYNDROME; + __u64 DC0_SYNDROME; + __u64 C_STAT; + __u64 
C_STS; + __u64 RESERVED0; + __u64 EXC_ADDR; + __u64 IER_CM; + __u64 ISUM; + __u64 MM_STAT; + __u64 PAL_BASE; + __u64 I_CTL; + __u64 PCTX; +}; + + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and + * K7 can only use linear accesses to get at PCI memory and I/O spaces. + */ + +/* + * Memory functions. All accesses are done through linear space. + */ + +__EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + IRONGATE_IO); +} + +extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size); +extern void irongate_iounmap(volatile void __iomem *addr); + +__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr) +{ + return addr >= IRONGATE_MEM; +} + +__EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr; + return addr < IRONGATE_IO || addr >= IRONGATE_CONF; +} + +#undef __IO_PREFIX +#define __IO_PREFIX irongate +#define irongate_trivial_rw_bw 1 +#define irongate_trivial_rw_lq 1 +#define irongate_trivial_io_bw 1 +#define irongate_trivial_io_lq 1 +#define irongate_trivial_iounmap 0 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_IRONGATE__H__ */ diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h new file mode 100644 index 00000000000..f7cb4b46095 --- /dev/null +++ b/arch/alpha/include/asm/core_lca.h @@ -0,0 +1,361 @@ +#ifndef __ALPHA_LCA__H__ +#define __ALPHA_LCA__H__ + +#include +#include + +/* + * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068, + * for example). + * + * This file is based on: + * + * DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors + * Hardware Reference Manual; Digital Equipment Corp.; May 1994; + * Maynard, MA; Order Number: EC-N2681-71. + */ + +/* + * NOTE: The LCA uses a Host Address Extension (HAE) register to access + * PCI addresses that are beyond the first 27 bits of address + * space. Updating the HAE requires an external cycle (and + * a memory barrier), which tends to be slow. Instead of updating + * it on each sparse memory access, we keep the current HAE value + * cached in variable cache_hae. Only if the cached HAE differs + * from the desired HAE value do we actually updated HAE register. + * The HAE register is preserved by the interrupt handler entry/exit + * code, so this scheme works even in the presence of interrupts. + * + * Dense memory space doesn't require the HAE, but is restricted to + * aligned 32 and 64 bit accesses. Special Cycle and Interrupt + * Acknowledge cycles may also require the use of the HAE. The LCA + * limits I/O address space to the bottom 24 bits of address space, + * but this easily covers the 16 bit ISA I/O address space. + */ + +/* + * NOTE 2! The memory operations do not set any memory barriers, as + * it's not needed for cases like a frame buffer that is essentially + * memory-like. You need to do them by hand if the operations depend + * on ordering. + * + * Similarly, the port I/O operations do a "mb" only after a write + * operation: if an mb is needed before (as in the case of doing + * memory mapped I/O first, and then a port I/O operation to the same + * device), it needs to be done by hand. 
+ * + * After the above has bitten me 100 times, I'll give up and just do + * the mb all the time, but right now I'm hoping this will work out. + * Avoiding mb's may potentially be a noticeable speed improvement, + * but I can't honestly say I've tested it. + * + * Handling interrupts that need to do mb's to synchronize to + * non-interrupts is another fun race area. Don't do it (because if + * you do, I'll have to do *everything* with interrupts disabled, + * ugh). + */ + +/* + * Memory Controller registers: + */ +#define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL) +#define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL) +#define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL) +#define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL) +#define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL) +#define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL) +#define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL) +#define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL) +#define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL) +#define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL) +#define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL) +#define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL) +#define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL) +#define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL) +#define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL) +#define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL) +#define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL) +#define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL) +#define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL) + +/* + * I/O Controller registers: + */ +#define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL) +#define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL) +#define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL) +#define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL) +#define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL) +#define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL) +#define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL) +#define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL) +#define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL) +#define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL) +#define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL) +#define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL) +#define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL) +#define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL) +#define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL) +#define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL) +#define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL) +#define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL) +#define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL) +#define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL) +#define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL) +#define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL) + +/* + * Memory spaces: + */ +#define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL) +#define LCA_CONF (IDENT_ADDR + 0x1e0000000UL) +#define LCA_IO (IDENT_ADDR + 0x1c0000000UL) +#define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL) +#define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL) + +/* + * Bit definitions for I/O Controller status register 0: + */ +#define LCA_IOC_STAT0_CMD 0xf +#define LCA_IOC_STAT0_ERR (1<<4) +#define LCA_IOC_STAT0_LOST (1<<5) +#define LCA_IOC_STAT0_THIT (1<<6) +#define LCA_IOC_STAT0_TREF (1<<7) +#define LCA_IOC_STAT0_CODE_SHIFT 8 +#define LCA_IOC_STAT0_CODE_MASK 0x7 +#define LCA_IOC_STAT0_P_NBR_SHIFT 13 +#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff + +#define LCA_HAE_ADDRESS LCA_IOC_HAE + +/* LCA PMR Power Management register defines */ +#define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL) +#define LCA_PMR_PDIV 0x7 /* Primary clock 
divisor */ +#define LCA_PMR_ODIV 0x38 /* Override clock divisor */ +#define LCA_PMR_INTO 0x40 /* Interrupt override */ +#define LCA_PMR_DMAO 0x80 /* DMA override */ +#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even bits */ +#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - even bits */ +#define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8L + +/* LCA PMR Macros */ + +#define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR) +#define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d)) + +#define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV) +#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV) +#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c))) + +/* LCA PMR Divisor values */ +#define LCA_PMR_DIV_1 0x0 +#define LCA_PMR_DIV_1_5 0x1 +#define LCA_PMR_DIV_2 0x2 +#define LCA_PMR_DIV_4 0x3 +#define LCA_PMR_DIV_8 0x4 +#define LCA_PMR_DIV_16 0x5 +#define LCA_PMR_DIV_MIN DIV_1 +#define LCA_PMR_DIV_MAX DIV_16 + + +/* + * Data structure for handling LCA machine checks. Correctable errors + * result in a short logout frame, uncorrectable ones in a long one. + */ +struct el_lca_mcheck_short { + struct el_common h; /* common logout header */ + unsigned long esr; /* error-status register */ + unsigned long ear; /* error-address register */ + unsigned long dc_stat; /* dcache status register */ + unsigned long ioc_stat0; /* I/O controller status register 0 */ + unsigned long ioc_stat1; /* I/O controller status register 1 */ +}; + +struct el_lca_mcheck_long { + struct el_common h; /* common logout header */ + unsigned long pt[31]; /* PAL temps */ + unsigned long exc_addr; /* exception address */ + unsigned long pad1[3]; + unsigned long pal_base; /* PALcode base address */ + unsigned long hier; /* hw interrupt enable */ + unsigned long hirr; /* hw interrupt request */ + unsigned long mm_csr; /* MMU control & status */ + unsigned long dc_stat; /* data cache status */ + unsigned long dc_addr; /* data cache addr register */ + unsigned long abox_ctl; /* address box control register */ + unsigned long esr; /* error status register */ + unsigned long ear; /* error address register */ + unsigned long car; /* cache control register */ + unsigned long ioc_stat0; /* I/O controller status register 0 */ + unsigned long ioc_stat1; /* I/O controller status register 1 */ + unsigned long va; /* virtual address register */ +}; + +union el_lca { + struct el_common * c; + struct el_lca_mcheck_long * l; + struct el_lca_mcheck_short * s; +}; + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * Unlike Jensen, the Noname machines have no concept of local + * I/O---everything goes over the PCI bus. + * + * There is plenty room for optimization here. In particular, + * the Alpha's insb/insw/extb/extw should be useful in moving + * data to/from the right byte-lanes. 
+ */ + +#define vip volatile int __force * +#define vuip volatile unsigned int __force * +#define vulp volatile unsigned long __force * + +#define LCA_SET_HAE \ + do { \ + if (addr >= (1UL << 24)) { \ + unsigned long msb = addr & 0xf8000000; \ + addr -= msb; \ + set_hae(msb); \ + } \ + } while (0) + + +__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x00; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x00; + } + + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extbl(result, addr & 3); +} + +__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x00; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x00; + } + + w = __kernel_insbl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; +} + +__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long result, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x08; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x08; + } + + result = *(vip) ((addr << 5) + base_and_type); + return __kernel_extwl(result, addr & 3); +} + +__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long w, base_and_type; + + if (addr >= LCA_DENSE_MEM) { + addr -= LCA_DENSE_MEM; + LCA_SET_HAE; + base_and_type = LCA_SPARSE_MEM + 0x08; + } else { + addr -= LCA_IO; + base_and_type = LCA_IO + 0x08; + } + + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + base_and_type) = w; +} + +__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr < LCA_DENSE_MEM) + addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; + return *(vuip)addr; +} + +__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + if (addr < LCA_DENSE_MEM) + addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; + *(vuip)addr = b; +} + +__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + LCA_IO); +} + +__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + LCA_DENSE_MEM); +} + +__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr) +{ + return addr >= IDENT_ADDR + 0x120000000UL; +} + +__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= LCA_DENSE_MEM; +} + +#undef vip +#undef vuip +#undef vulp + +#undef __IO_PREFIX +#define __IO_PREFIX lca +#define lca_trivial_rw_bw 2 +#define lca_trivial_rw_lq 1 +#define lca_trivial_io_bw 0 +#define lca_trivial_io_lq 0 +#define lca_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_LCA__H__ */ diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h new file mode 100644 index 00000000000..30d55fe7aaf --- /dev/null +++ b/arch/alpha/include/asm/core_marvel.h @@ -0,0 +1,378 @@ +/* + * Marvel systems use the IO7 I/O 
chip provides PCI/PCIX/AGP access + * + * This file is based on: + * + * Marvel / EV7 System Programmer's Manual + * Revision 1.00 + * 14 May 2001 + */ + +#ifndef __ALPHA_MARVEL__H__ +#define __ALPHA_MARVEL__H__ + +#include +#include +#include + +#include + +#define MARVEL_MAX_PIDS 32 /* as long as we rely on 43-bit superpage */ +#define MARVEL_IRQ_VEC_PE_SHIFT (10) +#define MARVEL_IRQ_VEC_IRQ_MASK ((1 << MARVEL_IRQ_VEC_PE_SHIFT) - 1) +#define MARVEL_NR_IRQS \ + (16 + (MARVEL_MAX_PIDS * (1 << MARVEL_IRQ_VEC_PE_SHIFT))) + +/* + * EV7 RBOX Registers + */ +typedef struct { + volatile unsigned long csr __attribute__((aligned(16))); +} ev7_csr; + +typedef struct { + ev7_csr RBOX_CFG; /* 0x0000 */ + ev7_csr RBOX_NSVC; + ev7_csr RBOX_EWVC; + ev7_csr RBOX_WHAMI; + ev7_csr RBOX_TCTL; /* 0x0040 */ + ev7_csr RBOX_INT; + ev7_csr RBOX_IMASK; + ev7_csr RBOX_IREQ; + ev7_csr RBOX_INTQ; /* 0x0080 */ + ev7_csr RBOX_INTA; + ev7_csr RBOX_IT; + ev7_csr RBOX_SCRATCH1; + ev7_csr RBOX_SCRATCH2; /* 0x00c0 */ + ev7_csr RBOX_L_ERR; +} ev7_csrs; + +/* + * EV7 CSR addressing macros + */ +#define EV7_MASK40(addr) ((addr) & ((1UL << 41) - 1)) +#define EV7_KERN_ADDR(addr) ((void *)(IDENT_ADDR | EV7_MASK40(addr))) + +#define EV7_PE_MASK 0x1ffUL /* 9 bits ( 256 + mem/io ) */ +#define EV7_IPE(pe) ((~((long)(pe)) & EV7_PE_MASK) << 35) + +#define EV7_CSR_PHYS(pe, off) (EV7_IPE(pe) | (0x7FFCUL << 20) | (off)) +#define EV7_CSRS_PHYS(pe) (EV7_CSR_PHYS(pe, 0UL)) + +#define EV7_CSR_KERN(pe, off) (EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off))) +#define EV7_CSRS_KERN(pe) (EV7_KERN_ADDR(EV7_CSRS_PHYS(pe))) + +#define EV7_CSR_OFFSET(name) ((unsigned long)&((ev7_csrs *)NULL)->name.csr) + +/* + * IO7 registers + */ +typedef struct { + volatile unsigned long csr __attribute__((aligned(64))); +} io7_csr; + +typedef struct { + /* I/O Port Control Registers */ + io7_csr POx_CTRL; /* 0x0000 */ + io7_csr POx_CACHE_CTL; + io7_csr POx_TIMER; + io7_csr POx_IO_ADR_EXT; + io7_csr POx_MEM_ADR_EXT; /* 0x0100 */ + io7_csr POx_XCAL_CTRL; + io7_csr rsvd1[2]; /* ?? 
spec doesn't show 0x180 */ + io7_csr POx_DM_SOURCE; /* 0x0200 */ + io7_csr POx_DM_DEST; + io7_csr POx_DM_SIZE; + io7_csr POx_DM_CTRL; + io7_csr rsvd2[4]; /* 0x0300 */ + + /* AGP Control Registers -- port 3 only */ + io7_csr AGP_CAP_ID; /* 0x0400 */ + io7_csr AGP_STAT; + io7_csr AGP_CMD; + io7_csr rsvd3; + + /* I/O Port Monitor Registers */ + io7_csr POx_MONCTL; /* 0x0500 */ + io7_csr POx_CTRA; + io7_csr POx_CTRB; + io7_csr POx_CTR56; + io7_csr POx_SCRATCH; /* 0x0600 */ + io7_csr POx_XTRA_A; + io7_csr POx_XTRA_TS; + io7_csr POx_XTRA_Z; + io7_csr rsvd4; /* 0x0700 */ + io7_csr POx_THRESHA; + io7_csr POx_THRESHB; + io7_csr rsvd5[33]; + + /* System Address Space Window Control Registers */ + + io7_csr POx_WBASE[4]; /* 0x1000 */ + io7_csr POx_WMASK[4]; + io7_csr POx_TBASE[4]; + io7_csr POx_SG_TBIA; + io7_csr POx_MSI_WBASE; + io7_csr rsvd6[50]; + + /* I/O Port Error Registers */ + io7_csr POx_ERR_SUM; + io7_csr POx_FIRST_ERR; + io7_csr POx_MSK_HEI; + io7_csr POx_TLB_ERR; + io7_csr POx_SPL_COMPLT; + io7_csr POx_TRANS_SUM; + io7_csr POx_FRC_PCI_ERR; + io7_csr POx_MULT_ERR; + io7_csr rsvd7[8]; + + /* I/O Port End of Interrupt Registers */ + io7_csr EOI_DAT; + io7_csr rsvd8[7]; + io7_csr POx_IACK_SPECIAL; + io7_csr rsvd9[103]; +} io7_ioport_csrs; + +typedef struct { + io7_csr IO_ASIC_REV; /* 0x30.0000 */ + io7_csr IO_SYS_REV; + io7_csr SER_CHAIN3; + io7_csr PO7_RST1; + io7_csr PO7_RST2; /* 0x30.0100 */ + io7_csr POx_RST[4]; + io7_csr IO7_DWNH; + io7_csr IO7_MAF; + io7_csr IO7_MAF_TO; + io7_csr IO7_ACC_CLUMP; /* 0x30.0300 */ + io7_csr IO7_PMASK; + io7_csr IO7_IOMASK; + io7_csr IO7_UPH; + io7_csr IO7_UPH_TO; /* 0x30.0400 */ + io7_csr RBX_IREQ_OFF; + io7_csr RBX_INTA_OFF; + io7_csr INT_RTY; + io7_csr PO7_MONCTL; /* 0x30.0500 */ + io7_csr PO7_CTRA; + io7_csr PO7_CTRB; + io7_csr PO7_CTR56; + io7_csr PO7_SCRATCH; /* 0x30.0600 */ + io7_csr PO7_XTRA_A; + io7_csr PO7_XTRA_TS; + io7_csr PO7_XTRA_Z; + io7_csr PO7_PMASK; /* 0x30.0700 */ + io7_csr PO7_THRESHA; + io7_csr PO7_THRESHB; + io7_csr rsvd1[97]; + io7_csr PO7_ERROR_SUM; /* 0x30.2000 */ + io7_csr PO7_BHOLE_MASK; + io7_csr PO7_HEI_MSK; + io7_csr PO7_CRD_MSK; + io7_csr PO7_UNCRR_SYM; /* 0x30.2100 */ + io7_csr PO7_CRRCT_SYM; + io7_csr PO7_ERR_PKT[2]; + io7_csr PO7_UGBGE_SYM; /* 0x30.2200 */ + io7_csr rsbv2[887]; + io7_csr PO7_LSI_CTL[128]; /* 0x31.0000 */ + io7_csr rsvd3[123]; + io7_csr HLT_CTL; /* 0x31.3ec0 */ + io7_csr HPI_CTL; /* 0x31.3f00 */ + io7_csr CRD_CTL; + io7_csr STV_CTL; + io7_csr HEI_CTL; + io7_csr PO7_MSI_CTL[16]; /* 0x31.4000 */ + io7_csr rsvd4[240]; + + /* + * Interrupt Diagnostic / Test + */ + struct { + io7_csr INT_PND; + io7_csr INT_CLR; + io7_csr INT_EOI; + io7_csr rsvd[29]; + } INT_DIAG[4]; + io7_csr rsvd5[125]; /* 0x31.a000 */ + io7_csr MISC_PND; /* 0x31.b800 */ + io7_csr rsvd6[31]; + io7_csr MSI_PND[16]; /* 0x31.c000 */ + io7_csr rsvd7[16]; + io7_csr MSI_CLR[16]; /* 0x31.c800 */ +} io7_port7_csrs; + +/* + * IO7 DMA Window Base register (POx_WBASEx) + */ +#define wbase_m_ena 0x1 +#define wbase_m_sg 0x2 +#define wbase_m_dac 0x4 +#define wbase_m_addr 0xFFF00000 +union IO7_POx_WBASE { + struct { + unsigned ena : 1; /* <0> */ + unsigned sg : 1; /* <1> */ + unsigned dac : 1; /* <2> -- window 3 only */ + unsigned rsvd1 : 17; + unsigned addr : 12; /* <31:20> */ + unsigned rsvd2 : 32; + } bits; + unsigned as_long[2]; + unsigned as_quad; +}; + +/* + * IO7 IID (Interrupt IDentifier) format + * + * For level-sensative interrupts, int_num is encoded as: + * + * bus/port slot/device INTx + * <7:5> <4:2> <1:0> + */ +union IO7_IID { + struct { + 
unsigned int_num : 9; /* <8:0> */ + unsigned tpu_mask : 4; /* <12:9> rsvd */ + unsigned msi : 1; /* 13 */ + unsigned ipe : 10; /* <23:14> */ + unsigned long rsvd : 40; + } bits; + unsigned int as_long[2]; + unsigned long as_quad; +}; + +/* + * IO7 addressing macros + */ +#define IO7_KERN_ADDR(addr) (EV7_KERN_ADDR(addr)) + +#define IO7_PORT_MASK 0x07UL /* 3 bits of port */ + +#define IO7_IPE(pe) (EV7_IPE(pe)) +#define IO7_IPORT(port) ((~((long)(port)) & IO7_PORT_MASK) << 32) + +#define IO7_HOSE(pe, port) (IO7_IPE(pe) | IO7_IPORT(port)) + +#define IO7_MEM_PHYS(pe, port) (IO7_HOSE(pe, port) | 0x00000000UL) +#define IO7_CONF_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFE000000UL) +#define IO7_IO_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFF000000UL) +#define IO7_CSR_PHYS(pe, port, off) \ + (IO7_HOSE(pe, port) | 0xFF800000UL | (off)) +#define IO7_CSRS_PHYS(pe, port) (IO7_CSR_PHYS(pe, port, 0UL)) +#define IO7_PORT7_CSRS_PHYS(pe) (IO7_CSR_PHYS(pe, 7, 0x300000UL)) + +#define IO7_MEM_KERN(pe, port) (IO7_KERN_ADDR(IO7_MEM_PHYS(pe, port))) +#define IO7_CONF_KERN(pe, port) (IO7_KERN_ADDR(IO7_CONF_PHYS(pe, port))) +#define IO7_IO_KERN(pe, port) (IO7_KERN_ADDR(IO7_IO_PHYS(pe, port))) +#define IO7_CSR_KERN(pe, port, off) (IO7_KERN_ADDR(IO7_CSR_PHYS(pe,port,off))) +#define IO7_CSRS_KERN(pe, port) (IO7_KERN_ADDR(IO7_CSRS_PHYS(pe, port))) +#define IO7_PORT7_CSRS_KERN(pe) (IO7_KERN_ADDR(IO7_PORT7_CSRS_PHYS(pe))) + +#define IO7_PLL_RNGA(pll) (((pll) >> 3) & 0x7) +#define IO7_PLL_RNGB(pll) (((pll) >> 6) & 0x7) + +#define IO7_MEM_SPACE (2UL * 1024 * 1024 * 1024) /* 2GB MEM */ +#define IO7_IO_SPACE (8UL * 1024 * 1024) /* 8MB I/O */ + + +/* + * Offset between ram physical addresses and pci64 DAC addresses + */ +#define IO7_DAC_OFFSET (1UL << 49) + +/* + * This is needed to satisify the IO() macro used in initializing the machvec + */ +#define MARVEL_IACK_SC \ + ((unsigned long) \ + (&(((io7_ioport_csrs *)IO7_CSRS_KERN(0, 0))->POx_IACK_SPECIAL))) + +#ifdef __KERNEL__ + +/* + * IO7 structs + */ +#define IO7_NUM_PORTS 4 +#define IO7_AGP_PORT 3 + +struct io7_port { + struct io7 *io7; + struct pci_controller *hose; + + int enabled; + unsigned int port; + io7_ioport_csrs *csrs; + + unsigned long saved_wbase[4]; + unsigned long saved_wmask[4]; + unsigned long saved_tbase[4]; +}; + +struct io7 { + struct io7 *next; + + unsigned int pe; + io7_port7_csrs *csrs; + struct io7_port ports[IO7_NUM_PORTS]; + + spinlock_t irq_lock; +}; + +#ifndef __EXTERN_INLINE +# define __EXTERN_INLINE extern inline +# define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions. All access through linear space. + */ + +/* + * Memory functions. All accesses through linear space. 
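+ *
+ * Since the space is linear, the __iomem cookie is dereferenced as-is;
+ * there is no sparse-space shifting and no HAE juggling here, unlike
+ * the LCA/MCPCIA/T2 routines elsewhere in this series.  A 16-bit read,
+ * for example, reduces to little more than (a sketch, mirroring
+ * marvel_ioread16() below):
+ *
+ *	u16 v = __kernel_ldwu(*(volatile u16 __force *)addr);
+ *
+ * Byte accesses are the exception and go through the out-of-line
+ * marvel_ioread8()/marvel_iowrite8() helpers declared below.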
+ */ + +#define vucp volatile unsigned char __force * +#define vusp volatile unsigned short __force * + +extern unsigned int marvel_ioread8(void __iomem *); +extern void marvel_iowrite8(u8 b, void __iomem *); + +__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr) +{ + return __kernel_ldwu(*(vusp)addr); +} + +__EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr) +{ + __kernel_stw(b, *(vusp)addr); +} + +extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size); +extern void marvel_iounmap(volatile void __iomem *addr); +extern void __iomem *marvel_ioportmap (unsigned long addr); + +__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr) +{ + return (addr >> 40) & 1; +} + +extern int marvel_is_mmio(const volatile void __iomem *); + +#undef vucp +#undef vusp + +#undef __IO_PREFIX +#define __IO_PREFIX marvel +#define marvel_trivial_rw_bw 1 +#define marvel_trivial_rw_lq 1 +#define marvel_trivial_io_bw 0 +#define marvel_trivial_io_lq 1 +#define marvel_trivial_iounmap 0 +#include + +#ifdef __IO_EXTERN_INLINE +# undef __EXTERN_INLINE +# undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_MARVEL__H__ */ diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h new file mode 100644 index 00000000000..acf55b48347 --- /dev/null +++ b/arch/alpha/include/asm/core_mcpcia.h @@ -0,0 +1,381 @@ +#ifndef __ALPHA_MCPCIA__H__ +#define __ALPHA_MCPCIA__H__ + +/* Define to experiment with fitting everything into one 128MB HAE window. + One window per bus, that is. */ +#define MCPCIA_ONE_HAE_WINDOW 1 + +#include +#include +#include + +/* + * MCPCIA is the internal name for a core logic chipset which provides + * PCI access for the RAWHIDE family of systems. + * + * This file is based on: + * + * RAWHIDE System Programmer's Manual + * 16-May-96 + * Rev. 1.4 + * + */ + +/*------------------------------------------------------------------------** +** ** +** I/O procedures ** +** ** +** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers ** +** inportbxt: 8 bits only ** +** inport: alias of inportw ** +** outport: alias of outportw ** +** ** +** inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers ** +** inmembxt: 8 bits only ** +** inmem: alias of inmemw ** +** outmem: alias of outmemw ** +** ** +**------------------------------------------------------------------------*/ + + +/* MCPCIA ADDRESS BIT DEFINITIONS + * + * 3333 3333 3322 2222 2222 1111 1111 11 + * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210 + * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- + * 1 000 + * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- + * | |\| + * | Byte Enable --+ | + * | Transfer Length --+ + * +-- IO space, not cached + * + * Byte Transfer + * Enable Length Transfer Byte Address + * adr<6:5> adr<4:3> Length Enable Adder + * --------------------------------------------- + * 00 00 Byte 1110 0x000 + * 01 00 Byte 1101 0x020 + * 10 00 Byte 1011 0x040 + * 11 00 Byte 0111 0x060 + * + * 00 01 Word 1100 0x008 + * 01 01 Word 1001 0x028 <= Not supported in this code. + * 10 01 Word 0011 0x048 + * + * 00 10 Tribyte 1000 0x010 + * 01 10 Tribyte 0001 0x030 + * + * 10 11 Longword 0000 0x058 + * + * Note that byte enables are asserted low. + * + */ + +#define MCPCIA_MAX_HOSES 4 + +#define MCPCIA_MID(m) ((unsigned long)(m) << 33) + +/* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. + Durango adds PCI2 and PCI3 at MID 6 and 7 respectively. 
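+   For example, hose 1 is MCPCIA_HOSE2MID(1) == MID 5, and
+   MCPCIA_MID(5) == 5UL << 33 == 0xa00000000, so that hose's sparse
+   memory window (MCPCIA_SPARSE() below) starts at
+   IDENT_ADDR + 0xf000000000 + 0xa00000000 == IDENT_ADDR + 0xfa00000000.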
*/ +#define MCPCIA_HOSE2MID(h) ((h) + 4) + +#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */ + +/* + * Memory spaces: + */ +#define MCPCIA_SPARSE(m) (IDENT_ADDR + 0xf000000000UL + MCPCIA_MID(m)) +#define MCPCIA_DENSE(m) (IDENT_ADDR + 0xf100000000UL + MCPCIA_MID(m)) +#define MCPCIA_IO(m) (IDENT_ADDR + 0xf180000000UL + MCPCIA_MID(m)) +#define MCPCIA_CONF(m) (IDENT_ADDR + 0xf1c0000000UL + MCPCIA_MID(m)) +#define MCPCIA_CSR(m) (IDENT_ADDR + 0xf1e0000000UL + MCPCIA_MID(m)) +#define MCPCIA_IO_IACK(m) (IDENT_ADDR + 0xf1f0000000UL + MCPCIA_MID(m)) +#define MCPCIA_DENSE_IO(m) (IDENT_ADDR + 0xe1fc000000UL + MCPCIA_MID(m)) +#define MCPCIA_DENSE_CONF(m) (IDENT_ADDR + 0xe1fe000000UL + MCPCIA_MID(m)) + +/* + * General Registers + */ +#define MCPCIA_REV(m) (MCPCIA_CSR(m) + 0x000) +#define MCPCIA_WHOAMI(m) (MCPCIA_CSR(m) + 0x040) +#define MCPCIA_PCI_LAT(m) (MCPCIA_CSR(m) + 0x080) +#define MCPCIA_CAP_CTRL(m) (MCPCIA_CSR(m) + 0x100) +#define MCPCIA_HAE_MEM(m) (MCPCIA_CSR(m) + 0x400) +#define MCPCIA_HAE_IO(m) (MCPCIA_CSR(m) + 0x440) +#define _MCPCIA_IACK_SC(m) (MCPCIA_CSR(m) + 0x480) +#define MCPCIA_HAE_DENSE(m) (MCPCIA_CSR(m) + 0x4C0) + +/* + * Interrupt Control registers + */ +#define MCPCIA_INT_CTL(m) (MCPCIA_CSR(m) + 0x500) +#define MCPCIA_INT_REQ(m) (MCPCIA_CSR(m) + 0x540) +#define MCPCIA_INT_TARG(m) (MCPCIA_CSR(m) + 0x580) +#define MCPCIA_INT_ADR(m) (MCPCIA_CSR(m) + 0x5C0) +#define MCPCIA_INT_ADR_EXT(m) (MCPCIA_CSR(m) + 0x600) +#define MCPCIA_INT_MASK0(m) (MCPCIA_CSR(m) + 0x640) +#define MCPCIA_INT_MASK1(m) (MCPCIA_CSR(m) + 0x680) +#define MCPCIA_INT_ACK0(m) (MCPCIA_CSR(m) + 0x10003f00) +#define MCPCIA_INT_ACK1(m) (MCPCIA_CSR(m) + 0x10003f40) + +/* + * Performance Monitor registers + */ +#define MCPCIA_PERF_MON(m) (MCPCIA_CSR(m) + 0x300) +#define MCPCIA_PERF_CONT(m) (MCPCIA_CSR(m) + 0x340) + +/* + * Diagnostic Registers + */ +#define MCPCIA_CAP_DIAG(m) (MCPCIA_CSR(m) + 0x700) +#define MCPCIA_TOP_OF_MEM(m) (MCPCIA_CSR(m) + 0x7C0) + +/* + * Error registers + */ +#define MCPCIA_MC_ERR0(m) (MCPCIA_CSR(m) + 0x800) +#define MCPCIA_MC_ERR1(m) (MCPCIA_CSR(m) + 0x840) +#define MCPCIA_CAP_ERR(m) (MCPCIA_CSR(m) + 0x880) +#define MCPCIA_PCI_ERR1(m) (MCPCIA_CSR(m) + 0x1040) +#define MCPCIA_MDPA_STAT(m) (MCPCIA_CSR(m) + 0x4000) +#define MCPCIA_MDPA_SYN(m) (MCPCIA_CSR(m) + 0x4040) +#define MCPCIA_MDPA_DIAG(m) (MCPCIA_CSR(m) + 0x4080) +#define MCPCIA_MDPB_STAT(m) (MCPCIA_CSR(m) + 0x8000) +#define MCPCIA_MDPB_SYN(m) (MCPCIA_CSR(m) + 0x8040) +#define MCPCIA_MDPB_DIAG(m) (MCPCIA_CSR(m) + 0x8080) + +/* + * PCI Address Translation Registers. + */ +#define MCPCIA_SG_TBIA(m) (MCPCIA_CSR(m) + 0x1300) +#define MCPCIA_HBASE(m) (MCPCIA_CSR(m) + 0x1340) + +#define MCPCIA_W0_BASE(m) (MCPCIA_CSR(m) + 0x1400) +#define MCPCIA_W0_MASK(m) (MCPCIA_CSR(m) + 0x1440) +#define MCPCIA_T0_BASE(m) (MCPCIA_CSR(m) + 0x1480) + +#define MCPCIA_W1_BASE(m) (MCPCIA_CSR(m) + 0x1500) +#define MCPCIA_W1_MASK(m) (MCPCIA_CSR(m) + 0x1540) +#define MCPCIA_T1_BASE(m) (MCPCIA_CSR(m) + 0x1580) + +#define MCPCIA_W2_BASE(m) (MCPCIA_CSR(m) + 0x1600) +#define MCPCIA_W2_MASK(m) (MCPCIA_CSR(m) + 0x1640) +#define MCPCIA_T2_BASE(m) (MCPCIA_CSR(m) + 0x1680) + +#define MCPCIA_W3_BASE(m) (MCPCIA_CSR(m) + 0x1700) +#define MCPCIA_W3_MASK(m) (MCPCIA_CSR(m) + 0x1740) +#define MCPCIA_T3_BASE(m) (MCPCIA_CSR(m) + 0x1780) + +/* Hack! Only words for bus 0. 
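+   (That is, MCPCIA_HAE_ADDRESS and MCPCIA_IACK_SC below are hard-wired
+   to MID 4, which MCPCIA_HOSE2MID() assigns to hose 0; the other hoses
+   would need their own MCPCIA_HAE_MEM()/_MCPCIA_IACK_SC() instances.)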
*/ + +#ifndef MCPCIA_ONE_HAE_WINDOW +#define MCPCIA_HAE_ADDRESS MCPCIA_HAE_MEM(4) +#endif +#define MCPCIA_IACK_SC _MCPCIA_IACK_SC(4) + +/* + * The canonical non-remaped I/O and MEM addresses have these values + * subtracted out. This is arranged so that folks manipulating ISA + * devices can use their familiar numbers and have them map to bus 0. + */ + +#define MCPCIA_IO_BIAS MCPCIA_IO(4) +#define MCPCIA_MEM_BIAS MCPCIA_DENSE(4) + +/* Offset between ram physical addresses and pci64 DAC bus addresses. */ +#define MCPCIA_DAC_OFFSET (1UL << 40) + +/* + * Data structure for handling MCPCIA machine checks: + */ +struct el_MCPCIA_uncorrected_frame_mcheck { + struct el_common header; + struct el_common_EV5_uncorrectable_mcheck procdata; +}; + + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164) + * and EV56 (21164a) processors, can use either a sparse address mapping + * scheme, or the so-called byte-word PCI address space, to get at PCI memory + * and I/O. + * + * Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE. + */ + +/* + * Memory functions. 64-bit and 32-bit accesses are done through + * dense memory space, everything else through sparse space. + * + * For reading and writing 8 and 16 bit quantities we need to + * go through one of the three sparse address mapping regions + * and use the HAE_MEM CSR to provide some bits of the address. + * The following few routines use only sparse address region 1 + * which gives 1Gbyte of accessible space which relates exactly + * to the amount of PCI memory mapping *into* system address space. + * See p 6-17 of the specification but it looks something like this: + * + * 21164 Address: + * + * 3 2 1 + * 9876543210987654321098765432109876543210 + * 1ZZZZ0.PCI.QW.Address............BBLL + * + * ZZ = SBZ + * BB = Byte offset + * LL = Transfer length + * + * PCI Address: + * + * 3 2 1 + * 10987654321098765432109876543210 + * HHH....PCI.QW.Address........ 
00 + * + * HHH = 31:29 HAE_MEM CSR + * + */ + +#define vip volatile int __force * +#define vuip volatile unsigned int __force * + +#ifdef MCPCIA_ONE_HAE_WINDOW +#define MCPCIA_FROB_MMIO \ + if (__mcpcia_is_mmio(hose)) { \ + set_hae(hose & 0xffffffff); \ + hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ + } +#else +#define MCPCIA_FROB_MMIO \ + if (__mcpcia_is_mmio(hose)) { \ + hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ + } +#endif + +extern inline int __mcpcia_is_mmio(unsigned long addr) +{ + return (addr & 0x80000000UL) == 0; +} + +__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; + unsigned long result; + + MCPCIA_FROB_MMIO; + + result = *(vip) ((addr << 5) + hose + 0x00); + return __kernel_extbl(result, addr & 3); +} + +__EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; + unsigned long w; + + MCPCIA_FROB_MMIO; + + w = __kernel_insbl(b, addr & 3); + *(vuip) ((addr << 5) + hose + 0x00) = w; +} + +__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; + unsigned long result; + + MCPCIA_FROB_MMIO; + + result = *(vip) ((addr << 5) + hose + 0x08); + return __kernel_extwl(result, addr & 3); +} + +__EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; + unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; + unsigned long w; + + MCPCIA_FROB_MMIO; + + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + hose + 0x08) = w; +} + +__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr; + + if (!__mcpcia_is_mmio(addr)) + addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; + + return *(vuip)addr; +} + +__EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr; + + if (!__mcpcia_is_mmio(addr)) + addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; + + *(vuip)addr = b; +} + + +__EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + MCPCIA_IO_BIAS); +} + +__EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + MCPCIA_MEM_BIAS); +} + +__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr) +{ + return addr >= MCPCIA_SPARSE(0); +} + +__EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + return __mcpcia_is_mmio(addr); +} + +#undef MCPCIA_FROB_MMIO + +#undef vip +#undef vuip + +#undef __IO_PREFIX +#define __IO_PREFIX mcpcia +#define mcpcia_trivial_rw_bw 2 +#define mcpcia_trivial_rw_lq 1 +#define mcpcia_trivial_io_bw 0 +#define mcpcia_trivial_io_lq 0 +#define mcpcia_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_MCPCIA__H__ */ diff --git a/arch/alpha/include/asm/core_polaris.h b/arch/alpha/include/asm/core_polaris.h new file mode 100644 index 00000000000..2f966b64659 --- /dev/null +++ b/arch/alpha/include/asm/core_polaris.h @@ -0,0 +1,110 @@ +#ifndef __ALPHA_POLARIS__H__ 
+#define __ALPHA_POLARIS__H__ + +#include +#include + +/* + * POLARIS is the internal name for a core logic chipset which provides + * memory controller and PCI access for the 21164PC chip based systems. + * + * This file is based on: + * + * Polaris System Controller + * Device Functional Specification + * 22-Jan-98 + * Rev. 4.2 + * + */ + +/* Polaris memory regions */ +#define POLARIS_SPARSE_MEM_BASE (IDENT_ADDR + 0xf800000000UL) +#define POLARIS_DENSE_MEM_BASE (IDENT_ADDR + 0xf900000000UL) +#define POLARIS_SPARSE_IO_BASE (IDENT_ADDR + 0xf980000000UL) +#define POLARIS_SPARSE_CONFIG_BASE (IDENT_ADDR + 0xf9c0000000UL) +#define POLARIS_IACK_BASE (IDENT_ADDR + 0xf9f8000000UL) +#define POLARIS_DENSE_IO_BASE (IDENT_ADDR + 0xf9fc000000UL) +#define POLARIS_DENSE_CONFIG_BASE (IDENT_ADDR + 0xf9fe000000UL) + +#define POLARIS_IACK_SC POLARIS_IACK_BASE + +/* The Polaris command/status registers live in PCI Config space for + * bus 0/device 0. As such, they may be bytes, words, or doublewords. + */ +#define POLARIS_W_VENID (POLARIS_DENSE_CONFIG_BASE) +#define POLARIS_W_DEVID (POLARIS_DENSE_CONFIG_BASE+2) +#define POLARIS_W_CMD (POLARIS_DENSE_CONFIG_BASE+4) +#define POLARIS_W_STATUS (POLARIS_DENSE_CONFIG_BASE+6) + +/* + * Data structure for handling POLARIS machine checks: + */ +struct el_POLARIS_sysdata_mcheck { + u_long psc_status; + u_long psc_pcictl0; + u_long psc_pcictl1; + u_long psc_pcictl2; +}; + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * POLARIS, the PCI/memory support chipset for the PCA56 (21164PC) + * processors, can use either a sparse address mapping scheme, or the + * so-called byte-word PCI address space, to get at PCI memory and I/O. + * + * However, we will support only the BWX form. + */ + +/* + * Memory functions. Polaris allows all accesses (byte/word + * as well as long/quad) to be done through dense space. + * + * We will only support DENSE access via BWX insns. + */ + +__EXTERN_INLINE void __iomem *polaris_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + POLARIS_DENSE_IO_BASE); +} + +__EXTERN_INLINE void __iomem *polaris_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + POLARIS_DENSE_MEM_BASE); +} + +__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr) +{ + return addr >= POLARIS_SPARSE_MEM_BASE; +} + +__EXTERN_INLINE int polaris_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr < POLARIS_SPARSE_IO_BASE; +} + +#undef __IO_PREFIX +#define __IO_PREFIX polaris +#define polaris_trivial_rw_bw 1 +#define polaris_trivial_rw_lq 1 +#define polaris_trivial_io_bw 1 +#define polaris_trivial_io_lq 1 +#define polaris_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_POLARIS__H__ */ diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h new file mode 100644 index 00000000000..46bfff58f67 --- /dev/null +++ b/arch/alpha/include/asm/core_t2.h @@ -0,0 +1,633 @@ +#ifndef __ALPHA_T2__H__ +#define __ALPHA_T2__H__ + +#include +#include +#include +#include + +/* + * T2 is the internal name for the core logic chipset which provides + * memory controller and PCI access for the SABLE-based systems. + * + * This file is based on: + * + * SABLE I/O Specification + * Revision/Update Information: 1.3 + * + * jestabro@amt.tay1.dec.com Initial Version. 
+ * + */ + +#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */ + +/* GAMMA-SABLE is a SABLE with EV5-based CPUs */ +/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */ +#define _GAMMA_BIAS 0x8000000000UL + +#if defined(CONFIG_ALPHA_GENERIC) +#define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias +#elif defined(CONFIG_ALPHA_GAMMA) +#define GAMMA_BIAS _GAMMA_BIAS +#else +#define GAMMA_BIAS 0 +#endif + +/* + * Memory spaces: + */ +#define T2_CONF (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL) +#define T2_IO (IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL) +#define T2_SPARSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x200000000UL) +#define T2_DENSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL) + +#define T2_IOCSR (IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL) +#define T2_CERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL) +#define T2_CERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL) +#define T2_CERR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL) +#define T2_PERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL) +#define T2_PERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL) +#define T2_PSCR (IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL) +#define T2_HAE_1 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL) +#define T2_HAE_2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL) +#define T2_HBASE (IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL) +#define T2_WBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL) +#define T2_WMASK1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL) +#define T2_TBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL) +#define T2_WBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL) +#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL) +#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL) +#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL) +#define T2_IVR (IDENT_ADDR + GAMMA_BIAS + 0x38e000220UL) +#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL) +#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL) + +/* The CSRs below are T3/T4 only */ +#define T2_WBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000280UL) +#define T2_WMASK3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002a0UL) +#define T2_TBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002c0UL) + +#define T2_TDR0 (IDENT_ADDR + GAMMA_BIAS + 0x38e000300UL) +#define T2_TDR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000320UL) +#define T2_TDR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000340UL) +#define T2_TDR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000360UL) +#define T2_TDR4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000380UL) +#define T2_TDR5 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003a0UL) +#define T2_TDR6 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003c0UL) +#define T2_TDR7 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003e0UL) + +#define T2_WBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000400UL) +#define T2_WMASK4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000420UL) +#define T2_TBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000440UL) + +#define T2_AIR (IDENT_ADDR + GAMMA_BIAS + 0x38e000460UL) +#define T2_VAR (IDENT_ADDR + GAMMA_BIAS + 0x38e000480UL) +#define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL) +#define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL) + +#define T2_HAE_ADDRESS T2_HAE_1 + +/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to + 3.8fff.ffff + * + * +--------------+ 3 8000 0000 + * | CPU 0 CSRs | + * +--------------+ 3 8100 0000 + * | CPU 1 CSRs | + * +--------------+ 3 8200 0000 + * | CPU 2 CSRs | + * +--------------+ 3 8300 0000 + * | CPU 3 CSRs | + * +--------------+ 3 8400 0000 + * | CPU Reserved | + * +--------------+ 3 8700 0000 + * | Mem Reserved | + * +--------------+ 3 8800 0000 + * | Mem 0 CSRs | + * +--------------+ 3 8900 0000 + * 
| Mem 1 CSRs | + * +--------------+ 3 8a00 0000 + * | Mem 2 CSRs | + * +--------------+ 3 8b00 0000 + * | Mem 3 CSRs | + * +--------------+ 3 8c00 0000 + * | Mem Reserved | + * +--------------+ 3 8e00 0000 + * | PCI Bridge | + * +--------------+ 3 8f00 0000 + * | Expansion IO | + * +--------------+ 3 9000 0000 + * + * + */ +#define T2_CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L) +#define T2_CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L) +#define T2_CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L) +#define T2_CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L) + +#define T2_CPUn_BASE(n) (T2_CPU0_BASE + (((n)&3) * 0x001000000L)) + +#define T2_MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L) +#define T2_MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L) +#define T2_MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L) +#define T2_MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L) + + +/* + * Sable CPU Module CSRS + * + * These are CSRs for hardware other than the CPU chip on the CPU module. + * The CPU module has Backup Cache control logic, Cbus control logic, and + * interrupt control logic on it. There is a duplicate tag store to speed + * up maintaining cache coherency. + */ + +struct sable_cpu_csr { + unsigned long bcc; long fill_00[3]; /* Backup Cache Control */ + unsigned long bcce; long fill_01[3]; /* Backup Cache Correctable Error */ + unsigned long bccea; long fill_02[3]; /* B-Cache Corr Err Address Latch */ + unsigned long bcue; long fill_03[3]; /* B-Cache Uncorrectable Error */ + unsigned long bcuea; long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */ + unsigned long dter; long fill_05[3]; /* Duplicate Tag Error */ + unsigned long cbctl; long fill_06[3]; /* CBus Control */ + unsigned long cbe; long fill_07[3]; /* CBus Error */ + unsigned long cbeal; long fill_08[3]; /* CBus Error Addr Latch low */ + unsigned long cbeah; long fill_09[3]; /* CBus Error Addr Latch high */ + unsigned long pmbx; long fill_10[3]; /* Processor Mailbox */ + unsigned long ipir; long fill_11[3]; /* Inter-Processor Int Request */ + unsigned long sic; long fill_12[3]; /* System Interrupt Clear */ + unsigned long adlk; long fill_13[3]; /* Address Lock (LDxL/STxC) */ + unsigned long madrl; long fill_14[3]; /* CBus Miss Address */ + unsigned long rev; long fill_15[3]; /* CMIC Revision */ +}; + +/* + * Data structure for handling T2 machine checks: + */ +struct el_t2_frame_header { + unsigned int elcf_fid; /* Frame ID (from above) */ + unsigned int elcf_size; /* Size of frame in bytes */ +}; + +struct el_t2_procdata_mcheck { + unsigned long elfmc_paltemp[32]; /* PAL TEMP REGS. */ + /* EV4-specific fields */ + unsigned long elfmc_exc_addr; /* Addr of excepting insn. */ + unsigned long elfmc_exc_sum; /* Summary of arith traps. */ + unsigned long elfmc_exc_mask; /* Exception mask (from exc_sum). */ + unsigned long elfmc_iccsr; /* IBox hardware enables. */ + unsigned long elfmc_pal_base; /* Base address for PALcode. */ + unsigned long elfmc_hier; /* Hardware Interrupt Enable. */ + unsigned long elfmc_hirr; /* Hardware Interrupt Request. */ + unsigned long elfmc_mm_csr; /* D-stream fault info. */ + unsigned long elfmc_dc_stat; /* D-cache status (ECC/Parity Err). */ + unsigned long elfmc_dc_addr; /* EV3 Phys Addr for ECC/DPERR. */ + unsigned long elfmc_abox_ctl; /* ABox Control Register. */ + unsigned long elfmc_biu_stat; /* BIU Status. */ + unsigned long elfmc_biu_addr; /* BUI Address. */ + unsigned long elfmc_biu_ctl; /* BIU Control. */ + unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. 
*/ + unsigned long elfmc_fill_addr;/* Cache block which was being read. */ + unsigned long elfmc_va; /* Effective VA of fault or miss. */ + unsigned long elfmc_bc_tag; /* Backup Cache Tag Probe Results. */ +}; + +/* + * Sable processor specific Machine Check Data segment. + */ + +struct el_t2_logout_header { + unsigned int elfl_size; /* size in bytes of logout area. */ + unsigned int elfl_sbz1:31; /* Should be zero. */ + unsigned int elfl_retry:1; /* Retry flag. */ + unsigned int elfl_procoffset; /* Processor-specific offset. */ + unsigned int elfl_sysoffset; /* Offset of system-specific. */ + unsigned int elfl_error_type; /* PAL error type code. */ + unsigned int elfl_frame_rev; /* PAL Frame revision. */ +}; +struct el_t2_sysdata_mcheck { + unsigned long elcmc_bcc; /* CSR 0 */ + unsigned long elcmc_bcce; /* CSR 1 */ + unsigned long elcmc_bccea; /* CSR 2 */ + unsigned long elcmc_bcue; /* CSR 3 */ + unsigned long elcmc_bcuea; /* CSR 4 */ + unsigned long elcmc_dter; /* CSR 5 */ + unsigned long elcmc_cbctl; /* CSR 6 */ + unsigned long elcmc_cbe; /* CSR 7 */ + unsigned long elcmc_cbeal; /* CSR 8 */ + unsigned long elcmc_cbeah; /* CSR 9 */ + unsigned long elcmc_pmbx; /* CSR 10 */ + unsigned long elcmc_ipir; /* CSR 11 */ + unsigned long elcmc_sic; /* CSR 12 */ + unsigned long elcmc_adlk; /* CSR 13 */ + unsigned long elcmc_madrl; /* CSR 14 */ + unsigned long elcmc_crrev4; /* CSR 15 */ +}; + +/* + * Sable memory error frame - sable pfms section 3.42 + */ +struct el_t2_data_memory { + struct el_t2_frame_header elcm_hdr; /* ID$MEM-FERR = 0x08 */ + unsigned int elcm_module; /* Module id. */ + unsigned int elcm_res04; /* Reserved. */ + unsigned long elcm_merr; /* CSR0: Error Reg 1. */ + unsigned long elcm_mcmd1; /* CSR1: Command Trap 1. */ + unsigned long elcm_mcmd2; /* CSR2: Command Trap 2. */ + unsigned long elcm_mconf; /* CSR3: Configuration. */ + unsigned long elcm_medc1; /* CSR4: EDC Status 1. */ + unsigned long elcm_medc2; /* CSR5: EDC Status 2. */ + unsigned long elcm_medcc; /* CSR6: EDC Control. */ + unsigned long elcm_msctl; /* CSR7: Stream Buffer Control. */ + unsigned long elcm_mref; /* CSR8: Refresh Control. */ + unsigned long elcm_filter; /* CSR9: CRD Filter Control. 
*/ +}; + + +/* + * Sable other CPU error frame - sable pfms section 3.43 + */ +struct el_t2_data_other_cpu { + short elco_cpuid; /* CPU ID */ + short elco_res02[3]; + unsigned long elco_bcc; /* CSR 0 */ + unsigned long elco_bcce; /* CSR 1 */ + unsigned long elco_bccea; /* CSR 2 */ + unsigned long elco_bcue; /* CSR 3 */ + unsigned long elco_bcuea; /* CSR 4 */ + unsigned long elco_dter; /* CSR 5 */ + unsigned long elco_cbctl; /* CSR 6 */ + unsigned long elco_cbe; /* CSR 7 */ + unsigned long elco_cbeal; /* CSR 8 */ + unsigned long elco_cbeah; /* CSR 9 */ + unsigned long elco_pmbx; /* CSR 10 */ + unsigned long elco_ipir; /* CSR 11 */ + unsigned long elco_sic; /* CSR 12 */ + unsigned long elco_adlk; /* CSR 13 */ + unsigned long elco_madrl; /* CSR 14 */ + unsigned long elco_crrev4; /* CSR 15 */ +}; + +/* + * Sable other CPU error frame - sable pfms section 3.44 + */ +struct el_t2_data_t2{ + struct el_t2_frame_header elct_hdr; /* ID$T2-FRAME */ + unsigned long elct_iocsr; /* IO Control and Status Register */ + unsigned long elct_cerr1; /* Cbus Error Register 1 */ + unsigned long elct_cerr2; /* Cbus Error Register 2 */ + unsigned long elct_cerr3; /* Cbus Error Register 3 */ + unsigned long elct_perr1; /* PCI Error Register 1 */ + unsigned long elct_perr2; /* PCI Error Register 2 */ + unsigned long elct_hae0_1; /* High Address Extension Register 1 */ + unsigned long elct_hae0_2; /* High Address Extension Register 2 */ + unsigned long elct_hbase; /* High Base Register */ + unsigned long elct_wbase1; /* Window Base Register 1 */ + unsigned long elct_wmask1; /* Window Mask Register 1 */ + unsigned long elct_tbase1; /* Translated Base Register 1 */ + unsigned long elct_wbase2; /* Window Base Register 2 */ + unsigned long elct_wmask2; /* Window Mask Register 2 */ + unsigned long elct_tbase2; /* Translated Base Register 2 */ + unsigned long elct_tdr0; /* TLB Data Register 0 */ + unsigned long elct_tdr1; /* TLB Data Register 1 */ + unsigned long elct_tdr2; /* TLB Data Register 2 */ + unsigned long elct_tdr3; /* TLB Data Register 3 */ + unsigned long elct_tdr4; /* TLB Data Register 4 */ + unsigned long elct_tdr5; /* TLB Data Register 5 */ + unsigned long elct_tdr6; /* TLB Data Register 6 */ + unsigned long elct_tdr7; /* TLB Data Register 7 */ +}; + +/* + * Sable error log data structure - sable pfms section 3.40 + */ +struct el_t2_data_corrected { + unsigned long elcpb_biu_stat; + unsigned long elcpb_biu_addr; + unsigned long elcpb_biu_ctl; + unsigned long elcpb_fill_syndrome; + unsigned long elcpb_fill_addr; + unsigned long elcpb_bc_tag; +}; + +/* + * Sable error log data structure + * Note there are 4 memory slots on sable (see t2.h) + */ +struct el_t2_frame_mcheck { + struct el_t2_frame_header elfmc_header; /* ID$P-FRAME_MCHECK */ + struct el_t2_logout_header elfmc_hdr; + struct el_t2_procdata_mcheck elfmc_procdata; + struct el_t2_sysdata_mcheck elfmc_sysdata; + struct el_t2_data_t2 elfmc_t2data; + struct el_t2_data_memory elfmc_memdata[4]; + struct el_t2_frame_header elfmc_footer; /* empty */ +}; + + +/* + * Sable error log data structures on memory errors + */ +struct el_t2_frame_corrected { + struct el_t2_frame_header elfcc_header; /* ID$P-BC-COR */ + struct el_t2_logout_header elfcc_hdr; + struct el_t2_data_corrected elfcc_procdata; +/* struct el_t2_data_t2 elfcc_t2data; */ +/* struct el_t2_data_memory elfcc_memdata[4]; */ + struct el_t2_frame_header elfcc_footer; /* empty */ +}; + + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE 
+#endif + +/* + * I/O functions: + * + * T2 (the core logic PCI/memory support chipset for the SABLE + * series of processors uses a sparse address mapping scheme to + * get at PCI memory and I/O. + */ + +#define vip volatile int * +#define vuip volatile unsigned int * + +extern inline u8 t2_inb(unsigned long addr) +{ + long result = *(vip) ((addr << 5) + T2_IO + 0x00); + return __kernel_extbl(result, addr & 3); +} + +extern inline void t2_outb(u8 b, unsigned long addr) +{ + unsigned long w; + + w = __kernel_insbl(b, addr & 3); + *(vuip) ((addr << 5) + T2_IO + 0x00) = w; + mb(); +} + +extern inline u16 t2_inw(unsigned long addr) +{ + long result = *(vip) ((addr << 5) + T2_IO + 0x08); + return __kernel_extwl(result, addr & 3); +} + +extern inline void t2_outw(u16 b, unsigned long addr) +{ + unsigned long w; + + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + T2_IO + 0x08) = w; + mb(); +} + +extern inline u32 t2_inl(unsigned long addr) +{ + return *(vuip) ((addr << 5) + T2_IO + 0x18); +} + +extern inline void t2_outl(u32 b, unsigned long addr) +{ + *(vuip) ((addr << 5) + T2_IO + 0x18) = b; + mb(); +} + + +/* + * Memory functions. + * + * For reading and writing 8 and 16 bit quantities we need to + * go through one of the three sparse address mapping regions + * and use the HAE_MEM CSR to provide some bits of the address. + * The following few routines use only sparse address region 1 + * which gives 1Gbyte of accessible space which relates exactly + * to the amount of PCI memory mapping *into* system address space. + * See p 6-17 of the specification but it looks something like this: + * + * 21164 Address: + * + * 3 2 1 + * 9876543210987654321098765432109876543210 + * 1ZZZZ0.PCI.QW.Address............BBLL + * + * ZZ = SBZ + * BB = Byte offset + * LL = Transfer length + * + * PCI Address: + * + * 3 2 1 + * 10987654321098765432109876543210 + * HHH....PCI.QW.Address........ 00 + * + * HHH = 31:29 HAE_MEM CSR + * + */ + +#define t2_set_hae { \ + msb = addr >> 27; \ + addr &= T2_MEM_R1_MASK; \ + set_hae(msb); \ +} + +extern spinlock_t t2_hae_lock; + +/* + * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since + * they may be called directly, rather than through the + * ioreadNN/iowriteNN routines. + */ + +__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long result, msb; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); + spin_unlock_irqrestore(&t2_hae_lock, flags); + return __kernel_extbl(result, addr & 3); +} + +__EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long result, msb; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); + spin_unlock_irqrestore(&t2_hae_lock, flags); + return __kernel_extwl(result, addr & 3); +} + +/* + * On SABLE with T2, we must use SPARSE memory even for 32-bit access, + * because we cannot access all of DENSE without changing its HAE. 
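+ *
+ * As a worked example (sketch only): for an offset of 0x08300040
+ * relative to T2_DENSE_MEM, t2_set_hae computes msb = 0x08300040 >> 27
+ * == 1 and addr = 0x08300040 & T2_MEM_R1_MASK == 0x00300040, so the
+ * load below actually hits
+ *
+ *	(0x00300040 << 5) + T2_SPARSE_MEM + 0x18
+ *		== T2_SPARSE_MEM + 0x06000818
+ *
+ * with HAE_MEM supplying the address bits stripped off by the mask and
+ * 0x18 being the low-order offset these routines use for longword
+ * transfers.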
+ */ +__EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long result, msb; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); + spin_unlock_irqrestore(&t2_hae_lock, flags); + return result & 0xffffffffUL; +} + +__EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long r0, r1, work, msb; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + work = (addr << 5) + T2_SPARSE_MEM + 0x18; + r0 = *(vuip)(work); + r1 = *(vuip)(work + (4 << 5)); + spin_unlock_irqrestore(&t2_hae_lock, flags); + return r1 << 32 | r0; +} + +__EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long msb, w; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + w = __kernel_insbl(b, addr & 3); + *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; + spin_unlock_irqrestore(&t2_hae_lock, flags); +} + +__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long msb, w; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + w = __kernel_inswl(b, addr & 3); + *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; + spin_unlock_irqrestore(&t2_hae_lock, flags); +} + +/* + * On SABLE with T2, we must use SPARSE memory even for 32-bit access, + * because we cannot access all of DENSE without changing its HAE. + */ +__EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long msb; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; + spin_unlock_irqrestore(&t2_hae_lock, flags); +} + +__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; + unsigned long msb, work; + unsigned long flags; + spin_lock_irqsave(&t2_hae_lock, flags); + + t2_set_hae; + + work = (addr << 5) + T2_SPARSE_MEM + 0x18; + *(vuip)work = b; + *(vuip)(work + (4 << 5)) = b >> 32; + spin_unlock_irqrestore(&t2_hae_lock, flags); +} + +__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + T2_IO); +} + +__EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + T2_DENSE_MEM); +} + +__EXTERN_INLINE int t2_is_ioaddr(unsigned long addr) +{ + return (long)addr >= 0; +} + +__EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= T2_DENSE_MEM; +} + +/* New-style ioread interface. The mmio routines are so ugly for T2 that + it doesn't make sense to merge the pio and mmio routines. 
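+   The dispatch keys off t2_is_mmio(): cookies from t2_ioremap() land at
+   or above T2_DENSE_MEM and take the t2_readX/t2_writeX (sparse/HAE)
+   path, while cookies from t2_ioportmap() sit in the T2_IO region below
+   it and fall through to t2_inX/t2_outX with the T2_IO bias stripped
+   back off.  For reference, IOPORT(b, 8) expands roughly to
+
+	unsigned int t2_ioread8(void __iomem *xaddr)
+	{
+		if (t2_is_mmio(xaddr))
+			return t2_readb(xaddr);
+		else
+			return t2_inb((unsigned long)xaddr - T2_IO);
+	}
+
+   plus the matching t2_iowrite8().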
*/ + +#define IOPORT(OS, NS) \ +__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \ +{ \ + if (t2_is_mmio(xaddr)) \ + return t2_read##OS(xaddr); \ + else \ + return t2_in##OS((unsigned long)xaddr - T2_IO); \ +} \ +__EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \ +{ \ + if (t2_is_mmio(xaddr)) \ + t2_write##OS(b, xaddr); \ + else \ + t2_out##OS(b, (unsigned long)xaddr - T2_IO); \ +} + +IOPORT(b, 8) +IOPORT(w, 16) +IOPORT(l, 32) + +#undef IOPORT + +#undef vip +#undef vuip + +#undef __IO_PREFIX +#define __IO_PREFIX t2 +#define t2_trivial_rw_bw 0 +#define t2_trivial_rw_lq 0 +#define t2_trivial_io_bw 0 +#define t2_trivial_io_lq 0 +#define t2_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_T2__H__ */ diff --git a/arch/alpha/include/asm/core_titan.h b/arch/alpha/include/asm/core_titan.h new file mode 100644 index 00000000000..a17f6f33b68 --- /dev/null +++ b/arch/alpha/include/asm/core_titan.h @@ -0,0 +1,410 @@ +#ifndef __ALPHA_TITAN__H__ +#define __ALPHA_TITAN__H__ + +#include +#include +#include + +/* + * TITAN is the internal names for a core logic chipset which provides + * memory controller and PCI/AGP access for 21264 based systems. + * + * This file is based on: + * + * Titan Chipset Engineering Specification + * Revision 0.12 + * 13 July 1999 + * + */ + +/* XXX: Do we need to conditionalize on this? */ +#ifdef USE_48_BIT_KSEG +#define TI_BIAS 0x80000000000UL +#else +#define TI_BIAS 0x10000000000UL +#endif + +/* + * CChip, DChip, and PChip registers + */ + +typedef struct { + volatile unsigned long csr __attribute__((aligned(64))); +} titan_64; + +typedef struct { + titan_64 csc; + titan_64 mtr; + titan_64 misc; + titan_64 mpd; + titan_64 aar0; + titan_64 aar1; + titan_64 aar2; + titan_64 aar3; + titan_64 dim0; + titan_64 dim1; + titan_64 dir0; + titan_64 dir1; + titan_64 drir; + titan_64 prben; + titan_64 iic0; + titan_64 iic1; + titan_64 mpr0; + titan_64 mpr1; + titan_64 mpr2; + titan_64 mpr3; + titan_64 rsvd[2]; + titan_64 ttr; + titan_64 tdr; + titan_64 dim2; + titan_64 dim3; + titan_64 dir2; + titan_64 dir3; + titan_64 iic2; + titan_64 iic3; + titan_64 pwr; + titan_64 reserved[17]; + titan_64 cmonctla; + titan_64 cmonctlb; + titan_64 cmoncnt01; + titan_64 cmoncnt23; + titan_64 cpen; +} titan_cchip; + +typedef struct { + titan_64 dsc; + titan_64 str; + titan_64 drev; + titan_64 dsc2; +} titan_dchip; + +typedef struct { + titan_64 wsba[4]; + titan_64 wsm[4]; + titan_64 tba[4]; + titan_64 pctl; + titan_64 plat; + titan_64 reserved0[2]; + union { + struct { + titan_64 serror; + titan_64 serren; + titan_64 serrset; + titan_64 reserved0; + titan_64 gperror; + titan_64 gperren; + titan_64 gperrset; + titan_64 reserved1; + titan_64 gtlbiv; + titan_64 gtlbia; + titan_64 reserved2[2]; + titan_64 sctl; + titan_64 reserved3[3]; + } g; + struct { + titan_64 agperror; + titan_64 agperren; + titan_64 agperrset; + titan_64 agplastwr; + titan_64 aperror; + titan_64 aperren; + titan_64 aperrset; + titan_64 reserved0; + titan_64 atlbiv; + titan_64 atlbia; + titan_64 reserved1[6]; + } a; + } port_specific; + titan_64 sprst; + titan_64 reserved1[31]; +} titan_pachip_port; + +typedef struct { + titan_pachip_port g_port; + titan_pachip_port a_port; +} titan_pachip; + +#define TITAN_cchip ((titan_cchip *)(IDENT_ADDR+TI_BIAS+0x1A0000000UL)) +#define TITAN_dchip ((titan_dchip *)(IDENT_ADDR+TI_BIAS+0x1B0000800UL)) +#define TITAN_pachip0 ((titan_pachip 
*)(IDENT_ADDR+TI_BIAS+0x180000000UL)) +#define TITAN_pachip1 ((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x380000000UL)) +extern unsigned TITAN_agp; +extern int TITAN_bootcpu; + +/* + * TITAN PA-chip Window Space Base Address register. + * (WSBA[0-2]) + */ +#define wsba_m_ena 0x1 +#define wsba_m_sg 0x2 +#define wsba_m_addr 0xFFF00000 +#define wmask_k_sz1gb 0x3FF00000 +union TPAchipWSBA { + struct { + unsigned wsba_v_ena : 1; + unsigned wsba_v_sg : 1; + unsigned wsba_v_rsvd1 : 18; + unsigned wsba_v_addr : 12; + unsigned wsba_v_rsvd2 : 32; + } wsba_r_bits; + int wsba_q_whole [2]; +}; + +/* + * TITAN PA-chip Control Register + * This definition covers both the G-Port GPCTL and the A-PORT APCTL. + * Bits <51:0> are the same in both cases. APCTL<63:52> are only + * applicable to AGP. + */ +#define pctl_m_fbtb 0x00000001 +#define pctl_m_thdis 0x00000002 +#define pctl_m_chaindis 0x00000004 +#define pctl_m_tgtlat 0x00000018 +#define pctl_m_hole 0x00000020 +#define pctl_m_mwin 0x00000040 +#define pctl_m_arbena 0x00000080 +#define pctl_m_prigrp 0x0000FF00 +#define pctl_m_ppri 0x00010000 +#define pctl_m_pcispd66 0x00020000 +#define pctl_m_cngstlt 0x003C0000 +#define pctl_m_ptpdesten 0x3FC00000 +#define pctl_m_dpcen 0x40000000 +#define pctl_m_apcen 0x0000000080000000UL +#define pctl_m_dcrtv 0x0000000300000000UL +#define pctl_m_en_stepping 0x0000000400000000UL +#define apctl_m_rsvd1 0x000FFFF800000000UL +#define apctl_m_agp_rate 0x0030000000000000UL +#define apctl_m_agp_sba_en 0x0040000000000000UL +#define apctl_m_agp_en 0x0080000000000000UL +#define apctl_m_rsvd2 0x0100000000000000UL +#define apctl_m_agp_present 0x0200000000000000UL +#define apctl_agp_hp_rd 0x1C00000000000000UL +#define apctl_agp_lp_rd 0xE000000000000000UL +#define gpctl_m_rsvd 0xFFFFFFF800000000UL +union TPAchipPCTL { + struct { + unsigned pctl_v_fbtb : 1; /* A/G [0] */ + unsigned pctl_v_thdis : 1; /* A/G [1] */ + unsigned pctl_v_chaindis : 1; /* A/G [2] */ + unsigned pctl_v_tgtlat : 2; /* A/G [4:3] */ + unsigned pctl_v_hole : 1; /* A/G [5] */ + unsigned pctl_v_mwin : 1; /* A/G [6] */ + unsigned pctl_v_arbena : 1; /* A/G [7] */ + unsigned pctl_v_prigrp : 8; /* A/G [15:8] */ + unsigned pctl_v_ppri : 1; /* A/G [16] */ + unsigned pctl_v_pcispd66 : 1; /* A/G [17] */ + unsigned pctl_v_cngstlt : 4; /* A/G [21:18] */ + unsigned pctl_v_ptpdesten : 8; /* A/G [29:22] */ + unsigned pctl_v_dpcen : 1; /* A/G [30] */ + unsigned pctl_v_apcen : 1; /* A/G [31] */ + unsigned pctl_v_dcrtv : 2; /* A/G [33:32] */ + unsigned pctl_v_en_stepping :1; /* A/G [34] */ + unsigned apctl_v_rsvd1 : 17; /* A [51:35] */ + unsigned apctl_v_agp_rate : 2; /* A [53:52] */ + unsigned apctl_v_agp_sba_en : 1; /* A [54] */ + unsigned apctl_v_agp_en : 1; /* A [55] */ + unsigned apctl_v_rsvd2 : 1; /* A [56] */ + unsigned apctl_v_agp_present : 1; /* A [57] */ + unsigned apctl_v_agp_hp_rd : 3; /* A [60:58] */ + unsigned apctl_v_agp_lp_rd : 3; /* A [63:61] */ + } pctl_r_bits; + unsigned int pctl_l_whole [2]; + unsigned long pctl_q_whole; +}; + +/* + * SERROR / SERREN / SERRSET + */ +union TPAchipSERR { + struct { + unsigned serr_v_lost_uecc : 1; /* [0] */ + unsigned serr_v_uecc : 1; /* [1] */ + unsigned serr_v_cre : 1; /* [2] */ + unsigned serr_v_nxio : 1; /* [3] */ + unsigned serr_v_lost_cre : 1; /* [4] */ + unsigned serr_v_rsvd0 : 10; /* [14:5] */ + unsigned serr_v_addr : 32; /* [46:15] */ + unsigned serr_v_rsvd1 : 5; /* [51:47] */ + unsigned serr_v_source : 2; /* [53:52] */ + unsigned serr_v_cmd : 2; /* [55:54] */ + unsigned serr_v_syn : 8; /* [63:56] */ + } serr_r_bits; + unsigned 
int serr_l_whole[2]; + unsigned long serr_q_whole; +}; + +/* + * GPERROR / APERROR / GPERREN / APERREN / GPERRSET / APERRSET + */ +union TPAchipPERR { + struct { + unsigned long perr_v_lost : 1; /* [0] */ + unsigned long perr_v_serr : 1; /* [1] */ + unsigned long perr_v_perr : 1; /* [2] */ + unsigned long perr_v_dcrto : 1; /* [3] */ + unsigned long perr_v_sge : 1; /* [4] */ + unsigned long perr_v_ape : 1; /* [5] */ + unsigned long perr_v_ta : 1; /* [6] */ + unsigned long perr_v_dpe : 1; /* [7] */ + unsigned long perr_v_nds : 1; /* [8] */ + unsigned long perr_v_iptpr : 1; /* [9] */ + unsigned long perr_v_iptpw : 1; /* [10] */ + unsigned long perr_v_rsvd0 : 3; /* [13:11] */ + unsigned long perr_v_addr : 33; /* [46:14] */ + unsigned long perr_v_dac : 1; /* [47] */ + unsigned long perr_v_mwin : 1; /* [48] */ + unsigned long perr_v_rsvd1 : 3; /* [51:49] */ + unsigned long perr_v_cmd : 4; /* [55:52] */ + unsigned long perr_v_rsvd2 : 8; /* [63:56] */ + } perr_r_bits; + unsigned int perr_l_whole[2]; + unsigned long perr_q_whole; +}; + +/* + * AGPERROR / AGPERREN / AGPERRSET + */ +union TPAchipAGPERR { + struct { + unsigned agperr_v_lost : 1; /* [0] */ + unsigned agperr_v_lpqfull : 1; /* [1] */ + unsigned apgerr_v_hpqfull : 1; /* [2] */ + unsigned agperr_v_rescmd : 1; /* [3] */ + unsigned agperr_v_ipte : 1; /* [4] */ + unsigned agperr_v_ptp : 1; /* [5] */ + unsigned agperr_v_nowindow : 1; /* [6] */ + unsigned agperr_v_rsvd0 : 8; /* [14:7] */ + unsigned agperr_v_addr : 32; /* [46:15] */ + unsigned agperr_v_rsvd1 : 1; /* [47] */ + unsigned agperr_v_dac : 1; /* [48] */ + unsigned agperr_v_mwin : 1; /* [49] */ + unsigned agperr_v_cmd : 3; /* [52:50] */ + unsigned agperr_v_length : 6; /* [58:53] */ + unsigned agperr_v_fence : 1; /* [59] */ + unsigned agperr_v_rsvd2 : 4; /* [63:60] */ + } agperr_r_bits; + unsigned int agperr_l_whole[2]; + unsigned long agperr_q_whole; +}; +/* + * Memory spaces: + * Hose numbers are assigned as follows: + * 0 - pachip 0 / G Port + * 1 - pachip 1 / G Port + * 2 - pachip 0 / A Port + * 3 - pachip 1 / A Port + */ +#define TITAN_HOSE_SHIFT (33) +#define TITAN_HOSE(h) (((unsigned long)(h)) << TITAN_HOSE_SHIFT) +#define TITAN_BASE (IDENT_ADDR + TI_BIAS) +#define TITAN_MEM(h) (TITAN_BASE+TITAN_HOSE(h)+0x000000000UL) +#define _TITAN_IACK_SC(h) (TITAN_BASE+TITAN_HOSE(h)+0x1F8000000UL) +#define TITAN_IO(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FC000000UL) +#define TITAN_CONF(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FE000000UL) + +#define TITAN_HOSE_MASK TITAN_HOSE(3) +#define TITAN_IACK_SC _TITAN_IACK_SC(0) /* hack! */ + +/* + * The canonical non-remaped I/O and MEM addresses have these values + * subtracted out. This is arranged so that folks manipulating ISA + * devices can use their familiar numbers and have them map to bus 0. + */ + +#define TITAN_IO_BIAS TITAN_IO(0) +#define TITAN_MEM_BIAS TITAN_MEM(0) + +/* The IO address space is larger than 0xffff */ +#define TITAN_IO_SPACE (TITAN_CONF(0) - TITAN_IO(0)) + +/* TIG Space */ +#define TITAN_TIG_SPACE (TITAN_BASE + 0x100000000UL) + +/* Offset between ram physical addresses and pci64 DAC bus addresses. */ +/* ??? Just a guess. Ought to confirm it hasn't been moved. 
*/ +#define TITAN_DAC_OFFSET (1UL << 40) + +/* + * Data structure for handling TITAN machine checks: + */ +#define SCB_Q_SYSERR 0x620 +#define SCB_Q_PROCERR 0x630 +#define SCB_Q_SYSMCHK 0x660 +#define SCB_Q_PROCMCHK 0x670 +#define SCB_Q_SYSEVENT 0x680 /* environmental / system management */ +struct el_TITAN_sysdata_mcheck { + u64 summary; /* 0x00 */ + u64 c_dirx; /* 0x08 */ + u64 c_misc; /* 0x10 */ + u64 p0_serror; /* 0x18 */ + u64 p0_gperror; /* 0x20 */ + u64 p0_aperror; /* 0x28 */ + u64 p0_agperror;/* 0x30 */ + u64 p1_serror; /* 0x38 */ + u64 p1_gperror; /* 0x40 */ + u64 p1_aperror; /* 0x48 */ + u64 p1_agperror;/* 0x50 */ +}; + +/* + * System area for a privateer 680 environmental/system management mcheck + */ +struct el_PRIVATEER_envdata_mcheck { + u64 summary; /* 0x00 */ + u64 c_dirx; /* 0x08 */ + u64 smir; /* 0x10 */ + u64 cpuir; /* 0x18 */ + u64 psir; /* 0x20 */ + u64 fault; /* 0x28 */ + u64 sys_doors; /* 0x30 */ + u64 temp_warn; /* 0x38 */ + u64 fan_ctrl; /* 0x40 */ + u64 code; /* 0x48 */ + u64 reserved; /* 0x50 */ +}; + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * TITAN, a 21??? PCI/memory support chipset for the EV6 (21264) + * can only use linear accesses to get at PCI/AGP memory and I/O spaces. + */ + +/* + * Memory functions. all accesses are done through linear space. + */ +extern void __iomem *titan_ioportmap(unsigned long addr); +extern void __iomem *titan_ioremap(unsigned long addr, unsigned long size); +extern void titan_iounmap(volatile void __iomem *addr); + +__EXTERN_INLINE int titan_is_ioaddr(unsigned long addr) +{ + return addr >= TITAN_BASE; +} + +extern int titan_is_mmio(const volatile void __iomem *addr); + +#undef __IO_PREFIX +#define __IO_PREFIX titan +#define titan_trivial_rw_bw 1 +#define titan_trivial_rw_lq 1 +#define titan_trivial_io_bw 1 +#define titan_trivial_io_lq 1 +#define titan_trivial_iounmap 0 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_TITAN__H__ */ diff --git a/arch/alpha/include/asm/core_tsunami.h b/arch/alpha/include/asm/core_tsunami.h new file mode 100644 index 00000000000..58d4fe48742 --- /dev/null +++ b/arch/alpha/include/asm/core_tsunami.h @@ -0,0 +1,335 @@ +#ifndef __ALPHA_TSUNAMI__H__ +#define __ALPHA_TSUNAMI__H__ + +#include +#include +#include + +/* + * TSUNAMI/TYPHOON are the internal names for the core logic chipset which + * provides memory controller and PCI access for the 21264 based systems. + * + * This file is based on: + * + * Tsunami System Programmers Manual + * Preliminary, Chapters 2-5 + * + */ + +/* XXX: Do we need to conditionalize on this? */ +#ifdef USE_48_BIT_KSEG +#define TS_BIAS 0x80000000000UL +#else +#define TS_BIAS 0x10000000000UL +#endif + +/* + * CChip, DChip, and PChip registers + */ + +typedef struct { + volatile unsigned long csr __attribute__((aligned(64))); +} tsunami_64; + +typedef struct { + tsunami_64 csc; + tsunami_64 mtr; + tsunami_64 misc; + tsunami_64 mpd; + tsunami_64 aar0; + tsunami_64 aar1; + tsunami_64 aar2; + tsunami_64 aar3; + tsunami_64 dim0; + tsunami_64 dim1; + tsunami_64 dir0; + tsunami_64 dir1; + tsunami_64 drir; + tsunami_64 prben; + tsunami_64 iic; /* a.k.a. iic0 */ + tsunami_64 wdr; /* a.k.a. 
iic1 */ + tsunami_64 mpr0; + tsunami_64 mpr1; + tsunami_64 mpr2; + tsunami_64 mpr3; + tsunami_64 mctl; + tsunami_64 __pad1; + tsunami_64 ttr; + tsunami_64 tdr; + tsunami_64 dim2; + tsunami_64 dim3; + tsunami_64 dir2; + tsunami_64 dir3; + tsunami_64 iic2; + tsunami_64 iic3; +} tsunami_cchip; + +typedef struct { + tsunami_64 dsc; + tsunami_64 str; + tsunami_64 drev; +} tsunami_dchip; + +typedef struct { + tsunami_64 wsba[4]; + tsunami_64 wsm[4]; + tsunami_64 tba[4]; + tsunami_64 pctl; + tsunami_64 plat; + tsunami_64 reserved; + tsunami_64 perror; + tsunami_64 perrmask; + tsunami_64 perrset; + tsunami_64 tlbiv; + tsunami_64 tlbia; + tsunami_64 pmonctl; + tsunami_64 pmoncnt; +} tsunami_pchip; + +#define TSUNAMI_cchip ((tsunami_cchip *)(IDENT_ADDR+TS_BIAS+0x1A0000000UL)) +#define TSUNAMI_dchip ((tsunami_dchip *)(IDENT_ADDR+TS_BIAS+0x1B0000800UL)) +#define TSUNAMI_pchip0 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x180000000UL)) +#define TSUNAMI_pchip1 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x380000000UL)) +extern int TSUNAMI_bootcpu; + +/* + * TSUNAMI Pchip Error register. + */ + +#define perror_m_lost 0x1 +#define perror_m_serr 0x2 +#define perror_m_perr 0x4 +#define perror_m_dcrto 0x8 +#define perror_m_sge 0x10 +#define perror_m_ape 0x20 +#define perror_m_ta 0x40 +#define perror_m_rdpe 0x80 +#define perror_m_nds 0x100 +#define perror_m_rto 0x200 +#define perror_m_uecc 0x400 +#define perror_m_cre 0x800 +#define perror_m_addrl 0xFFFFFFFF0000UL +#define perror_m_addrh 0x7000000000000UL +#define perror_m_cmd 0xF0000000000000UL +#define perror_m_syn 0xFF00000000000000UL +union TPchipPERROR { + struct { + unsigned int perror_v_lost : 1; + unsigned perror_v_serr : 1; + unsigned perror_v_perr : 1; + unsigned perror_v_dcrto : 1; + unsigned perror_v_sge : 1; + unsigned perror_v_ape : 1; + unsigned perror_v_ta : 1; + unsigned perror_v_rdpe : 1; + unsigned perror_v_nds : 1; + unsigned perror_v_rto : 1; + unsigned perror_v_uecc : 1; + unsigned perror_v_cre : 1; + unsigned perror_v_rsvd1 : 4; + unsigned perror_v_addrl : 32; + unsigned perror_v_addrh : 3; + unsigned perror_v_rsvd2 : 1; + unsigned perror_v_cmd : 4; + unsigned perror_v_syn : 8; + } perror_r_bits; + int perror_q_whole [2]; +}; + +/* + * TSUNAMI Pchip Window Space Base Address register. 
+ */ +#define wsba_m_ena 0x1 +#define wsba_m_sg 0x2 +#define wsba_m_ptp 0x4 +#define wsba_m_addr 0xFFF00000 +#define wmask_k_sz1gb 0x3FF00000 +union TPchipWSBA { + struct { + unsigned wsba_v_ena : 1; + unsigned wsba_v_sg : 1; + unsigned wsba_v_ptp : 1; + unsigned wsba_v_rsvd1 : 17; + unsigned wsba_v_addr : 12; + unsigned wsba_v_rsvd2 : 32; + } wsba_r_bits; + int wsba_q_whole [2]; +}; + +/* + * TSUNAMI Pchip Control Register + */ +#define pctl_m_fdsc 0x1 +#define pctl_m_fbtb 0x2 +#define pctl_m_thdis 0x4 +#define pctl_m_chaindis 0x8 +#define pctl_m_tgtlat 0x10 +#define pctl_m_hole 0x20 +#define pctl_m_mwin 0x40 +#define pctl_m_arbena 0x80 +#define pctl_m_prigrp 0x7F00 +#define pctl_m_ppri 0x8000 +#define pctl_m_rsvd1 0x30000 +#define pctl_m_eccen 0x40000 +#define pctl_m_padm 0x80000 +#define pctl_m_cdqmax 0xF00000 +#define pctl_m_rev 0xFF000000 +#define pctl_m_crqmax 0xF00000000UL +#define pctl_m_ptpmax 0xF000000000UL +#define pctl_m_pclkx 0x30000000000UL +#define pctl_m_fdsdis 0x40000000000UL +#define pctl_m_fdwdis 0x80000000000UL +#define pctl_m_ptevrfy 0x100000000000UL +#define pctl_m_rpp 0x200000000000UL +#define pctl_m_pid 0xC00000000000UL +#define pctl_m_rsvd2 0xFFFF000000000000UL + +union TPchipPCTL { + struct { + unsigned pctl_v_fdsc : 1; + unsigned pctl_v_fbtb : 1; + unsigned pctl_v_thdis : 1; + unsigned pctl_v_chaindis : 1; + unsigned pctl_v_tgtlat : 1; + unsigned pctl_v_hole : 1; + unsigned pctl_v_mwin : 1; + unsigned pctl_v_arbena : 1; + unsigned pctl_v_prigrp : 7; + unsigned pctl_v_ppri : 1; + unsigned pctl_v_rsvd1 : 2; + unsigned pctl_v_eccen : 1; + unsigned pctl_v_padm : 1; + unsigned pctl_v_cdqmax : 4; + unsigned pctl_v_rev : 8; + unsigned pctl_v_crqmax : 4; + unsigned pctl_v_ptpmax : 4; + unsigned pctl_v_pclkx : 2; + unsigned pctl_v_fdsdis : 1; + unsigned pctl_v_fdwdis : 1; + unsigned pctl_v_ptevrfy : 1; + unsigned pctl_v_rpp : 1; + unsigned pctl_v_pid : 2; + unsigned pctl_v_rsvd2 : 16; + } pctl_r_bits; + int pctl_q_whole [2]; +}; + +/* + * TSUNAMI Pchip Error Mask Register. + */ +#define perrmask_m_lost 0x1 +#define perrmask_m_serr 0x2 +#define perrmask_m_perr 0x4 +#define perrmask_m_dcrto 0x8 +#define perrmask_m_sge 0x10 +#define perrmask_m_ape 0x20 +#define perrmask_m_ta 0x40 +#define perrmask_m_rdpe 0x80 +#define perrmask_m_nds 0x100 +#define perrmask_m_rto 0x200 +#define perrmask_m_uecc 0x400 +#define perrmask_m_cre 0x800 +#define perrmask_m_rsvd 0xFFFFFFFFFFFFF000UL +union TPchipPERRMASK { + struct { + unsigned int perrmask_v_lost : 1; + unsigned perrmask_v_serr : 1; + unsigned perrmask_v_perr : 1; + unsigned perrmask_v_dcrto : 1; + unsigned perrmask_v_sge : 1; + unsigned perrmask_v_ape : 1; + unsigned perrmask_v_ta : 1; + unsigned perrmask_v_rdpe : 1; + unsigned perrmask_v_nds : 1; + unsigned perrmask_v_rto : 1; + unsigned perrmask_v_uecc : 1; + unsigned perrmask_v_cre : 1; + unsigned perrmask_v_rsvd1 : 20; + unsigned perrmask_v_rsvd2 : 32; + } perrmask_r_bits; + int perrmask_q_whole [2]; +}; + +/* + * Memory spaces: + */ +#define TSUNAMI_HOSE(h) (((unsigned long)(h)) << 33) +#define TSUNAMI_BASE (IDENT_ADDR + TS_BIAS) + +#define TSUNAMI_MEM(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x000000000UL) +#define _TSUNAMI_IACK_SC(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1F8000000UL) +#define TSUNAMI_IO(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FC000000UL) +#define TSUNAMI_CONF(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FE000000UL) + +#define TSUNAMI_IACK_SC _TSUNAMI_IACK_SC(0) /* hack! */ + + +/* + * The canonical non-remaped I/O and MEM addresses have these values + * subtracted out. 
This is arranged so that folks manipulating ISA + * devices can use their familiar numbers and have them map to bus 0. + */ + +#define TSUNAMI_IO_BIAS TSUNAMI_IO(0) +#define TSUNAMI_MEM_BIAS TSUNAMI_MEM(0) + +/* The IO address space is larger than 0xffff */ +#define TSUNAMI_IO_SPACE (TSUNAMI_CONF(0) - TSUNAMI_IO(0)) + +/* Offset between ram physical addresses and pci64 DAC bus addresses. */ +#define TSUNAMI_DAC_OFFSET (1UL << 40) + +/* + * Data structure for handling TSUNAMI machine checks: + */ +struct el_TSUNAMI_sysdata_mcheck { +}; + + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * I/O functions: + * + * TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264) + * can only use linear accesses to get at PCI memory and I/O spaces. + */ + +/* + * Memory functions. all accesses are done through linear space. + */ +extern void __iomem *tsunami_ioportmap(unsigned long addr); +extern void __iomem *tsunami_ioremap(unsigned long addr, unsigned long size); +__EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr) +{ + return addr >= TSUNAMI_BASE; +} + +__EXTERN_INLINE int tsunami_is_mmio(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + return (addr & 0x100000000UL) == 0; +} + +#undef __IO_PREFIX +#define __IO_PREFIX tsunami +#define tsunami_trivial_rw_bw 1 +#define tsunami_trivial_rw_lq 1 +#define tsunami_trivial_io_bw 1 +#define tsunami_trivial_io_lq 1 +#define tsunami_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_TSUNAMI__H__ */ diff --git a/arch/alpha/include/asm/core_wildfire.h b/arch/alpha/include/asm/core_wildfire.h new file mode 100644 index 00000000000..cd562f544ba --- /dev/null +++ b/arch/alpha/include/asm/core_wildfire.h @@ -0,0 +1,318 @@ +#ifndef __ALPHA_WILDFIRE__H__ +#define __ALPHA_WILDFIRE__H__ + +#include +#include + +#define WILDFIRE_MAX_QBB 8 /* more than 8 requires other mods */ +#define WILDFIRE_PCA_PER_QBB 4 +#define WILDFIRE_IRQ_PER_PCA 64 + +#define WILDFIRE_NR_IRQS \ + (WILDFIRE_MAX_QBB * WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) + +extern unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB]; +extern unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB]; +#define QBB_MAP_EMPTY 0xff + +extern unsigned long wildfire_hard_qbb_mask; +extern unsigned long wildfire_soft_qbb_mask; +extern unsigned long wildfire_gp_mask; +extern unsigned long wildfire_hs_mask; +extern unsigned long wildfire_iop_mask; +extern unsigned long wildfire_ior_mask; +extern unsigned long wildfire_pca_mask; +extern unsigned long wildfire_cpu_mask; +extern unsigned long wildfire_mem_mask; + +#define WILDFIRE_QBB_EXISTS(qbbno) (wildfire_soft_qbb_mask & (1 << (qbbno))) + +#define WILDFIRE_MEM_EXISTS(qbbno) (wildfire_mem_mask & (0xf << ((qbbno) << 2))) + +#define WILDFIRE_PCA_EXISTS(qbbno, pcano) \ + (wildfire_pca_mask & (1 << (((qbbno) << 2) + (pcano)))) + +typedef struct { + volatile unsigned long csr __attribute__((aligned(64))); +} wildfire_64; + +typedef struct { + volatile unsigned long csr __attribute__((aligned(256))); +} wildfire_256; + +typedef struct { + volatile unsigned long csr __attribute__((aligned(2048))); +} wildfire_2k; + +typedef struct { + wildfire_64 qsd_whami; + wildfire_64 qsd_rev; + wildfire_64 qsd_port_present; + wildfire_64 qsd_port_active; + wildfire_64 qsd_fault_ena; + wildfire_64 qsd_cpu_int_ena; + wildfire_64 qsd_mem_config; + 
wildfire_64 qsd_err_sum; + wildfire_64 ce_sum[4]; + wildfire_64 dev_init[4]; + wildfire_64 it_int[4]; + wildfire_64 ip_int[4]; + wildfire_64 uce_sum[4]; + wildfire_64 se_sum__non_dev_int[4]; + wildfire_64 scratch[4]; + wildfire_64 qsd_timer; + wildfire_64 qsd_diag; +} wildfire_qsd; + +typedef struct { + wildfire_256 qsd_whami; + wildfire_256 __pad1; + wildfire_256 ce_sum; + wildfire_256 dev_init; + wildfire_256 it_int; + wildfire_256 ip_int; + wildfire_256 uce_sum; + wildfire_256 se_sum; +} wildfire_fast_qsd; + +typedef struct { + wildfire_2k qsa_qbb_id; + wildfire_2k __pad1; + wildfire_2k qsa_port_ena; + wildfire_2k qsa_scratch; + wildfire_2k qsa_config[5]; + wildfire_2k qsa_ref_int; + wildfire_2k qsa_qbb_pop[2]; + wildfire_2k qsa_dtag_fc; + wildfire_2k __pad2[3]; + wildfire_2k qsa_diag; + wildfire_2k qsa_diag_lock[4]; + wildfire_2k __pad3[11]; + wildfire_2k qsa_cpu_err_sum; + wildfire_2k qsa_misc_err_sum; + wildfire_2k qsa_tmo_err_sum; + wildfire_2k qsa_err_ena; + wildfire_2k qsa_tmo_config; + wildfire_2k qsa_ill_cmd_err_sum; + wildfire_2k __pad4[26]; + wildfire_2k qsa_busy_mask; + wildfire_2k qsa_arr_valid; + wildfire_2k __pad5[2]; + wildfire_2k qsa_port_map[4]; + wildfire_2k qsa_arr_addr[8]; + wildfire_2k qsa_arr_mask[8]; +} wildfire_qsa; + +typedef struct { + wildfire_64 ioa_config; + wildfire_64 iod_config; + wildfire_64 iop_switch_credits; + wildfire_64 __pad1; + wildfire_64 iop_hose_credits; + wildfire_64 __pad2[11]; + struct { + wildfire_64 __pad3; + wildfire_64 init; + } iop_hose[4]; + wildfire_64 ioa_hose_0_ctrl; + wildfire_64 iod_hose_0_ctrl; + wildfire_64 ioa_hose_1_ctrl; + wildfire_64 iod_hose_1_ctrl; + wildfire_64 ioa_hose_2_ctrl; + wildfire_64 iod_hose_2_ctrl; + wildfire_64 ioa_hose_3_ctrl; + wildfire_64 iod_hose_3_ctrl; + struct { + wildfire_64 target; + wildfire_64 __pad4; + } iop_dev_int[4]; + + wildfire_64 iop_err_int_target; + wildfire_64 __pad5[7]; + wildfire_64 iop_qbb_err_sum; + wildfire_64 __pad6; + wildfire_64 iop_qbb_se_sum; + wildfire_64 __pad7; + wildfire_64 ioa_err_sum; + wildfire_64 iod_err_sum; + wildfire_64 __pad8[4]; + wildfire_64 ioa_diag_force_err; + wildfire_64 iod_diag_force_err; + wildfire_64 __pad9[4]; + wildfire_64 iop_diag_send_err_int; + wildfire_64 __pad10[15]; + wildfire_64 ioa_scratch; + wildfire_64 iod_scratch; +} wildfire_iop; + +typedef struct { + wildfire_2k gpa_qbb_map[4]; + wildfire_2k gpa_mem_pop_map; + wildfire_2k gpa_scratch; + wildfire_2k gpa_diag; + wildfire_2k gpa_config_0; + wildfire_2k __pad1; + wildfire_2k gpa_init_id; + wildfire_2k gpa_config_2; + /* not complete */ +} wildfire_gp; + +typedef struct { + wildfire_64 pca_what_am_i; + wildfire_64 pca_err_sum; + wildfire_64 pca_diag_force_err; + wildfire_64 pca_diag_send_err_int; + wildfire_64 pca_hose_credits; + wildfire_64 pca_scratch; + wildfire_64 pca_micro_addr; + wildfire_64 pca_micro_data; + wildfire_64 pca_pend_int; + wildfire_64 pca_sent_int; + wildfire_64 __pad1; + wildfire_64 pca_stdio_edge_level; + wildfire_64 __pad2[52]; + struct { + wildfire_64 target; + wildfire_64 enable; + } pca_int[4]; + wildfire_64 __pad3[56]; + wildfire_64 pca_alt_sent_int[32]; +} wildfire_pca; + +typedef struct { + wildfire_64 ne_what_am_i; + /* not complete */ +} wildfire_ne; + +typedef struct { + wildfire_64 fe_what_am_i; + /* not complete */ +} wildfire_fe; + +typedef struct { + wildfire_64 pci_io_addr_ext; + wildfire_64 pci_ctrl; + wildfire_64 pci_err_sum; + wildfire_64 pci_err_addr; + wildfire_64 pci_stall_cnt; + wildfire_64 pci_iack_special; + wildfire_64 __pad1[2]; + wildfire_64 
pci_pend_int; + wildfire_64 pci_sent_int; + wildfire_64 __pad2[54]; + struct { + wildfire_64 wbase; + wildfire_64 wmask; + wildfire_64 tbase; + } pci_window[4]; + wildfire_64 pci_flush_tlb; + wildfire_64 pci_perf_mon; +} wildfire_pci; + +#define WILDFIRE_ENTITY_SHIFT 18 + +#define WILDFIRE_GP_ENTITY (0x10UL << WILDFIRE_ENTITY_SHIFT) +#define WILDFIRE_IOP_ENTITY (0x08UL << WILDFIRE_ENTITY_SHIFT) +#define WILDFIRE_QSA_ENTITY (0x04UL << WILDFIRE_ENTITY_SHIFT) +#define WILDFIRE_QSD_ENTITY_SLOW (0x05UL << WILDFIRE_ENTITY_SHIFT) +#define WILDFIRE_QSD_ENTITY_FAST (0x01UL << WILDFIRE_ENTITY_SHIFT) + +#define WILDFIRE_PCA_ENTITY(pca) ((0xc|(pca))<>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23))) + +#define WILDFIRE_IO_BIAS WILDFIRE_IO(0,0) +#define WILDFIRE_MEM_BIAS WILDFIRE_MEM(0,0) /* ??? */ + +/* The IO address space is larger than 0xffff */ +#define WILDFIRE_IO_SPACE (8UL*1024*1024) + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * Memory functions. all accesses are done through linear space. + */ + +__EXTERN_INLINE void __iomem *wildfire_ioportmap(unsigned long addr) +{ + return (void __iomem *)(addr + WILDFIRE_IO_BIAS); +} + +__EXTERN_INLINE void __iomem *wildfire_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + WILDFIRE_MEM_BIAS); +} + +__EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr) +{ + return addr >= WILDFIRE_BASE; +} + +__EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long)xaddr; + return (addr & 0x100000000UL) == 0; +} + +#undef __IO_PREFIX +#define __IO_PREFIX wildfire +#define wildfire_trivial_rw_bw 1 +#define wildfire_trivial_rw_lq 1 +#define wildfire_trivial_io_bw 1 +#define wildfire_trivial_io_lq 1 +#define wildfire_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_WILDFIRE__H__ */ diff --git a/arch/alpha/include/asm/cputime.h b/arch/alpha/include/asm/cputime.h new file mode 100644 index 00000000000..19577fd9323 --- /dev/null +++ b/arch/alpha/include/asm/cputime.h @@ -0,0 +1,6 @@ +#ifndef __ALPHA_CPUTIME_H +#define __ALPHA_CPUTIME_H + +#include + +#endif /* __ALPHA_CPUTIME_H */ diff --git a/arch/alpha/include/asm/current.h b/arch/alpha/include/asm/current.h new file mode 100644 index 00000000000..094d285a1b3 --- /dev/null +++ b/arch/alpha/include/asm/current.h @@ -0,0 +1,9 @@ +#ifndef _ALPHA_CURRENT_H +#define _ALPHA_CURRENT_H + +#include + +#define get_current() (current_thread_info()->task) +#define current get_current() + +#endif /* _ALPHA_CURRENT_H */ diff --git a/arch/alpha/include/asm/delay.h b/arch/alpha/include/asm/delay.h new file mode 100644 index 00000000000..2aa3f410f7e --- /dev/null +++ b/arch/alpha/include/asm/delay.h @@ -0,0 +1,10 @@ +#ifndef __ALPHA_DELAY_H +#define __ALPHA_DELAY_H + +extern void __delay(int loops); +extern void udelay(unsigned long usecs); + +extern void ndelay(unsigned long nsecs); +#define ndelay ndelay + +#endif /* defined(__ALPHA_DELAY_H) */ diff --git a/arch/alpha/include/asm/device.h b/arch/alpha/include/asm/device.h new file mode 100644 index 00000000000..d8f9872b0e2 --- /dev/null +++ b/arch/alpha/include/asm/device.h @@ -0,0 +1,7 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include + diff --git a/arch/alpha/include/asm/div64.h b/arch/alpha/include/asm/div64.h new file mode 100644 index 
00000000000..6cd978cefb2 --- /dev/null +++ b/arch/alpha/include/asm/div64.h @@ -0,0 +1 @@ +#include diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h new file mode 100644 index 00000000000..a5801ae02e4 --- /dev/null +++ b/arch/alpha/include/asm/dma-mapping.h @@ -0,0 +1,69 @@ +#ifndef _ALPHA_DMA_MAPPING_H +#define _ALPHA_DMA_MAPPING_H + + +#ifdef CONFIG_PCI + +#include + +#define dma_map_single(dev, va, size, dir) \ + pci_map_single(alpha_gendev_to_pci(dev), va, size, dir) +#define dma_unmap_single(dev, addr, size, dir) \ + pci_unmap_single(alpha_gendev_to_pci(dev), addr, size, dir) +#define dma_alloc_coherent(dev, size, addr, gfp) \ + __pci_alloc_consistent(alpha_gendev_to_pci(dev), size, addr, gfp) +#define dma_free_coherent(dev, size, va, addr) \ + pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr) +#define dma_map_page(dev, page, off, size, dir) \ + pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir) +#define dma_unmap_page(dev, addr, size, dir) \ + pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir) +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(alpha_gendev_to_pci(dev), sg, nents, dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) +#define dma_supported(dev, mask) \ + pci_dma_supported(alpha_gendev_to_pci(dev), mask) +#define dma_mapping_error(dev, addr) \ + pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr) + +#else /* no PCI - no IOMMU. */ + +struct scatterlist; +void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); +int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); + +#define dma_free_coherent(dev, size, va, addr) \ + free_pages((unsigned long)va, get_order(size)) +#define dma_supported(dev, mask) (mask < 0x00ffffffUL ? 0 : 1) +#define dma_map_single(dev, va, size, dir) virt_to_phys(va) +#define dma_map_page(dev, page, off, size, dir) (page_to_pa(page) + off) + +#define dma_unmap_single(dev, addr, size, dir) ((void)0) +#define dma_unmap_page(dev, addr, size, dir) ((void)0) +#define dma_unmap_sg(dev, sg, nents, dir) ((void)0) + +#define dma_mapping_error(dev, addr) (0) + +#endif /* !CONFIG_PCI */ + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +#define dma_is_consistent(d, h) (1) + +int dma_set_mask(struct device *dev, u64 mask); + +#define dma_sync_single_for_cpu(dev, addr, size, dir) ((void)0) +#define dma_sync_single_for_device(dev, addr, size, dir) ((void)0) +#define dma_sync_single_range(dev, addr, off, size, dir) ((void)0) +#define dma_sync_sg_for_cpu(dev, sg, nents, dir) ((void)0) +#define dma_sync_sg_for_device(dev, sg, nents, dir) ((void)0) +#define dma_cache_sync(dev, va, size, dir) ((void)0) +#define dma_sync_single_range_for_cpu(dev, addr, offset, size, dir) ((void)0) +#define dma_sync_single_range_for_device(dev, addr, offset, size, dir) ((void)0) + +#define dma_get_cache_alignment() L1_CACHE_BYTES + +#endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/alpha/include/asm/dma.h b/arch/alpha/include/asm/dma.h new file mode 100644 index 00000000000..87cfdbdf08f --- /dev/null +++ b/arch/alpha/include/asm/dma.h @@ -0,0 +1,376 @@ +/* + * include/asm-alpha/dma.h + * + * This is essentially the same as the i386 DMA stuff, as the AlphaPCs + * use ISA-compatible dma. 
The only extension is support for high-page + * registers that allow to set the top 8 bits of a 32-bit DMA address. + * This register should be written last when setting up a DMA address + * as this will also enable DMA across 64 KB boundaries. + */ + +/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_DMA_H +#define _ASM_DMA_H + +#include +#include + +#define dma_outb outb +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +#define MAX_DMA_CHANNELS 8 + +/* + ISA DMA limitations on Alpha platforms, + + These may be due to SIO (PCI<->ISA bridge) chipset limitation, or + just a wiring limit. +*/ + +/* The maximum address for ISA DMA transfer on Alpha XL, due to an + hardware SIO limitation, is 64MB. +*/ +#define ALPHA_XL_MAX_ISA_DMA_ADDRESS 0x04000000UL + +/* The maximum address for ISA DMA transfer on RUFFIAN, + due to an hardware SIO limitation, is 16MB. +*/ +#define ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS 0x01000000UL + +/* The maximum address for ISA DMA transfer on SABLE, and some ALCORs, + due to an hardware SIO chip limitation, is 2GB. +*/ +#define ALPHA_SABLE_MAX_ISA_DMA_ADDRESS 0x80000000UL +#define ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS 0x80000000UL + +/* + Maximum address for all the others is the complete 32-bit bus + address space. 
+*/ +#define ALPHA_MAX_ISA_DMA_ADDRESS 0x100000000UL + +#ifdef CONFIG_ALPHA_GENERIC +# define MAX_ISA_DMA_ADDRESS (alpha_mv.max_isa_dma_address) +#else +# if defined(CONFIG_ALPHA_XL) +# define MAX_ISA_DMA_ADDRESS ALPHA_XL_MAX_ISA_DMA_ADDRESS +# elif defined(CONFIG_ALPHA_RUFFIAN) +# define MAX_ISA_DMA_ADDRESS ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS +# elif defined(CONFIG_ALPHA_SABLE) +# define MAX_ISA_DMA_ADDRESS ALPHA_SABLE_MAX_ISA_DMA_ADDRESS +# elif defined(CONFIG_ALPHA_ALCOR) +# define MAX_ISA_DMA_ADDRESS ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS +# else +# define MAX_ISA_DMA_ADDRESS ALPHA_MAX_ISA_DMA_ADDRESS +# endif +#endif + +/* If we have the iommu, we don't have any address limitations on DMA. + Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone + like i386. */ +#define MAX_DMA_ADDRESS (alpha_mv.mv_pci_tbi ? \ + ~0UL : IDENT_ADDR + 0x01000000) + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ +#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ +#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0) +#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1) +#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2) +#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3) +#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4) +#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5) +#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6) +#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7) + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + 
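
[Editor's note, not part of the patch: the helpers declared just below (claim_dma_lock(), disable_dma(), clear_dma_ff(), set_dma_mode(), set_dma_addr(), set_dma_count(), enable_dma()) are normally used together as one programming sequence when a driver starts an ISA DMA transfer. The following is only an illustrative sketch built from names defined in this header; the function name, channel number, buffer address and length are hypothetical, and the bus address is assumed to already satisfy the MAX_ISA_DMA_ADDRESS and 64 KB/128 KB boundary rules described in the comments above.]

    /*
     * Illustrative sketch only -- not part of the committed header.
     * A typical single-shot ISA DMA read (device -> memory) might be
     * programmed like this, with "bus_addr" and "len" assumed to obey
     * the address and boundary restrictions documented above.
     */
    static void example_start_isa_dma_read(unsigned int channel,
                                           unsigned int bus_addr,
                                           unsigned int len)
    {
            unsigned long flags;

            flags = claim_dma_lock();        /* serialize access to the 8237s */
            disable_dma(channel);            /* mask the channel while programming */
            clear_dma_ff(channel);           /* put the addr/count flip-flop in a known state */
            set_dma_mode(channel, DMA_MODE_READ);
            set_dma_addr(channel, bus_addr); /* writes the (high) page registers last */
            set_dma_count(channel, len);     /* byte count; must be even on channels 5-7 */
            enable_dma(channel);             /* unmask -- transfer proceeds on DREQ */
            release_dma_lock(flags);
    }

[The channel is masked before the flip-flop is cleared and the registers are written because, as the comments below note, the address and count registers are loaded as LSB/MSB pairs and must not be disturbed mid-sequence; the whole sequence runs under the DMA spinlock with interrupts disabled for the same reason.]
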
+extern spinlock_t dma_spin_lock; + +static __inline__ unsigned long claim_dma_lock(void) +{ + unsigned long flags; + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static __inline__ void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static __inline__ void enable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static __inline__ void disable_dma(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. + * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while interrupts are disabled! --- + */ +static __inline__ void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr<=3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static __inline__ void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr<=3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr&3), DMA2_MODE_REG); +} + +/* set extended mode for a specific DMA channel */ +static __inline__ void set_dma_ext_mode(unsigned int dmanr, char ext_mode) +{ + if (dmanr<=3) + dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG); + else + dma_outb(ext_mode | (dmanr&3), DMA2_EXT_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register. + */ +static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr) +{ + switch(dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + dma_outb((pagenr >> 8), DMA_HIPAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + dma_outb((pagenr >> 8), DMA_HIPAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + dma_outb((pagenr >> 8), DMA_HIPAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + dma_outb((pagenr >> 8), DMA_HIPAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + dma_outb((pagenr >> 8), DMA_HIPAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + dma_outb((pagenr >> 8), DMA_HIPAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + dma_outb((pagenr >> 8), DMA_HIPAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + if (dmanr <= 3) { + dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); + } else { + dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); + } + set_dma_page(dmanr, a>>16); /* set hipage last to enable 32-bit mode */ +} + + +/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. 
+ */ +static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); + } else { + dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static __inline__ int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE + : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr<=3)? count : (count<<1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ +#define KERNEL_HAVE_CHECK_DMA +extern int check_dma(unsigned int dmanr); + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + + +#endif /* _ASM_DMA_H */ diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h new file mode 100644 index 00000000000..fc1002ea1e0 --- /dev/null +++ b/arch/alpha/include/asm/elf.h @@ -0,0 +1,165 @@ +#ifndef __ASM_ALPHA_ELF_H +#define __ASM_ALPHA_ELF_H + +#include + +/* Special values for the st_other field in the symbol table. */ + +#define STO_ALPHA_NOPV 0x80 +#define STO_ALPHA_STD_GPLOAD 0x88 + +/* + * Alpha ELF relocation types + */ +#define R_ALPHA_NONE 0 /* No reloc */ +#define R_ALPHA_REFLONG 1 /* Direct 32 bit */ +#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ +#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ +#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_ALPHA_GPDISP 6 /* Add displacement to GP */ +#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_ALPHA_SREL16 9 /* PC relative 16 bit */ +#define R_ALPHA_SREL32 10 /* PC relative 32 bit */ +#define R_ALPHA_SREL64 11 /* PC relative 64 bit */ +#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ +#define R_ALPHA_COPY 24 /* Copy symbol at runtime */ +#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ +#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ +#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ +#define R_ALPHA_BRSGP 28 +#define R_ALPHA_TLSGD 29 +#define R_ALPHA_TLS_LDM 30 +#define R_ALPHA_DTPMOD64 31 +#define R_ALPHA_GOTDTPREL 32 +#define R_ALPHA_DTPREL64 33 +#define R_ALPHA_DTPRELHI 34 +#define R_ALPHA_DTPRELLO 35 +#define R_ALPHA_DTPREL16 36 +#define R_ALPHA_GOTTPREL 37 +#define R_ALPHA_TPREL64 38 +#define R_ALPHA_TPRELHI 39 +#define R_ALPHA_TPRELLO 40 +#define R_ALPHA_TPREL16 41 + +#define SHF_ALPHA_GPREL 0x10000000 + +/* Legal values for e_flags field of Elf64_Ehdr. 
*/ + +#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ + +/* + * ELF register definitions.. + */ + +/* + * The OSF/1 version of makes gregset_t 46 entries long. + * I have no idea why that is so. For now, we just leave it at 33 + * (32 general regs + processor status word). + */ +#define ELF_NGREG 33 +#define ELF_NFPREG 32 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef double elf_fpreg_t; +typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_ALPHA + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 8192 + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) + +/* $0 is set by ld.so to a pointer to a function which might be + registered using atexit. This provides a mean for the dynamic + linker to call DT_FINI functions for shared libraries that have + been loaded before the code runs. + + So that we can use the same startup file with static executables, + we start programs with a value of 0 to indicate that there is no + such function. */ + +#define ELF_PLAT_INIT(_r, load_addr) _r->r0 = 0 + +/* The registers are layed out in pt_regs for PAL and syscall + convenience. Re-order them for the linear elf_gregset_t. */ + +struct pt_regs; +struct thread_info; +struct task_struct; +extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, + struct thread_info *ti); +#define ELF_CORE_COPY_REGS(DEST, REGS) \ + dump_elf_thread(DEST, REGS, current_thread_info()); + +/* Similar, but for a thread other than current. */ + +extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task); +#define ELF_CORE_COPY_TASK_REGS(TASK, DEST) \ + dump_elf_task(*(DEST), TASK) + +/* Similar, but for the FP registers. */ + +extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task); +#define ELF_CORE_COPY_FPREGS(TASK, DEST) \ + dump_elf_task_fp(*(DEST), TASK) + +/* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This is trivial on Alpha, + but not so on other machines. */ + +#define ELF_HWCAP (~amask(-1)) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. */ + +#define ELF_PLATFORM \ +({ \ + enum implver_enum i_ = implver(); \ + ( i_ == IMPLVER_EV4 ? "ev4" \ + : i_ == IMPLVER_EV5 \ + ? (amask(AMASK_BWX) ? "ev5" : "ev56") \ + : amask (AMASK_CIX) ? "ev6" : "ev67"); \ +}) + +#define SET_PERSONALITY(EX, IBCS2) \ + set_personality(((EX).e_flags & EF_ALPHA_32BIT) \ + ? PER_LINUX_32BIT : (IBCS2) ? 
PER_SVR4 : PER_LINUX) + +extern int alpha_l1i_cacheshape; +extern int alpha_l1d_cacheshape; +extern int alpha_l2_cacheshape; +extern int alpha_l3_cacheshape; + +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ +#define ARCH_DLINFO \ + do { \ + NEW_AUX_ENT(AT_L1I_CACHESHAPE, alpha_l1i_cacheshape); \ + NEW_AUX_ENT(AT_L1D_CACHESHAPE, alpha_l1d_cacheshape); \ + NEW_AUX_ENT(AT_L2_CACHESHAPE, alpha_l2_cacheshape); \ + NEW_AUX_ENT(AT_L3_CACHESHAPE, alpha_l3_cacheshape); \ + } while (0) + +#endif /* __ASM_ALPHA_ELF_H */ diff --git a/arch/alpha/include/asm/emergency-restart.h b/arch/alpha/include/asm/emergency-restart.h new file mode 100644 index 00000000000..108d8c48e42 --- /dev/null +++ b/arch/alpha/include/asm/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include + +#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/alpha/include/asm/err_common.h b/arch/alpha/include/asm/err_common.h new file mode 100644 index 00000000000..c2509594210 --- /dev/null +++ b/arch/alpha/include/asm/err_common.h @@ -0,0 +1,118 @@ +/* + * linux/include/asm-alpha/err_common.h + * + * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) + * + * Contains declarations and macros to support Alpha error handling + * implementations. + */ + +#ifndef __ALPHA_ERR_COMMON_H +#define __ALPHA_ERR_COMMON_H 1 + +/* + * SCB Vector definitions + */ +#define SCB_Q_SYSERR 0x620 +#define SCB_Q_PROCERR 0x630 +#define SCB_Q_SYSMCHK 0x660 +#define SCB_Q_PROCMCHK 0x670 +#define SCB_Q_SYSEVENT 0x680 + +/* + * Disposition definitions for logout frame parser + */ +#define MCHK_DISPOSITION_UNKNOWN_ERROR 0x00 +#define MCHK_DISPOSITION_REPORT 0x01 +#define MCHK_DISPOSITION_DISMISS 0x02 + +/* + * Error Log definitions + */ +/* + * Types + */ + +#define EL_CLASS__TERMINATION (0) +# define EL_TYPE__TERMINATION__TERMINATION (0) +#define EL_CLASS__HEADER (5) +# define EL_TYPE__HEADER__SYSTEM_ERROR_FRAME (1) +# define EL_TYPE__HEADER__SYSTEM_EVENT_FRAME (2) +# define EL_TYPE__HEADER__HALT_FRAME (3) +# define EL_TYPE__HEADER__LOGOUT_FRAME (19) +#define EL_CLASS__GENERAL_NOTIFICATION (9) +#define EL_CLASS__PCI_ERROR_FRAME (11) +#define EL_CLASS__REGATTA_FAMILY (12) +# define EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME (1) +# define EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME (2) +# define EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME (3) +# define EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED (8) +# define EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED (9) +# define EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED (10) +# define EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT (11) +# define EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT (12) +#define EL_CLASS__PAL (14) +# define EL_TYPE__PAL__LOGOUT_FRAME (1) +# define EL_TYPE__PAL__EV7_PROCESSOR (4) +# define EL_TYPE__PAL__EV7_ZBOX (5) +# define EL_TYPE__PAL__EV7_RBOX (6) +# define EL_TYPE__PAL__EV7_IO (7) +# define EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE (10) +# define EL_TYPE__PAL__ENV__AIRMOVER_FAN (11) +# define EL_TYPE__PAL__ENV__VOLTAGE (12) +# define EL_TYPE__PAL__ENV__INTRUSION (13) +# define EL_TYPE__PAL__ENV__POWER_SUPPLY (14) +# define EL_TYPE__PAL__ENV__LAN (15) +# define EL_TYPE__PAL__ENV__HOT_PLUG (16) + +union el_timestamp { + struct { + u8 second; + u8 minute; + u8 hour; + u8 day; + u8 month; + u8 year; + } b; + u64 as_int; +}; + +struct el_subpacket { + u16 length; /* length of header (in bytes) */ + u16 class; /* header class and type... 
*/ + u16 type; /* ...determine content */ + u16 revision; /* header revision */ + union { + struct { /* Class 5, Type 1 - System Error */ + u32 frame_length; + u32 frame_packet_count; + } sys_err; + struct { /* Class 5, Type 2 - System Event */ + union el_timestamp timestamp; + u32 frame_length; + u32 frame_packet_count; + } sys_event; + struct { /* Class 5, Type 3 - Double Error Halt */ + u16 halt_code; + u16 reserved; + union el_timestamp timestamp; + u32 frame_length; + u32 frame_packet_count; + } err_halt; + struct { /* Clasee 5, Type 19 - Logout Frame Header */ + u32 frame_length; + u32 frame_flags; + u32 cpu_offset; + u32 system_offset; + } logout_header; + struct { /* Class 12 - Regatta */ + u64 cpuid; + u64 data_start[1]; + } regatta_frame; + struct { /* Raw */ + u64 data_start[1]; + } raw; + } by_type; +}; + +#endif /* __ALPHA_ERR_COMMON_H */ diff --git a/arch/alpha/include/asm/err_ev6.h b/arch/alpha/include/asm/err_ev6.h new file mode 100644 index 00000000000..ea637791e4a --- /dev/null +++ b/arch/alpha/include/asm/err_ev6.h @@ -0,0 +1,6 @@ +#ifndef __ALPHA_ERR_EV6_H +#define __ALPHA_ERR_EV6_H 1 + +/* Dummy include for now. */ + +#endif /* __ALPHA_ERR_EV6_H */ diff --git a/arch/alpha/include/asm/err_ev7.h b/arch/alpha/include/asm/err_ev7.h new file mode 100644 index 00000000000..87f99777c2e --- /dev/null +++ b/arch/alpha/include/asm/err_ev7.h @@ -0,0 +1,202 @@ +#ifndef __ALPHA_ERR_EV7_H +#define __ALPHA_ERR_EV7_H 1 + +/* + * Data for el packet class PAL (14), type LOGOUT_FRAME (1) + */ +struct ev7_pal_logout_subpacket { + u32 mchk_code; + u32 subpacket_count; + u64 whami; + u64 rbox_whami; + u64 rbox_int; + u64 exc_addr; + union el_timestamp timestamp; + u64 halt_code; + u64 reserved; +}; + +/* + * Data for el packet class PAL (14), type EV7_PROCESSOR (4) + */ +struct ev7_pal_processor_subpacket { + u64 i_stat; + u64 dc_stat; + u64 c_addr; + u64 c_syndrome_1; + u64 c_syndrome_0; + u64 c_stat; + u64 c_sts; + u64 mm_stat; + u64 exc_addr; + u64 ier_cm; + u64 isum; + u64 pal_base; + u64 i_ctl; + u64 process_context; + u64 cbox_ctl; + u64 cbox_stp_ctl; + u64 cbox_acc_ctl; + u64 cbox_lcl_set; + u64 cbox_gbl_set; + u64 bbox_ctl; + u64 bbox_err_sts; + u64 bbox_err_idx; + u64 cbox_ddp_err_sts; + u64 bbox_dat_rmp; + u64 reserved[2]; +}; + +/* + * Data for el packet class PAL (14), type EV7_ZBOX (5) + */ +struct ev7_pal_zbox_subpacket { + u32 zbox0_dram_err_status_1; + u32 zbox0_dram_err_status_2; + u32 zbox0_dram_err_status_3; + u32 zbox0_dram_err_ctl; + u32 zbox0_dram_err_adr; + u32 zbox0_dift_timeout; + u32 zbox0_dram_mapper_ctl; + u32 zbox0_frc_err_adr; + u32 zbox0_dift_err_status; + u32 reserved1; + u32 zbox1_dram_err_status_1; + u32 zbox1_dram_err_status_2; + u32 zbox1_dram_err_status_3; + u32 zbox1_dram_err_ctl; + u32 zbox1_dram_err_adr; + u32 zbox1_dift_timeout; + u32 zbox1_dram_mapper_ctl; + u32 zbox1_frc_err_adr; + u32 zbox1_dift_err_status; + u32 reserved2; + u64 cbox_ctl; + u64 cbox_stp_ctl; + u64 zbox0_error_pa; + u64 zbox1_error_pa; + u64 zbox0_ored_syndrome; + u64 zbox1_ored_syndrome; + u64 reserved3[2]; +}; + +/* + * Data for el packet class PAL (14), type EV7_RBOX (6) + */ +struct ev7_pal_rbox_subpacket { + u64 rbox_cfg; + u64 rbox_n_cfg; + u64 rbox_s_cfg; + u64 rbox_e_cfg; + u64 rbox_w_cfg; + u64 rbox_n_err; + u64 rbox_s_err; + u64 rbox_e_err; + u64 rbox_w_err; + u64 rbox_io_cfg; + u64 rbox_io_err; + u64 rbox_l_err; + u64 rbox_whoami; + u64 rbox_imask; + u64 rbox_intq; + u64 rbox_int; + u64 reserved[2]; +}; + +/* + * Data for el packet class PAL (14), type EV7_IO (7) + */ 
+struct ev7_pal_io_one_port { + u64 pox_err_sum; + u64 pox_tlb_err; + u64 pox_spl_cmplt; + u64 pox_trans_sum; + u64 pox_first_err; + u64 pox_mult_err; + u64 pox_dm_source; + u64 pox_dm_dest; + u64 pox_dm_size; + u64 pox_dm_ctrl; + u64 reserved; +}; + +struct ev7_pal_io_subpacket { + u64 io_asic_rev; + u64 io_sys_rev; + u64 io7_uph; + u64 hpi_ctl; + u64 crd_ctl; + u64 hei_ctl; + u64 po7_error_sum; + u64 po7_uncrr_sym; + u64 po7_crrct_sym; + u64 po7_ugbge_sym; + u64 po7_err_pkt0; + u64 po7_err_pkt1; + u64 reserved[2]; + struct ev7_pal_io_one_port ports[4]; +}; + +/* + * Environmental subpacket. Data used for el packets: + * class PAL (14), type AMBIENT_TEMPERATURE (10) + * class PAL (14), type AIRMOVER_FAN (11) + * class PAL (14), type VOLTAGE (12) + * class PAL (14), type INTRUSION (13) + * class PAL (14), type POWER_SUPPLY (14) + * class PAL (14), type LAN (15) + * class PAL (14), type HOT_PLUG (16) + */ +struct ev7_pal_environmental_subpacket { + u16 cabinet; + u16 drawer; + u16 reserved1[2]; + u8 module_type; + u8 unit_id; /* unit reporting condition */ + u8 reserved2; + u8 condition; /* condition reported */ +}; + +/* + * Convert environmental type to index + */ +static inline int ev7_lf_env_index(int type) +{ + BUG_ON((type < EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE) + || (type > EL_TYPE__PAL__ENV__HOT_PLUG)); + + return type - EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE; +} + +/* + * Data for generic el packet class PAL. + */ +struct ev7_pal_subpacket { + union { + struct ev7_pal_logout_subpacket logout; /* Type 1 */ + struct ev7_pal_processor_subpacket ev7; /* Type 4 */ + struct ev7_pal_zbox_subpacket zbox; /* Type 5 */ + struct ev7_pal_rbox_subpacket rbox; /* Type 6 */ + struct ev7_pal_io_subpacket io; /* Type 7 */ + struct ev7_pal_environmental_subpacket env; /* Type 10-16 */ + u64 as_quad[1]; /* Raw u64 */ + } by_type; +}; + +/* + * Struct to contain collected logout from subpackets. 
+ */ +struct ev7_lf_subpackets { + struct ev7_pal_logout_subpacket *logout; /* Type 1 */ + struct ev7_pal_processor_subpacket *ev7; /* Type 4 */ + struct ev7_pal_zbox_subpacket *zbox; /* Type 5 */ + struct ev7_pal_rbox_subpacket *rbox; /* Type 6 */ + struct ev7_pal_io_subpacket *io; /* Type 7 */ + struct ev7_pal_environmental_subpacket *env[7]; /* Type 10-16 */ + + unsigned int io_pid; +}; + +#endif /* __ALPHA_ERR_EV7_H */ + + diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/asm/errno.h new file mode 100644 index 00000000000..69e2655249d --- /dev/null +++ b/arch/alpha/include/asm/errno.h @@ -0,0 +1,123 @@ +#ifndef _ALPHA_ERRNO_H +#define _ALPHA_ERRNO_H + +#include + +#undef EAGAIN /* 11 in errno-base.h */ + +#define EDEADLK 11 /* Resource deadlock would occur */ + +#define EAGAIN 35 /* Try again */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale NFS file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. 
*/ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#endif diff --git a/arch/alpha/include/asm/fb.h b/arch/alpha/include/asm/fb.h new file mode 100644 index 00000000000..fa9bbb96b2b --- /dev/null +++ b/arch/alpha/include/asm/fb.h @@ -0,0 +1,13 @@ +#ifndef _ASM_FB_H_ +#define _ASM_FB_H_ +#include + +/* Caching is off in the I/O space quadrant by design. */ +#define fb_pgprotect(...) 
do {} while (0) + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* _ASM_FB_H_ */ diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h new file mode 100644 index 00000000000..25da0017ec8 --- /dev/null +++ b/arch/alpha/include/asm/fcntl.h @@ -0,0 +1,43 @@ +#ifndef _ALPHA_FCNTL_H +#define _ALPHA_FCNTL_H + +/* open/fcntl - O_SYNC is only implemented on blocks devices and on files + located on an ext2 file system */ +#define O_CREAT 01000 /* not fcntl */ +#define O_TRUNC 02000 /* not fcntl */ +#define O_EXCL 04000 /* not fcntl */ +#define O_NOCTTY 010000 /* not fcntl */ + +#define O_NONBLOCK 00004 +#define O_APPEND 00010 +#define O_SYNC 040000 +#define O_DIRECTORY 0100000 /* must be a directory */ +#define O_NOFOLLOW 0200000 /* don't follow links */ +#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ +#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */ +#define O_NOATIME 04000000 +#define O_CLOEXEC 010000000 /* set close_on_exec */ + +#define F_GETLK 7 +#define F_SETLK 8 +#define F_SETLKW 9 + +#define F_SETOWN 5 /* for sockets. */ +#define F_GETOWN 6 /* for sockets. */ +#define F_SETSIG 10 /* for sockets. */ +#define F_GETSIG 11 /* for sockets. */ + +/* for posix fcntl() and lockf() */ +#define F_RDLCK 1 +#define F_WRLCK 2 +#define F_UNLCK 8 + +/* for old implementation of bsd flock () */ +#define F_EXLCK 16 /* or 3 */ +#define F_SHLCK 32 /* or 4 */ + +#define F_INPROGRESS 64 + +#include + +#endif diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h new file mode 100644 index 00000000000..0be50413b2b --- /dev/null +++ b/arch/alpha/include/asm/floppy.h @@ -0,0 +1,115 @@ +/* + * Architecture specific parts of the Floppy driver + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1995 + */ +#ifndef __ASM_ALPHA_FLOPPY_H +#define __ASM_ALPHA_FLOPPY_H + + +#define fd_inb(port) inb_p(port) +#define fd_outb(value,port) outb_p(value,port) + +#define fd_enable_dma() enable_dma(FLOPPY_DMA) +#define fd_disable_dma() disable_dma(FLOPPY_DMA) +#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy") +#define fd_free_dma() free_dma(FLOPPY_DMA) +#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) +#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode) +#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,virt_to_bus(addr)) +#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count) +#define fd_enable_irq() enable_irq(FLOPPY_IRQ) +#define fd_disable_irq() disable_irq(FLOPPY_IRQ) +#define fd_cacheflush(addr,size) /* nothing */ +#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ + IRQF_DISABLED, "floppy", NULL) +#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); + +#ifdef CONFIG_PCI + +#include + +#define fd_dma_setup(addr,size,mode,io) alpha_fd_dma_setup(addr,size,mode,io) + +static __inline__ int +alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io) +{ + static unsigned long prev_size; + static dma_addr_t bus_addr = 0; + static char *prev_addr; + static int prev_dir; + int dir; + + dir = (mode != DMA_MODE_READ) ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; + + if (bus_addr + && (addr != prev_addr || size != prev_size || dir != prev_dir)) { + /* different from last time -- unmap prev */ + pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir); + bus_addr = 0; + } + + if (!bus_addr) /* need to map it */ + bus_addr = pci_map_single(isa_bridge, addr, size, dir); + + /* remember this one as prev */ + prev_addr = addr; + prev_size = size; + prev_dir = dir; + + fd_clear_dma_ff(); + fd_cacheflush(addr, size); + fd_set_dma_mode(mode); + set_dma_addr(FLOPPY_DMA, bus_addr); + fd_set_dma_count(size); + virtual_dma_port = io; + fd_enable_dma(); + + return 0; +} + +#endif /* CONFIG_PCI */ + +__inline__ void virtual_dma_init(void) +{ + /* Nothing to do on an Alpha */ +} + +static int FDC1 = 0x3f0; +static int FDC2 = -1; + +/* + * Again, the CMOS information doesn't work on the alpha.. + */ +#define FLOPPY0_TYPE 6 +#define FLOPPY1_TYPE 0 + +#define N_FDC 2 +#define N_DRIVE 8 + +/* + * Most Alphas have no problems with floppy DMA crossing 64k borders, + * except for certain ones, like XL and RUFFIAN. + * + * However, the test is simple and fast, and this *is* floppy, after all, + * so we do it for all platforms, just to make sure. + * + * This is advantageous in other circumstances as well, as in moving + * about the PCI DMA windows and forcing the floppy to start doing + * scatter-gather when it never had before, and there *is* a problem + * on that platform... ;-} + */ + +static inline unsigned long CROSS_64KB(void *a, unsigned long s) +{ + unsigned long p = (unsigned long)a; + return ((p + s - 1) ^ p) & ~0xffffUL; +} + +#define EXTRA_FLOPPY_PARAMS + +#endif /* __ASM_ALPHA_FLOPPY_H */ diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h new file mode 100644 index 00000000000..ecb17a72acc --- /dev/null +++ b/arch/alpha/include/asm/fpu.h @@ -0,0 +1,193 @@ +#ifndef __ASM_ALPHA_FPU_H +#define __ASM_ALPHA_FPU_H + +/* + * Alpha floating-point control register defines: + */ +#define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */ +#define FPCR_DNZ (1UL<<48) /* denorms to zero */ +#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */ +#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */ +#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */ +#define FPCR_INV (1UL<<52) /* invalid operation */ +#define FPCR_DZE (1UL<<53) /* division by zero */ +#define FPCR_OVF (1UL<<54) /* overflow */ +#define FPCR_UNF (1UL<<55) /* underflow */ +#define FPCR_INE (1UL<<56) /* inexact */ +#define FPCR_IOV (1UL<<57) /* integer overflow */ +#define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */ +#define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */ +#define FPCR_INED (1UL<<62) /* inexact disable (opt.) */ +#define FPCR_SUM (1UL<<63) /* summary bit */ + +#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */ +#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */ +#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */ +#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */ +#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */ +#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT) + +#define FPCR_MASK 0xffff800000000000L + +/* + * IEEE trap enables are implemented in software. These per-thread + * bits are stored in the "ieee_state" field of "struct thread_info". + * Thus, the bits are defined so as not to conflict with the + * floating-point enable bit (which is architected). 
On top of that, + * we want to make these bits compatible with OSF/1 so + * ieee_set_fp_control() etc. can be implemented easily and + * compatibly. The corresponding definitions are in + * /usr/include/machine/fpu.h under OSF/1. + */ +#define IEEE_TRAP_ENABLE_INV (1UL<<1) /* invalid op */ +#define IEEE_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */ +#define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */ +#define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */ +#define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */ +#define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */ +#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\ + IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\ + IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO) + +/* Denorm and Underflow flushing */ +#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */ +#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */ + +#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ) + +/* status bits coming from fpcr: */ +#define IEEE_STATUS_INV (1UL<<17) +#define IEEE_STATUS_DZE (1UL<<18) +#define IEEE_STATUS_OVF (1UL<<19) +#define IEEE_STATUS_UNF (1UL<<20) +#define IEEE_STATUS_INE (1UL<<21) +#define IEEE_STATUS_DNO (1UL<<22) + +#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \ + IEEE_STATUS_OVF | IEEE_STATUS_UNF | \ + IEEE_STATUS_INE | IEEE_STATUS_DNO) + +#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \ + IEEE_STATUS_MASK | IEEE_MAP_MASK) + +#define IEEE_CURRENT_RM_SHIFT 32 +#define IEEE_CURRENT_RM_MASK (3UL<> 35) & IEEE_STATUS_MASK; + sw |= (fp >> 36) & IEEE_MAP_DMZ; + sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV + | IEEE_TRAP_ENABLE_DZE + | IEEE_TRAP_ENABLE_OVF); + sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE); + sw |= (fp >> 47) & IEEE_MAP_UMZ; + sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO; + return sw; +} + +#ifdef __KERNEL__ + +/* The following two functions don't need trapb/excb instructions + around the mf_fpcr/mt_fpcr instructions because (a) the kernel + never generates arithmetic faults and (b) call_pal instructions + are implied trap barriers. */ + +static inline unsigned long +rdfpcr(void) +{ + unsigned long tmp, ret; + +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) + __asm__ __volatile__ ( + "ftoit $f0,%0\n\t" + "mf_fpcr $f0\n\t" + "ftoit $f0,%1\n\t" + "itoft %0,$f0" + : "=r"(tmp), "=r"(ret)); +#else + __asm__ __volatile__ ( + "stt $f0,%0\n\t" + "mf_fpcr $f0\n\t" + "stt $f0,%1\n\t" + "ldt $f0,%0" + : "=m"(tmp), "=m"(ret)); +#endif + + return ret; +} + +static inline void +wrfpcr(unsigned long val) +{ + unsigned long tmp; + +#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) + __asm__ __volatile__ ( + "ftoit $f0,%0\n\t" + "itoft %1,$f0\n\t" + "mt_fpcr $f0\n\t" + "itoft %0,$f0" + : "=&r"(tmp) : "r"(val)); +#else + __asm__ __volatile__ ( + "stt $f0,%0\n\t" + "ldt $f0,%1\n\t" + "mt_fpcr $f0\n\t" + "ldt $f0,%0" + : "=m"(tmp) : "m"(val)); +#endif +} + +static inline unsigned long +swcr_update_status(unsigned long swcr, unsigned long fpcr) +{ + /* EV6 implements most of the bits in hardware. Collect + the acrued exception bits from the real fpcr. 
*/ + if (implver() == IMPLVER_EV6) { + swcr &= ~IEEE_STATUS_MASK; + swcr |= (fpcr >> 35) & IEEE_STATUS_MASK; + } + return swcr; +} + +extern unsigned long alpha_read_fp_reg (unsigned long reg); +extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); +extern unsigned long alpha_read_fp_reg_s (unsigned long reg); +extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val); + +#endif /* __KERNEL__ */ + +#endif /* __ASM_ALPHA_FPU_H */ diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h new file mode 100644 index 00000000000..6a332a9f099 --- /dev/null +++ b/arch/alpha/include/asm/futex.h @@ -0,0 +1,6 @@ +#ifndef _ASM_FUTEX_H +#define _ASM_FUTEX_H + +#include + +#endif diff --git a/arch/alpha/include/asm/gct.h b/arch/alpha/include/asm/gct.h new file mode 100644 index 00000000000..3504c704927 --- /dev/null +++ b/arch/alpha/include/asm/gct.h @@ -0,0 +1,58 @@ +#ifndef __ALPHA_GCT_H +#define __ALPHA_GCT_H + +typedef u64 gct_id; +typedef u64 gct6_handle; + +typedef struct __gct6_node { + u8 type; + u8 subtype; + u16 size; + u32 hd_extension; + gct6_handle owner; + gct6_handle active_user; + gct_id id; + u64 flags; + u16 rev; + u16 change_counter; + u16 max_child; + u16 reserved1; + gct6_handle saved_owner; + gct6_handle affinity; + gct6_handle parent; + gct6_handle next; + gct6_handle prev; + gct6_handle child; + u64 fw_flags; + u64 os_usage; + u64 fru_id; + u32 checksum; + u32 magic; /* 'GLXY' */ +} gct6_node; + +typedef struct { + u8 type; + u8 subtype; + void (*callout)(gct6_node *); +} gct6_search_struct; + +#define GCT_NODE_MAGIC 0x59584c47 /* 'GLXY' */ + +/* + * node types + */ +#define GCT_TYPE_HOSE 0x0E + +/* + * node subtypes + */ +#define GCT_SUBTYPE_IO_PORT_MODULE 0x2C + +#define GCT_NODE_PTR(off) ((gct6_node *)((char *)hwrpb + \ + hwrpb->frut_offset + \ + (gct6_handle)(off))) \ + +int gct6_find_nodes(gct6_node *, gct6_search_struct *); + +#endif /* __ALPHA_GCT_H */ + diff --git a/arch/alpha/include/asm/gentrap.h b/arch/alpha/include/asm/gentrap.h new file mode 100644 index 00000000000..ae50cc3192c --- /dev/null +++ b/arch/alpha/include/asm/gentrap.h @@ -0,0 +1,37 @@ +#ifndef _ASMAXP_GENTRAP_H +#define _ASMAXP_GENTRAP_H + +/* + * Definitions for gentrap causes. They are generated by user-level + * programs and therefore should be compatible with the corresponding + * OSF/1 definitions. + */ +#define GEN_INTOVF -1 /* integer overflow */ +#define GEN_INTDIV -2 /* integer division by zero */ +#define GEN_FLTOVF -3 /* fp overflow */ +#define GEN_FLTDIV -4 /* fp division by zero */ +#define GEN_FLTUND -5 /* fp underflow */ +#define GEN_FLTINV -6 /* invalid fp operand */ +#define GEN_FLTINE -7 /* inexact fp operand */ +#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */ +#define GEN_DECDIV -9 /* decimal division by zero */ +#define GEN_DECINV -10 /* invalid decimal operand */ +#define GEN_ROPRAND -11 /* reserved operand */ +#define GEN_ASSERTERR -12 /* assertion error */ +#define GEN_NULPTRERR -13 /* null pointer error */ +#define GEN_STKOVF -14 /* stack overflow */ +#define GEN_STRLENERR -15 /* string length error */ +#define GEN_SUBSTRERR -16 /* substring error */ +#define GEN_RANGERR -17 /* range error */ +#define GEN_SUBRNG -18 +#define GEN_SUBRNG1 -19 +#define GEN_SUBRNG2 -20 +#define GEN_SUBRNG3 -21 /* these report range errors for */ +#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */ +#define GEN_SUBRNG5 -23 +#define GEN_SUBRNG6 -24 +#define GEN_SUBRNG7 -25 + +/* the remaining codes (-26..-1023) are reserved. 
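Worked example (not part of the patch): the IEEE_STATUS_* software bits in asm/fpu.h above are deliberately placed 35 bits below the matching FPCR exception bits, which is why swcr_update_status() can fold the hardware status in with a single `(fpcr >> 35) & IEEE_STATUS_MASK`. A minimal stand-alone check of that layout, with the constants copied from the header:

/*
 * User-space demo only: verify that the FPCR exception bits land on the
 * IEEE_STATUS_* software bits when shifted right by 35, the shift used
 * by swcr_update_status() in asm/fpu.h.
 */
#include <assert.h>
#include <stdio.h>

#define FPCR_INV (1UL<<52)
#define FPCR_DZE (1UL<<53)
#define FPCR_OVF (1UL<<54)
#define FPCR_UNF (1UL<<55)
#define FPCR_INE (1UL<<56)

#define IEEE_STATUS_INV (1UL<<17)
#define IEEE_STATUS_DZE (1UL<<18)
#define IEEE_STATUS_OVF (1UL<<19)
#define IEEE_STATUS_UNF (1UL<<20)
#define IEEE_STATUS_INE (1UL<<21)

int main(void)
{
        /* 52 - 35 == 17, 53 - 35 == 18, ... so a plain shift lines them up. */
        assert((FPCR_INV >> 35) == IEEE_STATUS_INV);
        assert((FPCR_DZE >> 35) == IEEE_STATUS_DZE);
        assert((FPCR_OVF >> 35) == IEEE_STATUS_OVF);
        assert((FPCR_UNF >> 35) == IEEE_STATUS_UNF);
        assert((FPCR_INE >> 35) == IEEE_STATUS_INE);
        printf("FPCR status bits map onto IEEE_STATUS_* via >> 35\n");
        return 0;
}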
*/ + +#endif /* _ASMAXP_GENTRAP_H */ diff --git a/arch/alpha/include/asm/hardirq.h b/arch/alpha/include/asm/hardirq.h new file mode 100644 index 00000000000..d953e234daa --- /dev/null +++ b/arch/alpha/include/asm/hardirq.h @@ -0,0 +1,30 @@ +#ifndef _ALPHA_HARDIRQ_H +#define _ALPHA_HARDIRQ_H + +#include +#include + + +/* entry.S is sensitive to the offsets of these fields */ +typedef struct { + unsigned long __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +void ack_bad_irq(unsigned int irq); + +#define HARDIRQ_BITS 12 + +/* + * The hardirq mask has to be large enough to have + * space for potentially nestable IRQ sources in the system + * to nest on a single CPU. On Alpha, interrupts are masked at the CPU + * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode) + * so we really only have 8 nestable IRQs, but allow some overhead + */ +#if (1 << HARDIRQ_BITS) < 16 +#error HARDIRQ_BITS is too low! +#endif + +#endif /* _ALPHA_HARDIRQ_H */ diff --git a/arch/alpha/include/asm/hw_irq.h b/arch/alpha/include/asm/hw_irq.h new file mode 100644 index 00000000000..a37db0f9509 --- /dev/null +++ b/arch/alpha/include/asm/hw_irq.h @@ -0,0 +1,13 @@ +#ifndef _ALPHA_HW_IRQ_H +#define _ALPHA_HW_IRQ_H + + +extern volatile unsigned long irq_err_count; + +#ifdef CONFIG_ALPHA_GENERIC +#define ACTUAL_NR_IRQS alpha_mv.nr_irqs +#else +#define ACTUAL_NR_IRQS NR_IRQS +#endif + +#endif diff --git a/arch/alpha/include/asm/hwrpb.h b/arch/alpha/include/asm/hwrpb.h new file mode 100644 index 00000000000..8e8f871af7c --- /dev/null +++ b/arch/alpha/include/asm/hwrpb.h @@ -0,0 +1,220 @@ +#ifndef __ALPHA_HWRPB_H +#define __ALPHA_HWRPB_H + +#define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000) + +/* + * DEC processor types for Alpha systems. Found in HWRPB. + * These values are architected. + */ + +#define EV3_CPU 1 /* EV3 */ +#define EV4_CPU 2 /* EV4 (21064) */ +#define LCA4_CPU 4 /* LCA4 (21066/21068) */ +#define EV5_CPU 5 /* EV5 (21164) */ +#define EV45_CPU 6 /* EV4.5 (21064/xxx) */ +#define EV56_CPU 7 /* EV5.6 (21164) */ +#define EV6_CPU 8 /* EV6 (21264) */ +#define PCA56_CPU 9 /* PCA56 (21164PC) */ +#define PCA57_CPU 10 /* PCA57 (notyet) */ +#define EV67_CPU 11 /* EV67 (21264A) */ +#define EV68CB_CPU 12 /* EV68CB (21264C) */ +#define EV68AL_CPU 13 /* EV68AL (21264B) */ +#define EV68CX_CPU 14 /* EV68CX (21264D) */ +#define EV7_CPU 15 /* EV7 (21364) */ +#define EV79_CPU 16 /* EV79 (21364??) */ +#define EV69_CPU 17 /* EV69 (21264/EV69A) */ + +/* + * DEC system types for Alpha systems. Found in HWRPB. + * These values are architected. 
+ */ + +#define ST_ADU 1 /* Alpha ADU systype */ +#define ST_DEC_4000 2 /* Cobra systype */ +#define ST_DEC_7000 3 /* Ruby systype */ +#define ST_DEC_3000_500 4 /* Flamingo systype */ +#define ST_DEC_2000_300 6 /* Jensen systype */ +#define ST_DEC_3000_300 7 /* Pelican systype */ +#define ST_DEC_2100_A500 9 /* Sable systype */ +#define ST_DEC_AXPVME_64 10 /* AXPvme system type */ +#define ST_DEC_AXPPCI_33 11 /* NoName system type */ +#define ST_DEC_TLASER 12 /* Turbolaser systype */ +#define ST_DEC_2100_A50 13 /* Avanti systype */ +#define ST_DEC_MUSTANG 14 /* Mustang systype */ +#define ST_DEC_ALCOR 15 /* Alcor (EV5) systype */ +#define ST_DEC_1000 17 /* Mikasa systype */ +#define ST_DEC_EB64 18 /* EB64 systype */ +#define ST_DEC_EB66 19 /* EB66 systype */ +#define ST_DEC_EB64P 20 /* EB64+ systype */ +#define ST_DEC_BURNS 21 /* laptop systype */ +#define ST_DEC_RAWHIDE 22 /* Rawhide systype */ +#define ST_DEC_K2 23 /* K2 systype */ +#define ST_DEC_LYNX 24 /* Lynx systype */ +#define ST_DEC_XL 25 /* Alpha XL systype */ +#define ST_DEC_EB164 26 /* EB164 systype */ +#define ST_DEC_NORITAKE 27 /* Noritake systype */ +#define ST_DEC_CORTEX 28 /* Cortex systype */ +#define ST_DEC_MIATA 30 /* Miata systype */ +#define ST_DEC_XXM 31 /* XXM systype */ +#define ST_DEC_TAKARA 32 /* Takara systype */ +#define ST_DEC_YUKON 33 /* Yukon systype */ +#define ST_DEC_TSUNAMI 34 /* Tsunami systype */ +#define ST_DEC_WILDFIRE 35 /* Wildfire systype */ +#define ST_DEC_CUSCO 36 /* CUSCO systype */ +#define ST_DEC_EIGER 37 /* Eiger systype */ +#define ST_DEC_TITAN 38 /* Titan systype */ +#define ST_DEC_MARVEL 39 /* Marvel systype */ + +/* UNOFFICIAL!!! */ +#define ST_UNOFFICIAL_BIAS 100 +#define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */ + +/* Alpha Processor, Inc. systems */ +#define ST_API_BIAS 200 +#define ST_API_NAUTILUS 201 /* UP1000 systype */ + +struct pcb_struct { + unsigned long ksp; + unsigned long usp; + unsigned long ptbr; + unsigned int pcc; + unsigned int asn; + unsigned long unique; + unsigned long flags; + unsigned long res1, res2; +}; + +struct percpu_struct { + unsigned long hwpcb[16]; + unsigned long flags; + unsigned long pal_mem_size; + unsigned long pal_scratch_size; + unsigned long pal_mem_pa; + unsigned long pal_scratch_pa; + unsigned long pal_revision; + unsigned long type; + unsigned long variation; + unsigned long revision; + unsigned long serial_no[2]; + unsigned long logout_area_pa; + unsigned long logout_area_len; + unsigned long halt_PCBB; + unsigned long halt_PC; + unsigned long halt_PS; + unsigned long halt_arg; + unsigned long halt_ra; + unsigned long halt_pv; + unsigned long halt_reason; + unsigned long res; + unsigned long ipc_buffer[21]; + unsigned long palcode_avail[16]; + unsigned long compatibility; + unsigned long console_data_log_pa; + unsigned long console_data_log_length; + unsigned long bcache_info; +}; + +struct procdesc_struct { + unsigned long weird_vms_stuff; + unsigned long address; +}; + +struct vf_map_struct { + unsigned long va; + unsigned long pa; + unsigned long count; +}; + +struct crb_struct { + struct procdesc_struct * dispatch_va; + struct procdesc_struct * dispatch_pa; + struct procdesc_struct * fixup_va; + struct procdesc_struct * fixup_pa; + /* virtual->physical map */ + unsigned long map_entries; + unsigned long map_pages; + struct vf_map_struct map[1]; +}; + +struct memclust_struct { + unsigned long start_pfn; + unsigned long numpages; + unsigned long numtested; + unsigned long bitmap_va; + unsigned long bitmap_pa; + unsigned long bitmap_chksum; + 
unsigned long usage; +}; + +struct memdesc_struct { + unsigned long chksum; + unsigned long optional_pa; + unsigned long numclusters; + struct memclust_struct cluster[0]; +}; + +struct dsr_struct { + long smm; /* SMM nubber used by LMF */ + unsigned long lurt_off; /* offset to LURT table */ + unsigned long sysname_off; /* offset to sysname char count */ +}; + +struct hwrpb_struct { + unsigned long phys_addr; /* check: physical address of the hwrpb */ + unsigned long id; /* check: "HWRPB\0\0\0" */ + unsigned long revision; + unsigned long size; /* size of hwrpb */ + unsigned long cpuid; + unsigned long pagesize; /* 8192, I hope */ + unsigned long pa_bits; /* number of physical address bits */ + unsigned long max_asn; + unsigned char ssn[16]; /* system serial number: big bother is watching */ + unsigned long sys_type; + unsigned long sys_variation; + unsigned long sys_revision; + unsigned long intr_freq; /* interval clock frequency * 4096 */ + unsigned long cycle_freq; /* cycle counter frequency */ + unsigned long vptb; /* Virtual Page Table Base address */ + unsigned long res1; + unsigned long tbhb_offset; /* Translation Buffer Hint Block */ + unsigned long nr_processors; + unsigned long processor_size; + unsigned long processor_offset; + unsigned long ctb_nr; + unsigned long ctb_size; /* console terminal block size */ + unsigned long ctbt_offset; /* console terminal block table offset */ + unsigned long crb_offset; /* console callback routine block */ + unsigned long mddt_offset; /* memory data descriptor table */ + unsigned long cdb_offset; /* configuration data block (or NULL) */ + unsigned long frut_offset; /* FRU table (or NULL) */ + void (*save_terminal)(unsigned long); + unsigned long save_terminal_data; + void (*restore_terminal)(unsigned long); + unsigned long restore_terminal_data; + void (*CPU_restart)(unsigned long); + unsigned long CPU_restart_data; + unsigned long res2; + unsigned long res3; + unsigned long chksum; + unsigned long rxrdy; + unsigned long txrdy; + unsigned long dsr_offset; /* "Dynamic System Recognition Data Block Table" */ +}; + +#ifdef __KERNEL__ + +extern struct hwrpb_struct *hwrpb; + +static inline void +hwrpb_update_checksum(struct hwrpb_struct *h) +{ + unsigned long sum = 0, *l; + for (l = (unsigned long *) h; l < (unsigned long *) &h->chksum; ++l) + sum += *l; + h->chksum = sum; +} + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_HWRPB_H */ diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h new file mode 100644 index 00000000000..e971ab000f9 --- /dev/null +++ b/arch/alpha/include/asm/io.h @@ -0,0 +1,577 @@ +#ifndef __ALPHA_IO_H +#define __ALPHA_IO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include +#include +#include + +/* The generic header contains only prototypes. Including it ensures that + the implementation we have here matches that interface. */ +#include + +/* We don't use IO slowdowns on the Alpha, but.. */ +#define __SLOW_DOWN_IO do { } while (0) +#define SLOW_DOWN_IO do { } while (0) + +/* + * Virtual -> physical identity mapping starts at this offset + */ +#ifdef USE_48_BIT_KSEG +#define IDENT_ADDR 0xffff800000000000UL +#else +#define IDENT_ADDR 0xfffffc0000000000UL +#endif + +/* + * We try to avoid hae updates (thus the cache), but when we + * do need to update the hae, we need to do it atomically, so + * that any interrupts wouldn't get confused with the hae + * register not being up-to-date with respect to the hardware + * value. 
+ */ +extern inline void __set_hae(unsigned long new_hae) +{ + unsigned long flags; + local_irq_save(flags); + + alpha_mv.hae_cache = new_hae; + *alpha_mv.hae_register = new_hae; + mb(); + /* Re-read to make sure it was written. */ + new_hae = *alpha_mv.hae_register; + + local_irq_restore(flags); +} + +extern inline void set_hae(unsigned long new_hae) +{ + if (new_hae != alpha_mv.hae_cache) + __set_hae(new_hae); +} + +/* + * Change virtual addresses to physical addresses and vv. + */ +#ifdef USE_48_BIT_KSEG +static inline unsigned long virt_to_phys(void *address) +{ + return (unsigned long)address - IDENT_ADDR; +} + +static inline void * phys_to_virt(unsigned long address) +{ + return (void *) (address + IDENT_ADDR); +} +#else +static inline unsigned long virt_to_phys(void *address) +{ + unsigned long phys = (unsigned long)address; + + /* Sign-extend from bit 41. */ + phys <<= (64 - 41); + phys = (long)phys >> (64 - 41); + + /* Crop to the physical address width of the processor. */ + phys &= (1ul << hwrpb->pa_bits) - 1; + + return phys; +} + +static inline void * phys_to_virt(unsigned long address) +{ + return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1))); +} +#endif + +#define page_to_phys(page) page_to_pa(page) + +static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page) +{ + return page_to_phys(page); +} + +/* This depends on working iommu. */ +#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0) + +/* Maximum PIO space address supported? */ +#define IO_SPACE_LIMIT 0xffff + +/* + * Change addresses as seen by the kernel (virtual) to addresses as + * seen by a device (bus), and vice versa. + * + * Note that this only works for a limited range of kernel addresses, + * and very well may not span all memory. Consider this interface + * deprecated in favour of the DMA-mapping API. + */ +extern unsigned long __direct_map_base; +extern unsigned long __direct_map_size; + +static inline unsigned long __deprecated virt_to_bus(void *address) +{ + unsigned long phys = virt_to_phys(address); + unsigned long bus = phys + __direct_map_base; + return phys <= __direct_map_size ? bus : 0; +} +#define isa_virt_to_bus virt_to_bus + +static inline void * __deprecated bus_to_virt(unsigned long address) +{ + void *virt; + + /* This check is a sanity check but also ensures that bus address 0 + maps to virtual address 0 which is useful to detect null pointers + (the NCR driver is much simpler if NULL pointers are preserved). */ + address -= __direct_map_base; + virt = phys_to_virt(address); + return (long)address <= 0 ? NULL : virt; +} +#define isa_bus_to_virt bus_to_virt + +/* + * There are different chipsets to interface the Alpha CPUs to the world. + */ + +#define IO_CONCAT(a,b) _IO_CONCAT(a,b) +#define _IO_CONCAT(a,b) a ## _ ## b + +#ifdef CONFIG_ALPHA_GENERIC + +/* In a generic kernel, we always go through the machine vector. 
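Illustrative sketch (not part of the patch): the arithmetic behind virt_to_phys()/phys_to_virt() above for the default (non-USE_48_BIT_KSEG) layout: sign-extend the KSEG offset from bit 41, then crop to the machine's physical address width. PA_BITS below is an assumed stand-in for hwrpb->pa_bits, purely so the demo is self-contained:

/*
 * User-space demo only: reproduces the KSEG virt<->phys arithmetic from
 * asm/io.h; no kernel data structures are touched.
 */
#include <assert.h>
#include <stdio.h>

#define IDENT_ADDR 0xfffffc0000000000UL
#define PA_BITS    44                      /* assumption for the demo */

static unsigned long demo_virt_to_phys(unsigned long address)
{
        unsigned long phys = address;

        /* Sign-extend from bit 41, exactly as the header does. */
        phys <<= (64 - 41);
        phys = (long)phys >> (64 - 41);

        /* Crop to the physical address width (hwrpb->pa_bits in the kernel). */
        phys &= (1UL << PA_BITS) - 1;
        return phys;
}

static unsigned long demo_phys_to_virt(unsigned long address)
{
        return IDENT_ADDR + (address & ((1UL << 41) - 1));
}

int main(void)
{
        unsigned long pa = 0x12345000UL;
        unsigned long va = demo_phys_to_virt(pa);

        assert(va == IDENT_ADDR + pa);
        assert(demo_virt_to_phys(va) == pa);   /* round trip through KSEG */
        printf("virt 0x%lx <-> phys 0x%lx\n", va, pa);
        return 0;
}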
*/ + +#define REMAP1(TYPE, NAME, QUAL) \ +static inline TYPE generic_##NAME(QUAL void __iomem *addr) \ +{ \ + return alpha_mv.mv_##NAME(addr); \ +} + +#define REMAP2(TYPE, NAME, QUAL) \ +static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \ +{ \ + alpha_mv.mv_##NAME(b, addr); \ +} + +REMAP1(unsigned int, ioread8, /**/) +REMAP1(unsigned int, ioread16, /**/) +REMAP1(unsigned int, ioread32, /**/) +REMAP1(u8, readb, const volatile) +REMAP1(u16, readw, const volatile) +REMAP1(u32, readl, const volatile) +REMAP1(u64, readq, const volatile) + +REMAP2(u8, iowrite8, /**/) +REMAP2(u16, iowrite16, /**/) +REMAP2(u32, iowrite32, /**/) +REMAP2(u8, writeb, volatile) +REMAP2(u16, writew, volatile) +REMAP2(u32, writel, volatile) +REMAP2(u64, writeq, volatile) + +#undef REMAP1 +#undef REMAP2 + +extern inline void __iomem *generic_ioportmap(unsigned long a) +{ + return alpha_mv.mv_ioportmap(a); +} + +static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s) +{ + return alpha_mv.mv_ioremap(a, s); +} + +static inline void generic_iounmap(volatile void __iomem *a) +{ + return alpha_mv.mv_iounmap(a); +} + +static inline int generic_is_ioaddr(unsigned long a) +{ + return alpha_mv.mv_is_ioaddr(a); +} + +static inline int generic_is_mmio(const volatile void __iomem *a) +{ + return alpha_mv.mv_is_mmio(a); +} + +#define __IO_PREFIX generic +#define generic_trivial_rw_bw 0 +#define generic_trivial_rw_lq 0 +#define generic_trivial_io_bw 0 +#define generic_trivial_io_lq 0 +#define generic_trivial_iounmap 0 + +#else + +#if defined(CONFIG_ALPHA_APECS) +# include +#elif defined(CONFIG_ALPHA_CIA) +# include +#elif defined(CONFIG_ALPHA_IRONGATE) +# include +#elif defined(CONFIG_ALPHA_JENSEN) +# include +#elif defined(CONFIG_ALPHA_LCA) +# include +#elif defined(CONFIG_ALPHA_MARVEL) +# include +#elif defined(CONFIG_ALPHA_MCPCIA) +# include +#elif defined(CONFIG_ALPHA_POLARIS) +# include +#elif defined(CONFIG_ALPHA_T2) +# include +#elif defined(CONFIG_ALPHA_TSUNAMI) +# include +#elif defined(CONFIG_ALPHA_TITAN) +# include +#elif defined(CONFIG_ALPHA_WILDFIRE) +# include +#else +#error "What system is this?" +#endif + +#endif /* GENERIC */ + +/* + * We always have external versions of these routines. + */ +extern u8 inb(unsigned long port); +extern u16 inw(unsigned long port); +extern u32 inl(unsigned long port); +extern void outb(u8 b, unsigned long port); +extern void outw(u16 b, unsigned long port); +extern void outl(u32 b, unsigned long port); + +extern u8 readb(const volatile void __iomem *addr); +extern u16 readw(const volatile void __iomem *addr); +extern u32 readl(const volatile void __iomem *addr); +extern u64 readq(const volatile void __iomem *addr); +extern void writeb(u8 b, volatile void __iomem *addr); +extern void writew(u16 b, volatile void __iomem *addr); +extern void writel(u32 b, volatile void __iomem *addr); +extern void writeq(u64 b, volatile void __iomem *addr); + +extern u8 __raw_readb(const volatile void __iomem *addr); +extern u16 __raw_readw(const volatile void __iomem *addr); +extern u32 __raw_readl(const volatile void __iomem *addr); +extern u64 __raw_readq(const volatile void __iomem *addr); +extern void __raw_writeb(u8 b, volatile void __iomem *addr); +extern void __raw_writew(u16 b, volatile void __iomem *addr); +extern void __raw_writel(u32 b, volatile void __iomem *addr); +extern void __raw_writeq(u64 b, volatile void __iomem *addr); + +/* + * Mapping from port numbers to __iomem space is pretty easy. 
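Illustrative sketch (not part of the patch): IO_CONCAT() pastes __IO_PREFIX onto an accessor name, and it needs the extra _IO_CONCAT() level so that __IO_PREFIX is macro-expanded before ## does the pasting. A minimal stand-alone demonstration:

/*
 * User-space demo only: the two-level token pasting used by asm/io.h.
 * Pasting directly with a ## _ ## b would glue the literal text
 * "__IO_PREFIX" onto the name instead of "generic".
 */
#include <stdio.h>

#define IO_CONCAT(a,b)  _IO_CONCAT(a,b)
#define _IO_CONCAT(a,b) a ## _ ## b

#define __IO_PREFIX generic

static unsigned int generic_ioread32(void)
{
        return 0x12345678;      /* stand-in for the real accessor */
}

int main(void)
{
        /* Expands to generic_ioread32(), exactly how io.h builds its names. */
        printf("0x%x\n", IO_CONCAT(__IO_PREFIX, ioread32)());
        return 0;
}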
+ */ + +/* These two have to be extern inline because of the extern prototype from + . It is not legal to mix "extern" and "static" for + the same declaration. */ +extern inline void __iomem *ioport_map(unsigned long port, unsigned int size) +{ + return IO_CONCAT(__IO_PREFIX,ioportmap) (port); +} + +extern inline void ioport_unmap(void __iomem *addr) +{ +} + +static inline void __iomem *ioremap(unsigned long port, unsigned long size) +{ + return IO_CONCAT(__IO_PREFIX,ioremap) (port, size); +} + +static inline void __iomem *__ioremap(unsigned long port, unsigned long size, + unsigned long flags) +{ + return ioremap(port, size); +} + +static inline void __iomem * ioremap_nocache(unsigned long offset, + unsigned long size) +{ + return ioremap(offset, size); +} + +static inline void iounmap(volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iounmap)(addr); +} + +static inline int __is_ioaddr(unsigned long addr) +{ + return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr); +} +#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a)) + +static inline int __is_mmio(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,is_mmio)(addr); +} + + +/* + * If the actual I/O bits are sufficiently trivial, then expand inline. + */ + +#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) +extern inline unsigned int ioread8(void __iomem *addr) +{ + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); + mb(); + return ret; +} + +extern inline unsigned int ioread16(void __iomem *addr) +{ + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); + mb(); + return ret; +} + +extern inline void iowrite8(u8 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); + mb(); +} + +extern inline void iowrite16(u16 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); + mb(); +} + +extern inline u8 inb(unsigned long port) +{ + return ioread8(ioport_map(port, 1)); +} + +extern inline u16 inw(unsigned long port) +{ + return ioread16(ioport_map(port, 2)); +} + +extern inline void outb(u8 b, unsigned long port) +{ + iowrite8(b, ioport_map(port, 1)); +} + +extern inline void outw(u16 b, unsigned long port) +{ + iowrite16(b, ioport_map(port, 2)); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) +extern inline unsigned int ioread32(void __iomem *addr) +{ + unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); + mb(); + return ret; +} + +extern inline void iowrite32(u32 b, void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); + mb(); +} + +extern inline u32 inl(unsigned long port) +{ + return ioread32(ioport_map(port, 4)); +} + +extern inline void outl(u32 b, unsigned long port) +{ + iowrite32(b, ioport_map(port, 4)); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 +extern inline u8 __raw_readb(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readb)(addr); +} + +extern inline u16 __raw_readw(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readw)(addr); +} + +extern inline void __raw_writeb(u8 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writeb)(b, addr); +} + +extern inline void __raw_writew(u16 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writew)(b, addr); +} + +extern inline u8 readb(const volatile void __iomem *addr) +{ + u8 ret = __raw_readb(addr); + mb(); + return ret; +} + +extern inline u16 readw(const volatile void __iomem *addr) +{ + u16 ret = __raw_readw(addr); + mb(); + return ret; +} + +extern inline void writeb(u8 b, volatile void __iomem *addr) +{ + __raw_writeb(b, addr); + mb(); 
+} + +extern inline void writew(u16 b, volatile void __iomem *addr) +{ + __raw_writew(b, addr); + mb(); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 +extern inline u32 __raw_readl(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readl)(addr); +} + +extern inline u64 __raw_readq(const volatile void __iomem *addr) +{ + return IO_CONCAT(__IO_PREFIX,readq)(addr); +} + +extern inline void __raw_writel(u32 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writel)(b, addr); +} + +extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) +{ + IO_CONCAT(__IO_PREFIX,writeq)(b, addr); +} + +extern inline u32 readl(const volatile void __iomem *addr) +{ + u32 ret = __raw_readl(addr); + mb(); + return ret; +} + +extern inline u64 readq(const volatile void __iomem *addr) +{ + u64 ret = __raw_readq(addr); + mb(); + return ret; +} + +extern inline void writel(u32 b, volatile void __iomem *addr) +{ + __raw_writel(b, addr); + mb(); +} + +extern inline void writeq(u64 b, volatile void __iomem *addr) +{ + __raw_writeq(b, addr); + mb(); +} +#endif + +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl +#define readb_relaxed(addr) __raw_readb(addr) +#define readw_relaxed(addr) __raw_readw(addr) +#define readl_relaxed(addr) __raw_readl(addr) +#define readq_relaxed(addr) __raw_readq(addr) + +#define mmiowb() + +/* + * String version of IO memory access ops: + */ +extern void memcpy_fromio(void *, const volatile void __iomem *, long); +extern void memcpy_toio(volatile void __iomem *, const void *, long); +extern void _memset_c_io(volatile void __iomem *, unsigned long, long); + +static inline void memset_io(volatile void __iomem *addr, u8 c, long len) +{ + _memset_c_io(addr, 0x0101010101010101UL * c, len); +} + +#define __HAVE_ARCH_MEMSETW_IO +static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) +{ + _memset_c_io(addr, 0x0001000100010001UL * c, len); +} + +/* + * String versions of in/out ops: + */ +extern void insb (unsigned long port, void *dst, unsigned long count); +extern void insw (unsigned long port, void *dst, unsigned long count); +extern void insl (unsigned long port, void *dst, unsigned long count); +extern void outsb (unsigned long port, const void *src, unsigned long count); +extern void outsw (unsigned long port, const void *src, unsigned long count); +extern void outsl (unsigned long port, const void *src, unsigned long count); + +/* + * The Alpha Jensen hardware for some rather strange reason puts + * the RTC clock at 0x170 instead of 0x70. Probably due to some + * misguided idea about using 0x70 for NMI stuff. + * + * These defines will override the defaults when doing RTC queries + */ + +#ifdef CONFIG_ALPHA_GENERIC +# define RTC_PORT(x) ((x) + alpha_mv.rtc_port) +#else +# ifdef CONFIG_ALPHA_JENSEN +# define RTC_PORT(x) (0x170+(x)) +# else +# define RTC_PORT(x) (0x70 + (x)) +# endif +#endif +#define RTC_ALWAYS_BCD 0 + +/* + * Some mucking forons use if[n]def writeq to check if platform has it. + * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them + * to play with; for now just use cpp anti-recursion logics and make sure + * that damn thing is defined and expands to itself. 
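Worked example (not part of the patch): memset_io() and memsetw_io() above rely on a multiply to replicate the fill value across a 64-bit quantity before handing it to _memset_c_io(), so the fill can be done with whole quadword stores. The replication is plain arithmetic and easy to check stand-alone:

/*
 * User-space demo only: the constant multiplications used by memset_io()
 * and memsetw_io() in asm/io.h.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned char  c = 0xab;
        unsigned short w = 0xbeef;

        unsigned long byte_pattern = 0x0101010101010101UL * c;
        unsigned long word_pattern = 0x0001000100010001UL * w;

        assert(byte_pattern == 0xababababababababUL);
        assert(word_pattern == 0xbeefbeefbeefbeefUL);
        printf("byte fill: 0x%016lx, word fill: 0x%016lx\n",
               byte_pattern, word_pattern);
        return 0;
}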
+ */ + +#define writeq writeq +#define readq readq + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_IO_H */ diff --git a/arch/alpha/include/asm/io_trivial.h b/arch/alpha/include/asm/io_trivial.h new file mode 100644 index 00000000000..1c77f10b4b3 --- /dev/null +++ b/arch/alpha/include/asm/io_trivial.h @@ -0,0 +1,131 @@ +/* Trivial implementations of basic i/o routines. Assumes that all + of the hard work has been done by ioremap and ioportmap, and that + access to i/o space is linear. */ + +/* This file may be included multiple times. */ + +#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) +__EXTERN_INLINE unsigned int +IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a) +{ + return __kernel_ldbu(*(volatile u8 __force *)a); +} + +__EXTERN_INLINE unsigned int +IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a) +{ + return __kernel_ldwu(*(volatile u16 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a) +{ + __kernel_stb(b, *(volatile u8 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a) +{ + __kernel_stw(b, *(volatile u16 __force *)a); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) +__EXTERN_INLINE unsigned int +IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a) +{ + return *(volatile u32 __force *)a; +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a) +{ + *(volatile u32 __force *)a = b; +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 +__EXTERN_INLINE u8 +IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) +{ + return __kernel_ldbu(*(const volatile u8 __force *)a); +} + +__EXTERN_INLINE u16 +IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) +{ + return __kernel_ldwu(*(const volatile u16 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) +{ + __kernel_stb(b, *(volatile u8 __force *)a); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) +{ + __kernel_stw(b, *(volatile u16 __force *)a); +} +#elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2 +__EXTERN_INLINE u8 +IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) +{ + void __iomem *addr = (void __iomem *)a; + return IO_CONCAT(__IO_PREFIX,ioread8)(addr); +} + +__EXTERN_INLINE u16 +IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) +{ + void __iomem *addr = (void __iomem *)a; + return IO_CONCAT(__IO_PREFIX,ioread16)(addr); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) +{ + void __iomem *addr = (void __iomem *)a; + IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) +{ + void __iomem *addr = (void __iomem *)a; + IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 +__EXTERN_INLINE u32 +IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a) +{ + return *(const volatile u32 __force *)a; +} + +__EXTERN_INLINE u64 +IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a) +{ + return *(const volatile u64 __force *)a; +} + +__EXTERN_INLINE void +IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a) +{ + *(volatile u32 __force *)a = b; +} + +__EXTERN_INLINE void 
+IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a) +{ + *(volatile u64 __force *)a = b; +} +#endif + +#if IO_CONCAT(__IO_PREFIX,trivial_iounmap) +__EXTERN_INLINE void IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a) +{ +} +#endif diff --git a/arch/alpha/include/asm/ioctl.h b/arch/alpha/include/asm/ioctl.h new file mode 100644 index 00000000000..fc63727f417 --- /dev/null +++ b/arch/alpha/include/asm/ioctl.h @@ -0,0 +1,66 @@ +#ifndef _ALPHA_IOCTL_H +#define _ALPHA_IOCTL_H + +/* + * The original linux ioctl numbering scheme was just a general + * "anything goes" setup, where more or less random numbers were + * assigned. Sorry, I was clueless when I started out on this. + * + * On the alpha, we'll try to clean it up a bit, using a more sane + * ioctl numbering, and also trying to be compatible with OSF/1 in + * the process. I'd like to clean it up for the i386 as well, but + * it's so painful recognizing both the new and the old numbers.. + */ + +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 13 +#define _IOC_DIRBITS 3 + +#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) +#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) +#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) +#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +/* + * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. + * And this turns out useful to catch old ioctl numbers in header + * files for us. + */ +#define _IOC_NONE 1U +#define _IOC_READ 2U +#define _IOC_WRITE 4U + +#define _IOC(dir,type,nr,size) \ + ((unsigned int) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT))) + +/* used to create numbers */ +#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) +#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) +#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) + +/* used to decode them.. */ +#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) +#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) +#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) +#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) + +/* ...and for the drivers/sound files... 
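Worked example (not part of the patch): how the Alpha _IOC() layout above packs an ioctl number, using FIONREAD (_IOR('f', 127, int) in asm/ioctls.h) as the example. The expected value assumes sizeof(int) == 4; the field widths and shifts are copied from the header:

/*
 * User-space demo only: pack FIONREAD with the Alpha _IOC() layout.
 */
#include <assert.h>
#include <stdio.h>

#define _IOC_NRBITS     8
#define _IOC_TYPEBITS   8
#define _IOC_SIZEBITS   13
#define _IOC_DIRBITS    3

#define _IOC_NRSHIFT    0
#define _IOC_TYPESHIFT  (_IOC_NRSHIFT + _IOC_NRBITS)     /* 8 */
#define _IOC_SIZESHIFT  (_IOC_TYPESHIFT + _IOC_TYPEBITS) /* 16 */
#define _IOC_DIRSHIFT   (_IOC_SIZESHIFT + _IOC_SIZEBITS) /* 29 */

#define _IOC_READ       2U

#define _IOC(dir,type,nr,size) \
        ((unsigned int)(((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | \
                        ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT)))
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))

int main(void)
{
        unsigned int fionread = _IOR('f', 127, int);

        /* dir=2<<29 | size=4<<16 | type='f'<<8 | nr=127  ==>  0x4004667f */
        assert(fionread == 0x4004667fU);
        printf("FIONREAD = 0x%08x\n", fionread);
        return 0;
}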
*/ + +#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) +#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) +#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) +#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) +#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) + +#endif /* _ALPHA_IOCTL_H */ diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h new file mode 100644 index 00000000000..67bb9f6fdbe --- /dev/null +++ b/arch/alpha/include/asm/ioctls.h @@ -0,0 +1,112 @@ +#ifndef _ASM_ALPHA_IOCTLS_H +#define _ASM_ALPHA_IOCTLS_H + +#include + +#define FIOCLEX _IO('f', 1) +#define FIONCLEX _IO('f', 2) +#define FIOASYNC _IOW('f', 125, int) +#define FIONBIO _IOW('f', 126, int) +#define FIONREAD _IOR('f', 127, int) +#define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) + +#define TIOCGETP _IOR('t', 8, struct sgttyb) +#define TIOCSETP _IOW('t', 9, struct sgttyb) +#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */ + +#define TIOCSETC _IOW('t', 17, struct tchars) +#define TIOCGETC _IOR('t', 18, struct tchars) +#define TCGETS _IOR('t', 19, struct termios) +#define TCSETS _IOW('t', 20, struct termios) +#define TCSETSW _IOW('t', 21, struct termios) +#define TCSETSF _IOW('t', 22, struct termios) + +#define TCGETA _IOR('t', 23, struct termio) +#define TCSETA _IOW('t', 24, struct termio) +#define TCSETAW _IOW('t', 25, struct termio) +#define TCSETAF _IOW('t', 28, struct termio) + +#define TCSBRK _IO('t', 29) +#define TCXONC _IO('t', 30) +#define TCFLSH _IO('t', 31) + +#define TIOCSWINSZ _IOW('t', 103, struct winsize) +#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ + +#define TIOCGLTC _IOR('t', 116, struct ltchars) +#define TIOCSLTC _IOW('t', 117, struct ltchars) +#define TIOCSPGRP _IOW('t', 118, int) +#define TIOCGPGRP _IOR('t', 119, int) + +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E + +#define TIOCSTI 0x5412 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +# define TIOCM_LE 0x001 +# define TIOCM_DTR 0x002 +# define TIOCM_RTS 0x004 +# define TIOCM_ST 0x008 +# define TIOCM_SR 0x010 +# define TIOCM_CTS 0x020 +# define TIOCM_CAR 0x040 +# define TIOCM_RNG 0x080 +# define TIOCM_DSR 0x100 +# define TIOCM_CD TIOCM_CAR +# define TIOCM_RI TIOCM_RNG +# define TIOCM_OUT1 0x2000 +# define TIOCM_OUT2 0x4000 +# define TIOCM_LOOP 0x8000 + +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +# define TIOCPKT_DATA 0 +# define TIOCPKT_FLUSHREAD 1 +# define TIOCPKT_FLUSHWRITE 2 +# define TIOCPKT_STOP 4 +# define TIOCPKT_START 8 +# define TIOCPKT_NOSTOP 16 +# define TIOCPKT_DOSTOP 32 + + +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define 
TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ + /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ + +#endif /* _ASM_ALPHA_IOCTLS_H */ diff --git a/arch/alpha/include/asm/ipcbuf.h b/arch/alpha/include/asm/ipcbuf.h new file mode 100644 index 00000000000..d9c0e1a5070 --- /dev/null +++ b/arch/alpha/include/asm/ipcbuf.h @@ -0,0 +1,28 @@ +#ifndef _ALPHA_IPCBUF_H +#define _ALPHA_IPCBUF_H + +/* + * The ipc64_perm structure for alpha architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 32-bit seq + * - 2 miscellaneous 64-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; + unsigned short __pad1; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ALPHA_IPCBUF_H */ diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h new file mode 100644 index 00000000000..06377400dc0 --- /dev/null +++ b/arch/alpha/include/asm/irq.h @@ -0,0 +1,91 @@ +#ifndef _ALPHA_IRQ_H +#define _ALPHA_IRQ_H + +/* + * linux/include/alpha/irq.h + * + * (C) 1994 Linus Torvalds + */ + +#include + +#if defined(CONFIG_ALPHA_GENERIC) + +/* Here NR_IRQS is not exact, but rather an upper bound. This is used + many places throughout the kernel to size static arrays. That's ok, + we'll use alpha_mv.nr_irqs when we want the real thing. */ + +/* When LEGACY_START_ADDRESS is selected, we leave out: + TITAN + WILDFIRE + MARVEL + + This helps keep the kernel object size reasonable for the majority + of machines. 
+*/ + +# if defined(CONFIG_ALPHA_LEGACY_START_ADDRESS) +# define NR_IRQS (128) /* max is RAWHIDE/TAKARA */ +# else +# define NR_IRQS (32768 + 16) /* marvel - 32 pids */ +# endif + +#elif defined(CONFIG_ALPHA_CABRIOLET) || \ + defined(CONFIG_ALPHA_EB66P) || \ + defined(CONFIG_ALPHA_EB164) || \ + defined(CONFIG_ALPHA_PC164) || \ + defined(CONFIG_ALPHA_LX164) +# define NR_IRQS 35 + +#elif defined(CONFIG_ALPHA_EB66) || \ + defined(CONFIG_ALPHA_EB64P) || \ + defined(CONFIG_ALPHA_MIKASA) +# define NR_IRQS 32 + +#elif defined(CONFIG_ALPHA_ALCOR) || \ + defined(CONFIG_ALPHA_MIATA) || \ + defined(CONFIG_ALPHA_RUFFIAN) || \ + defined(CONFIG_ALPHA_RX164) || \ + defined(CONFIG_ALPHA_NORITAKE) +# define NR_IRQS 48 + +#elif defined(CONFIG_ALPHA_SABLE) || \ + defined(CONFIG_ALPHA_SX164) +# define NR_IRQS 40 + +#elif defined(CONFIG_ALPHA_DP264) || \ + defined(CONFIG_ALPHA_LYNX) || \ + defined(CONFIG_ALPHA_SHARK) || \ + defined(CONFIG_ALPHA_EIGER) +# define NR_IRQS 64 + +#elif defined(CONFIG_ALPHA_TITAN) +#define NR_IRQS 80 + +#elif defined(CONFIG_ALPHA_RAWHIDE) || \ + defined(CONFIG_ALPHA_TAKARA) +# define NR_IRQS 128 + +#elif defined(CONFIG_ALPHA_WILDFIRE) +# define NR_IRQS 2048 /* enuff for 8 QBBs */ + +#elif defined(CONFIG_ALPHA_MARVEL) +# define NR_IRQS (32768 + 16) /* marvel - 32 pids*/ + +#else /* everyone else */ +# define NR_IRQS 16 +#endif + +static __inline__ int irq_canonicalize(int irq) +{ + /* + * XXX is this true for all Alpha's? The old serial driver + * did it this way for years without any complaints, so.... + */ + return ((irq == 2) ? 9 : irq); +} + +struct pt_regs; +extern void (*perf_irq)(unsigned long, struct pt_regs *); + +#endif /* _ALPHA_IRQ_H */ diff --git a/arch/alpha/include/asm/irq_regs.h b/arch/alpha/include/asm/irq_regs.h new file mode 100644 index 00000000000..3dd9c0b7027 --- /dev/null +++ b/arch/alpha/include/asm/irq_regs.h @@ -0,0 +1 @@ +#include diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h new file mode 100644 index 00000000000..964b06ead43 --- /dev/null +++ b/arch/alpha/include/asm/jensen.h @@ -0,0 +1,346 @@ +#ifndef __ALPHA_JENSEN_H +#define __ALPHA_JENSEN_H + +#include + +/* + * Defines for the AlphaPC EISA IO and memory address space. + */ + +/* + * NOTE! The memory operations do not set any memory barriers, as it's + * not needed for cases like a frame buffer that is essentially memory-like. + * You need to do them by hand if the operations depend on ordering. + * + * Similarly, the port IO operations do a "mb" only after a write operation: + * if an mb is needed before (as in the case of doing memory mapped IO + * first, and then a port IO operation to the same device), it needs to be + * done by hand. + * + * After the above has bitten me 100 times, I'll give up and just do the + * mb all the time, but right now I'm hoping this will work out. Avoiding + * mb's may potentially be a noticeable speed improvement, but I can't + * honestly say I've tested it. + * + * Handling interrupts that need to do mb's to synchronize to non-interrupts + * is another fun race area. Don't do it (because if you do, I'll have to + * do *everything* with interrupts disabled, ugh). 
+ */ + +/* + * EISA Interrupt Acknowledge address + */ +#define EISA_INTA (IDENT_ADDR + 0x100000000UL) + +/* + * FEPROM addresses + */ +#define EISA_FEPROM0 (IDENT_ADDR + 0x180000000UL) +#define EISA_FEPROM1 (IDENT_ADDR + 0x1A0000000UL) + +/* + * VL82C106 base address + */ +#define EISA_VL82C106 (IDENT_ADDR + 0x1C0000000UL) + +/* + * EISA "Host Address Extension" address (bits 25-31 of the EISA address) + */ +#define EISA_HAE (IDENT_ADDR + 0x1D0000000UL) + +/* + * "SYSCTL" register address + */ +#define EISA_SYSCTL (IDENT_ADDR + 0x1E0000000UL) + +/* + * "spare" register address + */ +#define EISA_SPARE (IDENT_ADDR + 0x1F0000000UL) + +/* + * EISA memory address offset + */ +#define EISA_MEM (IDENT_ADDR + 0x200000000UL) + +/* + * EISA IO address offset + */ +#define EISA_IO (IDENT_ADDR + 0x300000000UL) + + +#ifdef __KERNEL__ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __IO_EXTERN_INLINE +#endif + +/* + * Handle the "host address register". This needs to be set + * to the high 7 bits of the EISA address. This is also needed + * for EISA IO addresses, which are only 16 bits wide (the + * hae needs to be set to 0). + * + * HAE isn't needed for the local IO operations, though. + */ + +#define JENSEN_HAE_ADDRESS EISA_HAE +#define JENSEN_HAE_MASK 0x1ffffff + +__EXTERN_INLINE void jensen_set_hae(unsigned long addr) +{ + /* hae on the Jensen is bits 31:25 shifted right */ + addr >>= 25; + if (addr != alpha_mv.hae_cache) + set_hae(addr); +} + +#define vuip volatile unsigned int * + +/* + * IO functions + * + * The "local" functions are those that don't go out to the EISA bus, + * but instead act on the VL82C106 chip directly.. This is mainly the + * keyboard, RTC, printer and first two serial lines.. + * + * The local stuff makes for some complications, but it seems to be + * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H + * convinced that I need one of the newer machines. + */ + +static inline unsigned int jensen_local_inb(unsigned long addr) +{ + return 0xff & *(vuip)((addr << 9) + EISA_VL82C106); +} + +static inline void jensen_local_outb(u8 b, unsigned long addr) +{ + *(vuip)((addr << 9) + EISA_VL82C106) = b; + mb(); +} + +static inline unsigned int jensen_bus_inb(unsigned long addr) +{ + long result; + + jensen_set_hae(0); + result = *(volatile int *)((addr << 7) + EISA_IO + 0x00); + return __kernel_extbl(result, addr & 3); +} + +static inline void jensen_bus_outb(u8 b, unsigned long addr) +{ + jensen_set_hae(0); + *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101; + mb(); +} + +/* + * It seems gcc is not very good at optimizing away logical + * operations that result in operations across inline functions. + * Which is why this is a macro. 
+ */ + +#define jensen_is_local(addr) ( \ +/* keyboard */ (addr == 0x60 || addr == 0x64) || \ +/* RTC */ (addr == 0x170 || addr == 0x171) || \ +/* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \ +/* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \ +/* mb COM2 */ (addr >= 0x3f8 && addr <= 0x3ff)) + +__EXTERN_INLINE u8 jensen_inb(unsigned long addr) +{ + if (jensen_is_local(addr)) + return jensen_local_inb(addr); + else + return jensen_bus_inb(addr); +} + +__EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr) +{ + if (jensen_is_local(addr)) + jensen_local_outb(b, addr); + else + jensen_bus_outb(b, addr); +} + +__EXTERN_INLINE u16 jensen_inw(unsigned long addr) +{ + long result; + + jensen_set_hae(0); + result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20); + result >>= (addr & 3) * 8; + return 0xffffUL & result; +} + +__EXTERN_INLINE u32 jensen_inl(unsigned long addr) +{ + jensen_set_hae(0); + return *(vuip) ((addr << 7) + EISA_IO + 0x60); +} + +__EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr) +{ + jensen_set_hae(0); + *(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001; + mb(); +} + +__EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr) +{ + jensen_set_hae(0); + *(vuip) ((addr << 7) + EISA_IO + 0x60) = b; + mb(); +} + +/* + * Memory functions. + */ + +__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + long result; + + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00); + result >>= (addr & 3) * 8; + return 0xffUL & result; +} + +__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + long result; + + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20); + result >>= (addr & 3) * 8; + return 0xffffUL & result; +} + +__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + return *(vuip) ((addr << 7) + EISA_MEM + 0x60); +} + +__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + unsigned long r0, r1; + + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + addr = (addr << 7) + EISA_MEM + 0x60; + r0 = *(vuip) (addr); + r1 = *(vuip) (addr + (4 << 7)); + return r1 << 32 | r0; +} + +__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101; +} + +__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001; +} + +__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b; +} + +__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr) +{ + unsigned long addr = (unsigned long) xaddr; + jensen_set_hae(addr); + addr &= JENSEN_HAE_MASK; + addr = (addr << 7) + EISA_MEM + 0x60; + *(vuip) (addr) = b; + *(vuip) (addr + (4 << 7)) = b >> 32; +} + +__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr) +{ + return (void __iomem *)addr; +} + 
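Worked example (not part of the patch): the Jensen sparse-space arithmetic used by jensen_readb() above. A byte at EISA memory address A is reached by programming HAE with bits 31:25 of A, issuing a 32-bit load at ((A & JENSEN_HAE_MASK) << 7) + EISA_MEM, and extracting byte lane (A & 3). The address below is arbitrary and only illustrates the bit manipulation; nothing touches hardware:

/*
 * User-space demo only: reproduce the address split done by
 * jensen_set_hae() and jensen_readb() in asm/jensen.h.
 */
#include <assert.h>
#include <stdio.h>

#define IDENT_ADDR       0xfffffc0000000000UL
#define EISA_MEM         (IDENT_ADDR + 0x200000000UL)
#define JENSEN_HAE_MASK  0x1ffffff

int main(void)
{
        unsigned long a = 0x03001234UL;         /* example EISA memory address */

        unsigned long hae    = a >> 25;                            /* 0x1 */
        unsigned long sparse = ((a & JENSEN_HAE_MASK) << 7) + EISA_MEM;
        unsigned long lane   = a & 3;                              /* byte lane 0 */

        assert(hae == 0x1);
        assert(sparse == EISA_MEM + 0x80091a00UL);
        assert(lane == 0);
        printf("hae=%#lx sparse=%#lx lane=%lu\n", hae, sparse, lane);
        return 0;
}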
+__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr, + unsigned long size) +{ + return (void __iomem *)(addr + 0x100000000ul); +} + +__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr) +{ + return (long)addr >= 0; +} + +__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr) +{ + return (unsigned long)addr >= 0x100000000ul; +} + +/* New-style ioread interface. All the routines are so ugly for Jensen + that it doesn't make sense to merge them. */ + +#define IOPORT(OS, NS) \ +__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \ +{ \ + if (jensen_is_mmio(xaddr)) \ + return jensen_read##OS(xaddr - 0x100000000ul); \ + else \ + return jensen_in##OS((unsigned long)xaddr); \ +} \ +__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \ +{ \ + if (jensen_is_mmio(xaddr)) \ + jensen_write##OS(b, xaddr - 0x100000000ul); \ + else \ + jensen_out##OS(b, (unsigned long)xaddr); \ +} + +IOPORT(b, 8) +IOPORT(w, 16) +IOPORT(l, 32) + +#undef IOPORT + +#undef vuip + +#undef __IO_PREFIX +#define __IO_PREFIX jensen +#define jensen_trivial_rw_bw 0 +#define jensen_trivial_rw_lq 0 +#define jensen_trivial_io_bw 0 +#define jensen_trivial_io_lq 0 +#define jensen_trivial_iounmap 1 +#include + +#ifdef __IO_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __IO_EXTERN_INLINE +#endif + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_JENSEN_H */ diff --git a/arch/alpha/include/asm/kdebug.h b/arch/alpha/include/asm/kdebug.h new file mode 100644 index 00000000000..6ece1b03766 --- /dev/null +++ b/arch/alpha/include/asm/kdebug.h @@ -0,0 +1 @@ +#include diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h new file mode 100644 index 00000000000..3e6735a34c5 --- /dev/null +++ b/arch/alpha/include/asm/kmap_types.h @@ -0,0 +1,32 @@ +#ifndef _ASM_KMAP_TYPES_H +#define _ASM_KMAP_TYPES_H + +/* Dummy header just to define km_type. */ + + +#ifdef CONFIG_DEBUG_HIGHMEM +# define D(n) __KM_FENCE_##n , +#else +# define D(n) +#endif + +enum km_type { +D(0) KM_BOUNCE_READ, +D(1) KM_SKB_SUNRPC_DATA, +D(2) KM_SKB_DATA_SOFTIRQ, +D(3) KM_USER0, +D(4) KM_USER1, +D(5) KM_BIO_SRC_IRQ, +D(6) KM_BIO_DST_IRQ, +D(7) KM_PTE0, +D(8) KM_PTE1, +D(9) KM_IRQ0, +D(10) KM_IRQ1, +D(11) KM_SOFTIRQ0, +D(12) KM_SOFTIRQ1, +D(13) KM_TYPE_NR +}; + +#undef D + +#endif diff --git a/arch/alpha/include/asm/linkage.h b/arch/alpha/include/asm/linkage.h new file mode 100644 index 00000000000..291c2d01c44 --- /dev/null +++ b/arch/alpha/include/asm/linkage.h @@ -0,0 +1,6 @@ +#ifndef __ASM_LINKAGE_H +#define __ASM_LINKAGE_H + +/* Nothing to see here... 
*/ + +#endif diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h new file mode 100644 index 00000000000..6ad3ea69642 --- /dev/null +++ b/arch/alpha/include/asm/local.h @@ -0,0 +1,118 @@ +#ifndef _ALPHA_LOCAL_H +#define _ALPHA_LOCAL_H + +#include +#include + +typedef struct +{ + atomic_long_t a; +} local_t; + +#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } +#define local_read(l) atomic_long_read(&(l)->a) +#define local_set(l,i) atomic_long_set(&(l)->a, (i)) +#define local_inc(l) atomic_long_inc(&(l)->a) +#define local_dec(l) atomic_long_dec(&(l)->a) +#define local_add(i,l) atomic_long_add((i),(&(l)->a)) +#define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) + +static __inline__ long local_add_return(long i, local_t * l) +{ + long temp, result; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%3,%2\n" + " addq %0,%3,%0\n" + " stq_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) + :"Ir" (i), "m" (l->a.counter) : "memory"); + return result; +} + +static __inline__ long local_sub_return(long i, local_t * l) +{ + long temp, result; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " subq %0,%3,%2\n" + " subq %0,%3,%0\n" + " stq_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) + :"Ir" (i), "m" (l->a.counter) : "memory"); + return result; +} + +#define local_cmpxchg(l, o, n) \ + (cmpxchg_local(&((l)->a.counter), (o), (n))) +#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) + +/** + * local_add_unless - add unless the number is a given value + * @l: pointer of type local_t + * @a: the amount to add to l... + * @u: ...unless l is equal to u. + * + * Atomically adds @a to @l, so long as it was not @u. + * Returns non-zero if @l was not @u, and zero otherwise. + */ +#define local_add_unless(l, a, u) \ +({ \ + long c, old; \ + c = local_read(l); \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = local_cmpxchg((l), c, c + (a)); \ + if (likely(old == c)) \ + break; \ + c = old; \ + } \ + c != (u); \ +}) +#define local_inc_not_zero(l) local_add_unless((l), 1, 0) + +#define local_add_negative(a, l) (local_add_return((a), (l)) < 0) + +#define local_dec_return(l) local_sub_return(1,(l)) + +#define local_inc_return(l) local_add_return(1,(l)) + +#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0) + +#define local_inc_and_test(l) (local_add_return(1, (l)) == 0) + +#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) + +/* Verify if faster than atomic ops */ +#define __local_inc(l) ((l)->a.counter++) +#define __local_dec(l) ((l)->a.counter++) +#define __local_add(i,l) ((l)->a.counter+=(i)) +#define __local_sub(i,l) ((l)->a.counter-=(i)) + +/* Use these for per-cpu local_t variables: on some archs they are + * much more efficient than these naive implementations. Note they take + * a variable, not an address. 
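Illustrative sketch (not part of the patch): the retry loop behind local_add_unless() above, rewritten with C11 atomics instead of the ldq_l/stq_c based cmpxchg_local(), so it can be compiled and run anywhere. The structure is the same: reread, bail out if the value hit the forbidden one, otherwise try to swap in old + a:

/*
 * User-space demo only (compile with -std=c11): compare-and-swap retry
 * loop equivalent to local_add_unless() in asm/local.h.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static long add_unless(atomic_long *l, long a, long u)
{
        long c = atomic_load(l);

        for (;;) {
                if (c == u)
                        break;
                /* On failure, c is reloaded with the current value. */
                if (atomic_compare_exchange_weak(l, &c, c + a))
                        break;
        }
        return c != u;          /* non-zero if the add happened */
}

int main(void)
{
        atomic_long v = 5;

        assert(add_unless(&v, 1, 0) == 1 && atomic_load(&v) == 6);
        assert(add_unless(&v, 1, 6) == 0 && atomic_load(&v) == 6);
        printf("v = %ld\n", atomic_load(&v));
        return 0;
}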
+ */ +#define cpu_local_read(l) local_read(&__get_cpu_var(l)) +#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i)) + +#define cpu_local_inc(l) local_inc(&__get_cpu_var(l)) +#define cpu_local_dec(l) local_dec(&__get_cpu_var(l)) +#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l)) +#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l)) + +#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l)) +#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l)) +#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l)) +#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l)) + +#endif /* _ALPHA_LOCAL_H */ diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h new file mode 100644 index 00000000000..a86c083cdf7 --- /dev/null +++ b/arch/alpha/include/asm/machvec.h @@ -0,0 +1,134 @@ +#ifndef __ALPHA_MACHVEC_H +#define __ALPHA_MACHVEC_H 1 + +#include + +/* + * This file gets pulled in by asm/io.h from user space. We don't + * want most of this escaping. + */ + +#ifdef __KERNEL__ + +/* The following structure vectors all of the I/O and IRQ manipulation + from the generic kernel to the hardware specific backend. */ + +struct task_struct; +struct mm_struct; +struct vm_area_struct; +struct linux_hose_info; +struct pci_dev; +struct pci_ops; +struct pci_controller; +struct _alpha_agp_info; + +struct alpha_machine_vector +{ + /* This "belongs" down below with the rest of the runtime + variables, but it is convenient for entry.S if these + two slots are at the beginning of the struct. */ + unsigned long hae_cache; + unsigned long *hae_register; + + int nr_irqs; + int rtc_port; + unsigned int max_asn; + unsigned long max_isa_dma_address; + unsigned long irq_probe_mask; + unsigned long iack_sc; + unsigned long min_io_address; + unsigned long min_mem_address; + unsigned long pci_dac_offset; + + void (*mv_pci_tbi)(struct pci_controller *hose, + dma_addr_t start, dma_addr_t end); + + unsigned int (*mv_ioread8)(void __iomem *); + unsigned int (*mv_ioread16)(void __iomem *); + unsigned int (*mv_ioread32)(void __iomem *); + + void (*mv_iowrite8)(u8, void __iomem *); + void (*mv_iowrite16)(u16, void __iomem *); + void (*mv_iowrite32)(u32, void __iomem *); + + u8 (*mv_readb)(const volatile void __iomem *); + u16 (*mv_readw)(const volatile void __iomem *); + u32 (*mv_readl)(const volatile void __iomem *); + u64 (*mv_readq)(const volatile void __iomem *); + + void (*mv_writeb)(u8, volatile void __iomem *); + void (*mv_writew)(u16, volatile void __iomem *); + void (*mv_writel)(u32, volatile void __iomem *); + void (*mv_writeq)(u64, volatile void __iomem *); + + void __iomem *(*mv_ioportmap)(unsigned long); + void __iomem *(*mv_ioremap)(unsigned long, unsigned long); + void (*mv_iounmap)(volatile void __iomem *); + int (*mv_is_ioaddr)(unsigned long); + int (*mv_is_mmio)(const volatile void __iomem *); + + void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *, + struct task_struct *); + void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *); + + void (*mv_flush_tlb_current)(struct mm_struct *); + void (*mv_flush_tlb_current_page)(struct mm_struct * mm, + struct vm_area_struct *vma, + unsigned long addr); + + void (*update_irq_hw)(unsigned long, unsigned long, int); + void (*ack_irq)(unsigned long); + void (*device_interrupt)(unsigned long vector); + void (*machine_check)(u64 vector, u64 la); + + void (*smp_callin)(void); + void (*init_arch)(void); + void (*init_irq)(void); + void (*init_rtc)(void); + void (*init_pci)(void); + void 
(*kill_arch)(int); + + u8 (*pci_swizzle)(struct pci_dev *, u8 *); + int (*pci_map_irq)(struct pci_dev *, u8, u8); + struct pci_ops *pci_ops; + + struct _alpha_agp_info *(*agp_info)(void); + + const char *vector_name; + + /* NUMA information */ + int (*pa_to_nid)(unsigned long); + int (*cpuid_to_nid)(int); + unsigned long (*node_mem_start)(int); + unsigned long (*node_mem_size)(int); + + /* System specific parameters. */ + union { + struct { + unsigned long gru_int_req_bits; + } cia; + + struct { + unsigned long gamma_bias; + } t2; + + struct { + unsigned int route_tab; + } sio; + } sys; +}; + +extern struct alpha_machine_vector alpha_mv; + +#ifdef CONFIG_ALPHA_GENERIC +extern int alpha_using_srm; +#else +#ifdef CONFIG_ALPHA_SRM +#define alpha_using_srm 1 +#else +#define alpha_using_srm 0 +#endif +#endif /* GENERIC */ + +#endif +#endif /* __ALPHA_MACHVEC_H */ diff --git a/arch/alpha/include/asm/mc146818rtc.h b/arch/alpha/include/asm/mc146818rtc.h new file mode 100644 index 00000000000..097703f1c8c --- /dev/null +++ b/arch/alpha/include/asm/mc146818rtc.h @@ -0,0 +1,27 @@ +/* + * Machine dependent access functions for RTC registers. + */ +#ifndef __ASM_ALPHA_MC146818RTC_H +#define __ASM_ALPHA_MC146818RTC_H + +#include + +#ifndef RTC_PORT +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ +#endif + +/* + * The yet supported machines all access the RTC index register via + * an ISA port access but the way to access the date register differs ... + */ +#define CMOS_READ(addr) ({ \ +outb_p((addr),RTC_PORT(0)); \ +inb_p(RTC_PORT(1)); \ +}) +#define CMOS_WRITE(val, addr) ({ \ +outb_p((addr),RTC_PORT(0)); \ +outb_p((val),RTC_PORT(1)); \ +}) + +#endif /* __ASM_ALPHA_MC146818RTC_H */ diff --git a/arch/alpha/include/asm/md.h b/arch/alpha/include/asm/md.h new file mode 100644 index 00000000000..6c9b8222a4f --- /dev/null +++ b/arch/alpha/include/asm/md.h @@ -0,0 +1,13 @@ +/* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $ + * md.h: High speed xor_block operation for RAID4/5 + * + */ + +#ifndef __ASM_MD_H +#define __ASM_MD_H + +/* #define HAVE_ARCH_XORBLOCK */ + +#define MD_XORBLOCK_ALIGNMENT sizeof(long) + +#endif /* __ASM_MD_H */ diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h new file mode 100644 index 00000000000..90d7c35d286 --- /dev/null +++ b/arch/alpha/include/asm/mman.h @@ -0,0 +1,54 @@ +#ifndef __ALPHA_MMAN_H__ +#define __ALPHA_MMAN_H__ + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_SHARED 0x01 /* Share changes */ +#define MAP_PRIVATE 0x02 /* Changes are private */ +#define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */ +#define MAP_FIXED 0x100 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x10 /* don't use a file */ + +/* not used by linux, but here to make sure we don't clash with OSF/1 defines */ +#define _MAP_HASSEMAPHORE 0x0200 +#define _MAP_INHERIT 0x0400 +#define _MAP_UNALIGNED 0x0800 + +/* These are linux-specific */ +#define MAP_GROWSDOWN 0x01000 /* stack-like segment */ +#define MAP_DENYWRITE 0x02000 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x04000 /* mark it as an executable */ +#define 
MAP_LOCKED 0x08000 /* lock the mapping */ +#define MAP_NORESERVE 0x10000 /* don't check for reservations */ +#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x40000 /* do not block on IO */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_SYNC 2 /* synchronous memory sync */ +#define MS_INVALIDATE 4 /* invalidate the caches */ + +#define MCL_CURRENT 8192 /* lock all currently mapped pages */ +#define MCL_FUTURE 16384 /* lock all additions to address space */ + +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_SPACEAVAIL 5 /* ensure resources are available */ +#define MADV_DONTNEED 6 /* don't need these pages */ + +/* common/generic parameters */ +#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ + +/* compatibility flags */ +#define MAP_FILE 0 + +#endif /* __ALPHA_MMAN_H__ */ diff --git a/arch/alpha/include/asm/mmu.h b/arch/alpha/include/asm/mmu.h new file mode 100644 index 00000000000..3dc12777932 --- /dev/null +++ b/arch/alpha/include/asm/mmu.h @@ -0,0 +1,7 @@ +#ifndef __ALPHA_MMU_H +#define __ALPHA_MMU_H + +/* The alpha MMU context is one "unsigned long" bitmap per CPU */ +typedef unsigned long mm_context_t[NR_CPUS]; + +#endif diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h new file mode 100644 index 00000000000..86c08a02d23 --- /dev/null +++ b/arch/alpha/include/asm/mmu_context.h @@ -0,0 +1,260 @@ +#ifndef __ALPHA_MMU_CONTEXT_H +#define __ALPHA_MMU_CONTEXT_H + +/* + * get a new mmu context.. + * + * Copyright (C) 1996, Linus Torvalds + */ + +#include +#include +#include +#include + +/* + * Force a context reload. This is needed when we change the page + * table pointer or when we update the ASN of the current process. + */ + +/* Don't get into trouble with dueling __EXTERN_INLINEs. */ +#ifndef __EXTERN_INLINE +#include +#endif + + +static inline unsigned long +__reload_thread(struct pcb_struct *pcb) +{ + register unsigned long a0 __asm__("$16"); + register unsigned long v0 __asm__("$0"); + + a0 = virt_to_phys(pcb); + __asm__ __volatile__( + "call_pal %2 #__reload_thread" + : "=r"(v0), "=r"(a0) + : "i"(PAL_swpctx), "r"(a0) + : "$1", "$22", "$23", "$24", "$25"); + + return v0; +} + + +/* + * The maximum ASN's the processor supports. On the EV4 this is 63 + * but the PAL-code doesn't actually use this information. On the + * EV5 this is 127, and EV6 has 255. + * + * On the EV4, the ASNs are more-or-less useless anyway, as they are + * only used as an icache tag, not for TB entries. On the EV5 and EV6, + * ASN's also validate the TB entries, and thus make a lot more sense. + * + * The EV4 ASN's don't even match the architecture manual, ugh. And + * I quote: "If a processor implements address space numbers (ASNs), + * and the old PTE has the Address Space Match (ASM) bit clear (ASNs + * in use) and the Valid bit set, then entries can also effectively be + * made coherent by assigning a new, unused ASN to the currently + * running process and not reusing the previous ASN before calling the + * appropriate PALcode routine to invalidate the translation buffer (TB)". 
+ * + * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually + * work correctly and can thus not be used (explaining the lack of PAL-code + * support). + */ +#define EV4_MAX_ASN 63 +#define EV5_MAX_ASN 127 +#define EV6_MAX_ASN 255 + +#ifdef CONFIG_ALPHA_GENERIC +# define MAX_ASN (alpha_mv.max_asn) +#else +# ifdef CONFIG_ALPHA_EV4 +# define MAX_ASN EV4_MAX_ASN +# elif defined(CONFIG_ALPHA_EV5) +# define MAX_ASN EV5_MAX_ASN +# else +# define MAX_ASN EV6_MAX_ASN +# endif +#endif + +/* + * cpu_last_asn(processor): + * 63 0 + * +-------------+----------------+--------------+ + * | asn version | this processor | hardware asn | + * +-------------+----------------+--------------+ + */ + +#include +#ifdef CONFIG_SMP +#define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn) +#else +extern unsigned long last_asn; +#define cpu_last_asn(cpuid) last_asn +#endif /* CONFIG_SMP */ + +#define WIDTH_HARDWARE_ASN 8 +#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN) +#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1) + +/* + * NOTE! The way this is set up, the high bits of the "asn_cache" (and + * the "mm->context") are the ASN _version_ code. A version of 0 is + * always considered invalid, so to invalidate another process you only + * need to do "p->mm->context = 0". + * + * If we need more ASN's than the processor has, we invalidate the old + * user TLB's (tbiap()) and start a new ASN version. That will automatically + * force a new asn for any other processes the next time they want to + * run. + */ + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __MMU_EXTERN_INLINE +#endif + +extern inline unsigned long +__get_new_mm_context(struct mm_struct *mm, long cpu) +{ + unsigned long asn = cpu_last_asn(cpu); + unsigned long next = asn + 1; + + if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) { + tbiap(); + imb(); + next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION; + } + cpu_last_asn(cpu) = next; + return next; +} + +__EXTERN_INLINE void +ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + /* Check if our ASN is of an older version, and thus invalid. */ + unsigned long asn; + unsigned long mmc; + long cpu = smp_processor_id(); + +#ifdef CONFIG_SMP + cpu_data[cpu].asn_lock = 1; + barrier(); +#endif + asn = cpu_last_asn(cpu); + mmc = next_mm->context[cpu]; + if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) { + mmc = __get_new_mm_context(next_mm, cpu); + next_mm->context[cpu] = mmc; + } +#ifdef CONFIG_SMP + else + cpu_data[cpu].need_new_asn = 1; +#endif + + /* Always update the PCB ASN. Another thread may have allocated + a new mm->context (via flush_tlb_mm) without the ASN serial + number wrapping. We have no way to detect when this is needed. */ + task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK; +} + +__EXTERN_INLINE void +ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + /* As described, ASN's are broken for TLB usage. But we can + optimize for switching between threads -- if the mm is + unchanged from current we needn't flush. */ + /* ??? May not be needed because EV4 PALcode recognizes that + ASN's are broken and does a tbiap itself on swpctx, under + the "Must set ASN or flush" rule. At least this is true + for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com). + I'm going to leave this here anyway, just to Be Sure. 
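To make the ASN bookkeeping above more concrete, here is a small stand-alone sketch of the version/hardware-ASN arithmetic used by __get_new_mm_context(). The get_new_asn() helper, the EV5 MAX_ASN value and the loop in main() are illustrative choices for the example, not kernel code.

#include <stdio.h>

#define WIDTH_HARDWARE_ASN	8
#define ASN_FIRST_VERSION	(1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK	((1UL << WIDTH_HARDWARE_ASN) - 1)
#define MAX_ASN			127	/* EV5 value, picked for the example */

/* Mirror of the rollover logic: bump the hardware ASN, and once it
 * would pass MAX_ASN, flush (tbiap/imb in the real code) and start a
 * new version in the high bits, invalidating every old context. */
static unsigned long get_new_asn(unsigned long last_asn)
{
	unsigned long next = last_asn + 1;

	if ((last_asn & HARDWARE_ASN_MASK) >= MAX_ASN)
		next = (last_asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	return next;
}

int main(void)
{
	unsigned long asn = ASN_FIRST_VERSION;	/* version 1, hw asn 0 */
	int i;

	for (i = 0; i < 300; i++)
		asn = get_new_asn(asn);
	printf("version %lu, hw asn %lu\n",
	       asn >> WIDTH_HARDWARE_ASN, asn & HARDWARE_ASN_MASK);
	return 0;
}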
-- r~ */ + if (prev_mm != next_mm) + tbiap(); + + /* Do continue to allocate ASNs, because we can still use them + to avoid flushing the icache. */ + ev5_switch_mm(prev_mm, next_mm, next); +} + +extern void __load_new_mm_context(struct mm_struct *); + +#ifdef CONFIG_SMP +#define check_mmu_context() \ +do { \ + int cpu = smp_processor_id(); \ + cpu_data[cpu].asn_lock = 0; \ + barrier(); \ + if (cpu_data[cpu].need_new_asn) { \ + struct mm_struct * mm = current->active_mm; \ + cpu_data[cpu].need_new_asn = 0; \ + if (!mm->context[cpu]) \ + __load_new_mm_context(mm); \ + } \ +} while(0) +#else +#define check_mmu_context() do { } while(0) +#endif + +__EXTERN_INLINE void +ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) +{ + __load_new_mm_context(next_mm); +} + +__EXTERN_INLINE void +ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) +{ + __load_new_mm_context(next_mm); + tbiap(); +} + +#define deactivate_mm(tsk,mm) do { } while (0) + +#ifdef CONFIG_ALPHA_GENERIC +# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c)) +# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y)) +#else +# ifdef CONFIG_ALPHA_EV4 +# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c)) +# define activate_mm(x,y) ev4_activate_mm((x),(y)) +# else +# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c)) +# define activate_mm(x,y) ev5_activate_mm((x),(y)) +# endif +#endif + +static inline int +init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + int i; + + for_each_online_cpu(i) + mm->context[i] = 0; + if (tsk != current) + task_thread_info(tsk)->pcb.ptbr + = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; + return 0; +} + +extern inline void +destroy_context(struct mm_struct *mm) +{ + /* Nothing to do. */ +} + +static inline void +enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ + task_thread_info(tsk)->pcb.ptbr + = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; +} + +#ifdef __MMU_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __MMU_EXTERN_INLINE +#endif + +#endif /* __ALPHA_MMU_CONTEXT_H */ diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h new file mode 100644 index 00000000000..8af56ce346a --- /dev/null +++ b/arch/alpha/include/asm/mmzone.h @@ -0,0 +1,115 @@ +/* + * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99 + * Adapted for the alpha wildfire architecture Jan 2001. + */ +#ifndef _ASM_MMZONE_H_ +#define _ASM_MMZONE_H_ + +#include + +struct bootmem_data_t; /* stupid forward decl. */ + +/* + * Following are macros that are specific to this numa platform. + */ + +extern pg_data_t node_data[]; + +#define alpha_pa_to_nid(pa) \ + (alpha_mv.pa_to_nid \ + ? alpha_mv.pa_to_nid(pa) \ + : (0)) +#define node_mem_start(nid) \ + (alpha_mv.node_mem_start \ + ? alpha_mv.node_mem_start(nid) \ + : (0UL)) +#define node_mem_size(nid) \ + (alpha_mv.node_mem_size \ + ? alpha_mv.node_mem_size(nid) \ + : ((nid) ? (0UL) : (~0UL))) + +#define pa_to_nid(pa) alpha_pa_to_nid(pa) +#define NODE_DATA(nid) (&node_data[(nid)]) + +#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) + +#if 1 +#define PLAT_NODE_DATA_LOCALNR(p, n) \ + (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn) +#else +static inline unsigned long +PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) +{ + unsigned long temp; + temp = p >> PAGE_SHIFT; + return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn; +} +#endif + +#ifdef CONFIG_DISCONTIGMEM + +/* + * Following are macros that each numa implementation must define. 
+ */ + +/* + * Given a kernel address, find the home node of the underlying memory. + */ +#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) +#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) + +/* + * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory + * and returns the kaddr corresponding to first physical page in the + * node's mem_map. + */ +#define LOCAL_BASE_ADDR(kaddr) \ + ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \ + << PAGE_SHIFT)) + +/* XXX: FIXME -- wli */ +#define kern_addr_valid(kaddr) (0) + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) + +#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) + +#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32)) +#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32)) +#define pte_pfn(pte) (pte_val(pte) >> 32) + +#define mk_pte(page, pgprot) \ +({ \ + pte_t pte; \ + unsigned long pfn; \ + \ + pfn = page_to_pfn(page) << 32; \ + pte_val(pte) = pfn | pgprot_val(pgprot); \ + \ + pte; \ +}) + +#define pte_page(x) \ +({ \ + unsigned long kvirt; \ + struct page * __xx; \ + \ + kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \ + __xx = virt_to_page(kvirt); \ + \ + __xx; \ +}) + +#define page_to_pa(page) \ + (page_to_pfn(page) << PAGE_SHIFT) + +#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) +#define pfn_valid(pfn) \ + (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \ + node_spanned_pages(pfn_to_nid(pfn))) \ + +#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT)) + +#endif /* CONFIG_DISCONTIGMEM */ + +#endif /* _ASM_MMZONE_H_ */ diff --git a/arch/alpha/include/asm/module.h b/arch/alpha/include/asm/module.h new file mode 100644 index 00000000000..7b63743c534 --- /dev/null +++ b/arch/alpha/include/asm/module.h @@ -0,0 +1,23 @@ +#ifndef _ALPHA_MODULE_H +#define _ALPHA_MODULE_H + +struct mod_arch_specific +{ + unsigned int gotsecindex; +}; + +#define Elf_Sym Elf64_Sym +#define Elf_Shdr Elf64_Shdr +#define Elf_Ehdr Elf64_Ehdr +#define Elf_Phdr Elf64_Phdr +#define Elf_Dyn Elf64_Dyn +#define Elf_Rel Elf64_Rel +#define Elf_Rela Elf64_Rela + +#define ARCH_SHF_SMALL SHF_ALPHA_GPREL + +#ifdef MODULE +asm(".section .got,\"aws\",@progbits; .align 3; .previous"); +#endif + +#endif /*_ALPHA_MODULE_H*/ diff --git a/arch/alpha/include/asm/msgbuf.h b/arch/alpha/include/asm/msgbuf.h new file mode 100644 index 00000000000..98496501a2b --- /dev/null +++ b/arch/alpha/include/asm/msgbuf.h @@ -0,0 +1,27 @@ +#ifndef _ALPHA_MSGBUF_H +#define _ALPHA_MSGBUF_H + +/* + * The msqid64_ds structure for alpha architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ALPHA_MSGBUF_H */ diff --git a/arch/alpha/include/asm/mutex.h b/arch/alpha/include/asm/mutex.h new file mode 100644 index 00000000000..458c1f7fbc1 --- /dev/null +++ b/arch/alpha/include/asm/mutex.h @@ -0,0 +1,9 @@ +/* + * Pull in the generic implementation for the mutex fastpath. + * + * TODO: implement optimized primitives instead, or leave the generic + * implementation in place, or pick the atomic_xchg() based generic + * implementation. (see asm-generic/mutex-xchg.h for details) + */ + +#include diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h new file mode 100644 index 00000000000..0995f9d1341 --- /dev/null +++ b/arch/alpha/include/asm/page.h @@ -0,0 +1,98 @@ +#ifndef _ALPHA_PAGE_H +#define _ALPHA_PAGE_H + +#include +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 13 +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +#ifndef __ASSEMBLY__ + +#define STRICT_MM_TYPECHECKS + +extern void clear_page(void *page); +#define clear_user_page(page, vaddr, pg) clear_page(page) + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +extern void copy_page(void * _to, void * _from); +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +#ifdef STRICT_MM_TYPECHECKS +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else +/* + * .. 
while these make it easier on the compiler + */ +typedef unsigned long pte_t; +typedef unsigned long pmd_t; +typedef unsigned long pgd_t; +typedef unsigned long pgprot_t; + +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgd_val(x) (x) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pgd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +typedef struct page *pgtable_t; + +#ifdef USE_48_BIT_KSEG +#define PAGE_OFFSET 0xffff800000000000UL +#else +#define PAGE_OFFSET 0xfffffc0000000000UL +#endif + +#else + +#ifdef USE_48_BIT_KSEG +#define PAGE_OFFSET 0xffff800000000000 +#else +#define PAGE_OFFSET 0xfffffc0000000000 +#endif + +#endif /* !__ASSEMBLY__ */ + +#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) +#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) +#ifndef CONFIG_DISCONTIGMEM +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) + +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#endif /* CONFIG_DISCONTIGMEM */ + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include +#include + +#endif /* _ALPHA_PAGE_H */ diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h new file mode 100644 index 00000000000..9b4ba0d6f00 --- /dev/null +++ b/arch/alpha/include/asm/pal.h @@ -0,0 +1,51 @@ +#ifndef __ALPHA_PAL_H +#define __ALPHA_PAL_H + +/* + * Common PAL-code + */ +#define PAL_halt 0 +#define PAL_cflush 1 +#define PAL_draina 2 +#define PAL_bpt 128 +#define PAL_bugchk 129 +#define PAL_chmk 131 +#define PAL_callsys 131 +#define PAL_imb 134 +#define PAL_rduniq 158 +#define PAL_wruniq 159 +#define PAL_gentrap 170 +#define PAL_nphalt 190 + +/* + * VMS specific PAL-code + */ +#define PAL_swppal 10 +#define PAL_mfpr_vptb 41 + +/* + * OSF specific PAL-code + */ +#define PAL_cserve 9 +#define PAL_wripir 13 +#define PAL_rdmces 16 +#define PAL_wrmces 17 +#define PAL_wrfen 43 +#define PAL_wrvptptr 45 +#define PAL_jtopal 46 +#define PAL_swpctx 48 +#define PAL_wrval 49 +#define PAL_rdval 50 +#define PAL_tbi 51 +#define PAL_wrent 52 +#define PAL_swpipl 53 +#define PAL_rdps 54 +#define PAL_wrkgp 55 +#define PAL_wrusp 56 +#define PAL_wrperfmon 57 +#define PAL_rdusp 58 +#define PAL_whami 60 +#define PAL_retsys 61 +#define PAL_rti 63 + +#endif /* __ALPHA_PAL_H */ diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h new file mode 100644 index 00000000000..e691ecfedb2 --- /dev/null +++ b/arch/alpha/include/asm/param.h @@ -0,0 +1,27 @@ +#ifndef _ASM_ALPHA_PARAM_H +#define _ASM_ALPHA_PARAM_H + +/* ??? Gross. I don't want to parameterize this, and supposedly the + hardware ignores reprogramming. We also need userland buy-in to the + change in HZ, since this is visible in the wait4 resources etc. 
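Returning briefly to the page.h hunk above: __pa() and __va() there are a pure constant-offset translation against the KSEG base, not a page-table walk. Below is a stand-alone sketch of that arithmetic, assuming the default (non-48-bit-KSEG) PAGE_OFFSET; my_pa()/my_va() are names invented for the example.

#include <stdio.h>

#define PAGE_OFFSET	0xfffffc0000000000UL	/* default KSEG base */

/* User-space model of __pa()/__va(): subtract or add PAGE_OFFSET. */
static unsigned long my_pa(unsigned long vaddr) { return vaddr - PAGE_OFFSET; }
static unsigned long my_va(unsigned long paddr) { return paddr + PAGE_OFFSET; }

int main(void)
{
	unsigned long phys = 0x12345678UL;

	printf("kernel va of %#lx is %#lx\n", phys, my_va(phys));
	printf("round trip ok: %d\n", my_pa(my_va(phys)) == phys);
	return 0;
}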
*/ + +#ifdef __KERNEL__ +#define HZ CONFIG_HZ +#define USER_HZ HZ +#else +#define HZ 1024 +#endif + +#define EXEC_PAGESIZE 8192 + +#ifndef NOGROUP +#define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#ifdef __KERNEL__ +# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ +#endif + +#endif /* _ASM_ALPHA_PARAM_H */ diff --git a/arch/alpha/include/asm/parport.h b/arch/alpha/include/asm/parport.h new file mode 100644 index 00000000000..c5ee7cbb2fc --- /dev/null +++ b/arch/alpha/include/asm/parport.h @@ -0,0 +1,18 @@ +/* + * parport.h: platform-specific PC-style parport initialisation + * + * Copyright (C) 1999, 2000 Tim Waugh + * + * This file should only be included by drivers/parport/parport_pc.c. + */ + +#ifndef _ASM_AXP_PARPORT_H +#define _ASM_AXP_PARPORT_H 1 + +static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); +static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) +{ + return parport_pc_find_isa_ports (autoirq, autodma); +} + +#endif /* !(_ASM_AXP_PARPORT_H) */ diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h new file mode 100644 index 00000000000..2a14302c17a --- /dev/null +++ b/arch/alpha/include/asm/pci.h @@ -0,0 +1,276 @@ +#ifndef __ALPHA_PCI_H +#define __ALPHA_PCI_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* + * The following structure is used to manage multiple PCI busses. + */ + +struct pci_dev; +struct pci_bus; +struct resource; +struct pci_iommu_arena; +struct page; + +/* A controller. Used to manage multiple PCI busses. */ + +struct pci_controller { + struct pci_controller *next; + struct pci_bus *bus; + struct resource *io_space; + struct resource *mem_space; + + /* The following are for reporting to userland. The invariant is + that if we report a BWX-capable dense memory, we do not report + a sparse memory at all, even if it exists. */ + unsigned long sparse_mem_base; + unsigned long dense_mem_base; + unsigned long sparse_io_base; + unsigned long dense_io_base; + + /* This one's for the kernel only. It's in KSEG somewhere. */ + unsigned long config_space_base; + + unsigned int index; + /* For compatibility with current (as of July 2003) pciutils + and XFree86. Eventually will be removed. */ + unsigned int need_domain_info; + + struct pci_iommu_arena *sg_pci; + struct pci_iommu_arena *sg_isa; + + void *sysdata; +}; + +/* Override the logic in pci_scan_bus for skipping already-configured + bus numbers. */ + +#define pcibios_assign_all_busses() 1 +#define pcibios_scan_all_fns(a, b) 0 + +#define PCIBIOS_MIN_IO alpha_mv.min_io_address +#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address + +extern void pcibios_set_master(struct pci_dev *dev); + +extern inline void pcibios_penalize_isa_irq(int irq, int active) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +/* IOMMU controls. */ + +/* The PCI address space does not equal the physical memory address space. + The networking and block device layers use this boolean for bounce buffer + decisions. */ +#define PCI_DMA_BUS_IS_PHYS 0 + +/* Allocate and map kernel buffer using consistent mode DMA for PCI + device. Returns non-NULL cpu-view pointer to the buffer if + successful and sets *DMA_ADDRP to the pci side dma address as well, + else DMA_ADDRP is undefined. 
*/ + +extern void *__pci_alloc_consistent(struct pci_dev *, size_t, + dma_addr_t *, gfp_t); +static inline void * +pci_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma) +{ + return __pci_alloc_consistent(dev, size, dma, GFP_ATOMIC); +} + +/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must + be values that were returned from pci_alloc_consistent. SIZE must + be the same as what as passed into pci_alloc_consistent. + References to the memory and mappings associated with CPU_ADDR or + DMA_ADDR past this call are illegal. */ + +extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t); + +/* Map a single buffer of the indicate size for PCI DMA in streaming mode. + The 32-bit PCI bus mastering address to use is returned. Once the device + is given the dma address, the device owns this memory until either + pci_unmap_single or pci_dma_sync_single_for_cpu is performed. */ + +extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int); + +/* Likewise, but for a page instead of an address. */ +extern dma_addr_t pci_map_page(struct pci_dev *, struct page *, + unsigned long, size_t, int); + +/* Test for pci_map_single or pci_map_page having generated an error. */ + +static inline int +pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) +{ + return dma_addr == 0; +} + +/* Unmap a single streaming mode DMA translation. The DMA_ADDR and + SIZE must match what was provided for in a previous pci_map_single + call. All other usages are undefined. After this call, reads by + the cpu to the buffer are guaranteed to see whatever the device + wrote there. */ + +extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int); +extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int); + +/* pci_unmap_{single,page} is not a nop, thus... */ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) + +/* Map a set of buffers described by scatterlist in streaming mode for + PCI DMA. This is the scatter-gather version of the above + pci_map_single interface. Here the scatter gather list elements + are each tagged with the appropriate PCI dma address and length. + They are obtained via sg_dma_{address,length}(SG). + + NOTE: An implementation may be able to use a smaller number of DMA + address/length pairs than there are SG table elements. (for + example via virtual mapping capabilities) The routine returns the + number of addr/length pairs actually used, at most nents. + + Device ownership issues as mentioned above for pci_map_single are + the same here. */ + +extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int); + +/* Unmap a set of streaming mode DMA translations. Again, cpu read + rules concerning calls here are the same as for pci_unmap_single() + above. */ + +extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int); + +/* Make physical memory consistent for a single streaming mode DMA + translation after a transfer and device currently has ownership + of the buffer. 
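As a usage note for the streaming-DMA calls declared above: a driver typically brackets each transfer with pci_map_single()/pci_unmap_single() and checks the returned handle with pci_dma_mapping_error(). The fragment below is only a sketch of that pairing; it builds only inside a kernel tree, and the function name, the pdev/buf/len parameters and the PCI_DMA_TODEVICE direction flag (from linux/pci.h) are assumptions of the example rather than part of this header.

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical helper showing the usual map/use/unmap pairing. */
static int example_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, handle))
		return -EIO;

	/* ... hand "handle" to the device and wait for completion ... */

	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
	return 0;
}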
+ + If you perform a pci_map_single() but wish to interrogate the + buffer using the cpu, yet do not wish to teardown the PCI dma + mapping, you must call this function before doing so. At the next + point you give the PCI dma address back to the card, you must first + perform a pci_dma_sync_for_device, and then the device again owns + the buffer. */ + +static inline void +pci_dma_sync_single_for_cpu(struct pci_dev *dev, dma_addr_t dma_addr, + long size, int direction) +{ + /* Nothing to do. */ +} + +static inline void +pci_dma_sync_single_for_device(struct pci_dev *dev, dma_addr_t dma_addr, + size_t size, int direction) +{ + /* Nothing to do. */ +} + +/* Make physical memory consistent for a set of streaming mode DMA + translations after a transfer. The same as pci_dma_sync_single_* + but for a scatter-gather list, same rules and usage. */ + +static inline void +pci_dma_sync_sg_for_cpu(struct pci_dev *dev, struct scatterlist *sg, + int nents, int direction) +{ + /* Nothing to do. */ +} + +static inline void +pci_dma_sync_sg_for_device(struct pci_dev *dev, struct scatterlist *sg, + int nents, int direction) +{ + /* Nothing to do. */ +} + +/* Return whether the given PCI device DMA address mask can + be supported properly. For example, if your device can + only drive the low 24-bits during PCI bus mastering, then + you would pass 0x00ffffff as the mask to this function. */ + +extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); + +#ifdef CONFIG_PCI +static inline void pci_dma_burst_advice(struct pci_dev *pdev, + enum pci_dma_burst_strategy *strat, + unsigned long *strategy_parameter) +{ + unsigned long cacheline_size; + u8 byte; + + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + *strat = PCI_DMA_BURST_BOUNDARY; + *strategy_parameter = cacheline_size; +} +#endif + +/* TODO: integrate with include/asm-generic/pci.h ? */ +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return channel ? 15 : 14; +} + +extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *, + struct resource *); + +extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, + struct pci_bus_region *region); + +static inline struct resource * +pcibios_select_root(struct pci_dev *pdev, struct resource *res) +{ + struct resource *root = NULL; + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + + return root; +} + +#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + return hose->need_domain_info; +} + +struct pci_dev *alpha_gendev_to_pci(struct device *dev); + +#endif /* __KERNEL__ */ + +/* Values for the `which' argument to sys_pciconfig_iobase. 
*/ +#define IOBASE_HOSE 0 +#define IOBASE_SPARSE_MEM 1 +#define IOBASE_DENSE_MEM 2 +#define IOBASE_SPARSE_IO 3 +#define IOBASE_DENSE_IO 4 +#define IOBASE_ROOT_BUS 5 +#define IOBASE_FROM_HOSE 0x10000 + +extern struct pci_dev *isa_bridge; + +#endif /* __ALPHA_PCI_H */ diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h new file mode 100644 index 00000000000..3495e8e00d7 --- /dev/null +++ b/arch/alpha/include/asm/percpu.h @@ -0,0 +1,78 @@ +#ifndef __ALPHA_PERCPU_H +#define __ALPHA_PERCPU_H +#include +#include + +/* + * Determine the real variable name from the name visible in the + * kernel sources. + */ +#define per_cpu_var(var) per_cpu__##var + +#ifdef CONFIG_SMP + +/* + * per_cpu_offset() is the offset that has to be added to a + * percpu variable to get to the instance for a certain processor. + */ +extern unsigned long __per_cpu_offset[NR_CPUS]; + +#define per_cpu_offset(x) (__per_cpu_offset[x]) + +#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) +#ifdef CONFIG_DEBUG_PREEMPT +#define my_cpu_offset per_cpu_offset(smp_processor_id()) +#else +#define my_cpu_offset __my_cpu_offset +#endif + +#ifndef MODULE +#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) +#define PER_CPU_ATTRIBUTES +#else +/* + * To calculate addresses of locally defined variables, GCC uses 32-bit + * displacement from the GP. Which doesn't work for per cpu variables in + * modules, as an offset to the kernel per cpu area is way above 4G. + * + * This forces allocation of a GOT entry for per cpu variable using + * ldq instruction with a 'literal' relocation. + */ +#define SHIFT_PERCPU_PTR(var, offset) ({ \ + extern int simple_identifier_##var(void); \ + unsigned long __ptr, tmp_gp; \ + asm ( "br %1, 1f \n\ + 1: ldgp %1, 0(%1) \n\ + ldq %0, per_cpu__" #var"(%1)\t!literal" \ + : "=&r"(__ptr), "=&r"(tmp_gp)); \ + (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) + +#define PER_CPU_ATTRIBUTES __used + +#endif /* MODULE */ + +/* + * A percpu variable may point to a discarded regions. The following are + * established ways to produce a usable pointer from the percpu variable + * offset. + */ +#define per_cpu(var, cpu) \ + (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu))) +#define __get_cpu_var(var) \ + (*SHIFT_PERCPU_PTR(var, my_cpu_offset)) +#define __raw_get_cpu_var(var) \ + (*SHIFT_PERCPU_PTR(var, __my_cpu_offset)) + +#else /* ! SMP */ + +#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) +#define __get_cpu_var(var) per_cpu_var(var) +#define __raw_get_cpu_var(var) per_cpu_var(var) + +#define PER_CPU_ATTRIBUTES + +#endif /* SMP */ + +#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name) + +#endif /* __ALPHA_PERCPU_H */ diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h new file mode 100644 index 00000000000..fd090155dcc --- /dev/null +++ b/arch/alpha/include/asm/pgalloc.h @@ -0,0 +1,83 @@ +#ifndef _ALPHA_PGALLOC_H +#define _ALPHA_PGALLOC_H + +#include +#include + +/* + * Allocate and free page tables. The xxx_kernel() versions are + * used to allocate a kernel page table - this turns on ASN bits + * if any. 
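Looking back at the SMP per_cpu() machinery in percpu.h above: each "per-cpu" variable is reached by adding a per-CPU byte offset to the address of one master copy. The toy program below models only that offset arithmetic in user space; NR_CPUS, the counters array and the simplified per_cpu() macro are all illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

#define NR_CPUS 4

static long counters[NR_CPUS];			/* stand-in per-cpu area */
static unsigned long per_cpu_offset[NR_CPUS];	/* byte offset per CPU */

/* Simplified model of per_cpu(var, cpu): base address plus offset. */
#define per_cpu(var, cpu) \
	(*(long *)((char *)&(var) + per_cpu_offset[cpu]))

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu_offset[cpu] = cpu * sizeof(long);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu(counters[0], cpu) += cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %ld\n", cpu, counters[cpu]);
	return 0;
}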
+ */ + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET)); +} +#define pmd_pgtable(pmd) pmd_page(pmd) + +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) +{ + pmd_set(pmd, pte); +} + +static inline void +pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) +{ + pgd_set(pgd, pmd); +} + +extern pgd_t *pgd_alloc(struct mm_struct *mm); + +static inline void +pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_page((unsigned long)pgd); +} + +static inline pmd_t * +pmd_alloc_one(struct mm_struct *mm, unsigned long address) +{ + pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); + return ret; +} + +static inline void +pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + free_page((unsigned long)pmd); +} + +extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); + +static inline void +pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static inline pgtable_t +pte_alloc_one(struct mm_struct *mm, unsigned long address) +{ + pte_t *pte = pte_alloc_one_kernel(mm, address); + struct page *page; + + if (!pte) + return NULL; + page = virt_to_page(pte); + pgtable_page_ctor(page); + return page; +} + +static inline void +pte_free(struct mm_struct *mm, pgtable_t page) +{ + pgtable_page_dtor(page); + __free_page(page); +} + +#define check_pgt_cache() do { } while (0) + +#endif /* _ALPHA_PGALLOC_H */ diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h new file mode 100644 index 00000000000..3f0c59f6d8a --- /dev/null +++ b/arch/alpha/include/asm/pgtable.h @@ -0,0 +1,380 @@ +#ifndef _ALPHA_PGTABLE_H +#define _ALPHA_PGTABLE_H + +#include + +/* + * This file contains the functions and defines necessary to modify and use + * the Alpha page table tree. + * + * This hopefully works with any standard Alpha page-size, as defined + * in (currently 8192). + */ +#include + +#include +#include /* For TASK_SIZE */ +#include + +struct mm_struct; +struct vm_area_struct; + +/* Certain architectures need to do special things when PTEs + * within a page table are directly modified. Thus, the following + * hook is made available. + */ +#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +/* PMD_SHIFT determines the size of the area a second-level page table can map */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* PGDIR_SHIFT determines what a third-level page table entry can map */ +#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * Entries per page directory level: the Alpha is three-level, with + * all levels having a one-page page table. + */ +#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) +#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3)) +#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3)) +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0 + +/* Number of pointers that fit on a page: this will go away. 
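For a feel of the three-level split defined above (8KB pages and 8-byte entries, so 1024 slots per level), here is a stand-alone program that carves a user virtual address into its pgd/pmd/pte indices and page offset; the sample address is arbitrary.

#include <stdio.h>

#define PAGE_SHIFT	13
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))		/* 23 */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))	/* 33 */
#define PTRS_PER_LEVEL	(1UL << (PAGE_SHIFT - 3))		/* 1024 */

int main(void)
{
	unsigned long addr = 0x000001234567abcdUL;	/* arbitrary example */

	printf("pgd %lu, pmd %lu, pte %lu, offset %#lx\n",
	       (addr >> PGDIR_SHIFT) & (PTRS_PER_LEVEL - 1),
	       (addr >> PMD_SHIFT) & (PTRS_PER_LEVEL - 1),
	       (addr >> PAGE_SHIFT) & (PTRS_PER_LEVEL - 1),
	       addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}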
*/ +#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3)) + +#ifdef CONFIG_ALPHA_LARGE_VMALLOC +#define VMALLOC_START 0xfffffe0000000000 +#else +#define VMALLOC_START (-2*PGDIR_SIZE) +#endif +#define VMALLOC_END (-PGDIR_SIZE) + +/* + * OSF/1 PAL-code-imposed page table bits + */ +#define _PAGE_VALID 0x0001 +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_ASM 0x0010 +#define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */ +#define _PAGE_URE 0x0200 /* xxx */ +#define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */ +#define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */ + +/* .. and these are ours ... */ +#define _PAGE_DIRTY 0x20000 +#define _PAGE_ACCESSED 0x40000 +#define _PAGE_FILE 0x80000 /* set:pagecache, unset:swap */ + +/* + * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly + * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it. + * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use + * the KRE/URE bits to watch for it. That way we don't need to overload the + * KWE/UWE bits with both handling dirty and accessed. + * + * Note that the kernel uses the accessed bit just to check whether to page + * out a page or not, so it doesn't have to be exact anyway. + */ + +#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE) +#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) + +#define _PFN_MASK 0xFFFFFFFF00000000UL + +#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) +#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS) + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) + +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) + +#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW)) +#define _PAGE_S(x) _PAGE_NORMAL(x) + +/* + * The hardware can handle write-only mappings, but as the Alpha + * architecture does byte-wide writes with a read-modify-write + * sequence, it's not practical to have write-without-read privs. 
+ * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in + * arch/alpha/mm/fault.c) + */ + /* xwr */ +#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) +#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW) +#define __P010 _PAGE_P(_PAGE_FOE) +#define __P011 _PAGE_P(_PAGE_FOE) +#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR) +#define __P101 _PAGE_P(_PAGE_FOW) +#define __P110 _PAGE_P(0) +#define __P111 _PAGE_P(0) + +#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) +#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW) +#define __S010 _PAGE_S(_PAGE_FOE) +#define __S011 _PAGE_S(_PAGE_FOE) +#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR) +#define __S101 _PAGE_S(_PAGE_FOW) +#define __S110 _PAGE_S(0) +#define __S111 _PAGE_S(0) + +/* + * pgprot_noncached() is only for infiniband pci support, and a real + * implementation for RAM would be more complicated. + */ +#define pgprot_noncached(prot) (prot) + +/* + * BAD_PAGETABLE is used when we need a bogus page-table, while + * BAD_PAGE is used for a bogus page. + * + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern pte_t __bad_page(void); +extern pmd_t * __bad_pagetable(void); + +extern unsigned long __zero_page(void); + +#define BAD_PAGETABLE __bad_pagetable() +#define BAD_PAGE __bad_page() +#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE)) + +/* number of bits that fit into a memory pointer */ +#define BITS_PER_PTR (8*sizeof(unsigned long)) + +/* to align the pointer to a pointer address */ +#define PTR_MASK (~(sizeof(void*)-1)) + +/* sizeof(void*)==1<>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) + +/* + * On certain platforms whose physical address space can overlap KSEG, + * namely EV6 and above, we must re-twiddle the physaddr to restore the + * correct high-order bits. + * + * This is extremely confusing until you realize that this is actually + * just working around a userspace bug. The X server was intending to + * provide the physical address but instead provided the KSEG address. + * Or tried to, except it's not representable. + * + * On Tsunami there's nothing meaningful at 0x40000000000, so this is + * a safe thing to do. Come the first core logic that does put something + * in this area -- memory or whathaveyou -- then this hack will have + * to go away. So be prepared! + */ + +#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG) +#error "EV6-only feature in a generic kernel" +#endif +#if defined(CONFIG_ALPHA_GENERIC) || \ + (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG)) +#define KSEG_PFN (0xc0000000000UL >> PAGE_SHIFT) +#define PHYS_TWIDDLE(pfn) \ + ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \ + ? ((pfn) ^= KSEG_PFN) : (pfn)) +#else +#define PHYS_TWIDDLE(pfn) (pfn) +#endif + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
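Those conversions all rest on the PTE layout above: PFN in the upper 32 bits (_PFN_MASK), protection and software bits in the lower 32. The snippet below is a small user-space model of the mk_pte()/pte_pfn() style of packing, built on the same constants; make_pte() and pte_prot() are names invented for the example.

#include <stdio.h>

#define _PAGE_VALID	0x0001UL
#define _PAGE_KRE	0x0100UL
#define _PAGE_KWE	0x1000UL
#define _PFN_MASK	0xFFFFFFFF00000000UL

typedef unsigned long pte_t;

/* PFN goes in the top 32 bits, protection bits in the bottom 32. */
static pte_t make_pte(unsigned long pfn, unsigned long prot)
{
	return (pfn << 32) | prot;
}

static unsigned long pte_pfn(pte_t pte)  { return pte >> 32; }
static unsigned long pte_prot(pte_t pte) { return pte & ~_PFN_MASK; }

int main(void)
{
	pte_t pte = make_pte(0x1234, _PAGE_VALID | _PAGE_KRE | _PAGE_KWE);

	printf("pfn %#lx, prot %#lx\n", pte_pfn(pte), pte_prot(pte));
	return 0;
}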
+ */ +#ifndef CONFIG_DISCONTIGMEM +#define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT) + +#define pte_pfn(pte) (pte_val(pte) >> 32) +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) +#define mk_pte(page, pgprot) \ +({ \ + pte_t pte; \ + \ + pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \ + pte; \ +}) +#endif + +extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot) +{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; } + +extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } + +extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep) +{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } + +extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) +{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } + + +extern inline unsigned long +pmd_page_vaddr(pmd_t pmd) +{ + return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET; +} + +#ifndef CONFIG_DISCONTIGMEM +#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32)) +#define pgd_page(pgd) (mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32)) +#endif + +extern inline unsigned long pgd_page_vaddr(pgd_t pgd) +{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); } + +extern inline int pte_none(pte_t pte) { return !pte_val(pte); } +extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } +extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_val(*ptep) = 0; +} + +extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } +extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; } +extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; } +extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; } + +extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); } +extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; } +extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; } +extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; } + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
+ */ +extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); } +extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +extern inline int pte_special(pte_t pte) { return 0; } + +extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; } +extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; } +extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; } +extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; } +extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; } +extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; } +extern inline pte_t pte_mkspecial(pte_t pte) { return pte; } + +#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) + +/* to find an entry in a page-table-directory. */ +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) + +/* + * The smp_read_barrier_depends() in the following functions are required to + * order the load of *dir (the pointer in the top level page table) with any + * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir). + * + * If this ordering is not enforced, the CPU might load an older value of + * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for + * more details. + * + * Note that we never change the mm->pgd pointer after the task is running, so + * pgd_offset does not require such a barrier. + */ + +/* Find an entry in the second-level page table.. */ +extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) +{ + pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); + smp_read_barrier_depends(); /* see above */ + return ret; +} + +/* Find an entry in the third-level page table.. */ +extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address) +{ + pte_t *ret = (pte_t *) pmd_page_vaddr(*dir) + + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1)); + smp_read_barrier_depends(); /* see above */ + return ret; +} + +#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr)) +#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr)) +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + +extern pgd_t swapper_pg_dir[1024]; + +/* + * The Alpha doesn't have any external MMU info: the kernel page + * tables contain all the necessary information. + */ +extern inline void update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte) +{ +} + +/* + * Non-present pages: high 24 bits are offset, next 8 bits type, + * low 32 bits zero. 
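A quick stand-alone model of that packing, mirroring mk_swap_pte() and the __swp_type()/__swp_offset() macros that follow; the swp_pte_t typedef and the helper names are illustrative only.

#include <stdio.h>

/* Swap type in bits 32..39, offset in bits 40..63, low 32 bits clear
 * so the PTE never looks present. */
typedef unsigned long swp_pte_t;

static swp_pte_t mk_swap(unsigned long type, unsigned long offset)
{
	return (type << 32) | (offset << 40);
}

static unsigned long swp_type(swp_pte_t pte)   { return (pte >> 32) & 0xff; }
static unsigned long swp_offset(swp_pte_t pte) { return pte >> 40; }

int main(void)
{
	swp_pte_t e = mk_swap(3, 0x1000);

	printf("type %lu, offset %#lx\n", swp_type(e), swp_offset(e));
	return 0;
}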
+ */ +extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) +{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; } + +#define __swp_type(x) (((x).val >> 32) & 0xff) +#define __swp_offset(x) ((x).val >> 40) +#define __swp_entry(type, off) ((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#define pte_to_pgoff(pte) (pte_val(pte) >> 32) +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE }) + +#define PTE_FILE_MAX_BITS 32 + +#ifndef CONFIG_DISCONTIGMEM +#define kern_addr_valid(addr) (1) +#endif + +#define io_remap_pfn_range(vma, start, pfn, size, prot) \ + remap_pfn_range(vma, start, pfn, size, prot) + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) + +extern void paging_init(void); + +#include + +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) + +/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */ +#define HAVE_ARCH_UNMAPPED_AREA + +#endif /* _ALPHA_PGTABLE_H */ diff --git a/arch/alpha/include/asm/poll.h b/arch/alpha/include/asm/poll.h new file mode 100644 index 00000000000..c98509d3149 --- /dev/null +++ b/arch/alpha/include/asm/poll.h @@ -0,0 +1 @@ +#include diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/asm/posix_types.h new file mode 100644 index 00000000000..db167413300 --- /dev/null +++ b/arch/alpha/include/asm/posix_types.h @@ -0,0 +1,123 @@ +#ifndef _ALPHA_POSIX_TYPES_H +#define _ALPHA_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. 
+ */ + +typedef unsigned int __kernel_ino_t; +typedef unsigned int __kernel_mode_t; +typedef unsigned int __kernel_nlink_t; +typedef long __kernel_off_t; +typedef long long __kernel_loff_t; +typedef int __kernel_pid_t; +typedef int __kernel_ipc_pid_t; +typedef unsigned int __kernel_uid_t; +typedef unsigned int __kernel_gid_t; +typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; +typedef long __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef int __kernel_clockid_t; +typedef int __kernel_timer_t; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +typedef __kernel_uid_t __kernel_old_uid_t; +typedef __kernel_gid_t __kernel_old_gid_t; +typedef __kernel_uid_t __kernel_uid32_t; +typedef __kernel_gid_t __kernel_gid32_t; + +typedef unsigned int __kernel_old_dev_t; + +#ifdef __KERNEL__ + +#ifndef __GNUC__ + +#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) +#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) +#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0) +#define __FD_ZERO(set) \ + ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set))) + +#else /* __GNUC__ */ + +/* With GNU C, use inline functions instead so args are evaluated only once: */ + +#undef __FD_SET +static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + fdsetp->fds_bits[_tmp] |= (1UL<<_rem); +} + +#undef __FD_CLR +static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem); +} + +#undef __FD_ISSET +static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0; +} + +/* + * This will unroll the loop for the normal constant case (8 ints, + * for a 256-bit fd_set) + */ +#undef __FD_ZERO +static __inline__ void __FD_ZERO(__kernel_fd_set *p) +{ + unsigned long *tmp = p->fds_bits; + int i; + + if (__builtin_constant_p(__FDSET_LONGS)) { + switch (__FDSET_LONGS) { + case 16: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; + tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; + tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; + return; + + case 8: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; + return; + + case 4: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + return; + } + } + i = __FDSET_LONGS; + while (i) { + i--; + *tmp = 0; + tmp++; + } +} + +#endif /* __GNUC__ */ + +#endif /* __KERNEL__ */ + +#endif /* _ALPHA_POSIX_TYPES_H */ diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h new file mode 100644 index 00000000000..94afe585930 --- /dev/null +++ b/arch/alpha/include/asm/processor.h @@ -0,0 +1,93 @@ +/* + * include/asm-alpha/processor.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +#ifndef __ASM_ALPHA_PROCESSOR_H +#define __ASM_ALPHA_PROCESSOR_H + +#include /* for ADDR_LIMIT_32BIT */ + +/* + * Returns current 
instruction pointer ("program counter"). + */ +#define current_text_addr() \ + ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; }) + +/* + * We have a 42-bit user address space: 4TB user VM... + */ +#define TASK_SIZE (0x40000000000UL) + +#define STACK_TOP \ + (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) + +#define STACK_TOP_MAX 0x00120000000UL + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE \ + ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) + +typedef struct { + unsigned long seg; +} mm_segment_t; + +/* This is dead. Everything has been moved to thread_info. */ +struct thread_struct { }; +#define INIT_THREAD { } + +/* Return saved PC of a blocked thread. */ +struct task_struct; +extern unsigned long thread_saved_pc(struct task_struct *); + +/* Do necessary setup to start up a newly executed thread. */ +extern void start_thread(struct pt_regs *, unsigned long, unsigned long); + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *); + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +/* Create a kernel thread without removing it from tasklists. */ +extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); + +unsigned long get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) + +#define KSTK_ESP(tsk) \ + ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp) + +#define cpu_relax() barrier() + +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH + +#ifndef CONFIG_SMP +/* Nothing to prefetch. */ +#define spin_lock_prefetch(lock) do { } while (0) +#endif + +extern inline void prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 0, 3); +} + +extern inline void prefetchw(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} + +#ifdef CONFIG_SMP +extern inline void spin_lock_prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} +#endif + +#endif /* __ASM_ALPHA_PROCESSOR_H */ diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h new file mode 100644 index 00000000000..32c7a5cddd5 --- /dev/null +++ b/arch/alpha/include/asm/ptrace.h @@ -0,0 +1,83 @@ +#ifndef _ASMAXP_PTRACE_H +#define _ASMAXP_PTRACE_H + + +/* + * This struct defines the way the registers are stored on the + * kernel stack during a system call or other kernel entry + * + * NOTE! I want to minimize the overhead of system calls, so this + * struct has as little information as possible. I does not have + * + * - floating point regs: the kernel doesn't change those + * - r9-15: saved by the C compiler + * + * This makes "fork()" and "exec()" a bit more complex, but should + * give us low system call latency. 
+ */ + +struct pt_regs { + unsigned long r0; + unsigned long r1; + unsigned long r2; + unsigned long r3; + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + unsigned long r23; + unsigned long r24; + unsigned long r25; + unsigned long r26; + unsigned long r27; + unsigned long r28; + unsigned long hae; +/* JRP - These are the values provided to a0-a2 by PALcode */ + unsigned long trap_a0; + unsigned long trap_a1; + unsigned long trap_a2; +/* These are saved by PAL-code: */ + unsigned long ps; + unsigned long pc; + unsigned long gp; + unsigned long r16; + unsigned long r17; + unsigned long r18; +}; + +/* + * This is the extended stack used by signal handlers and the context + * switcher: it's pushed after the normal "struct pt_regs". + */ +struct switch_stack { + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long r26; + unsigned long fp[32]; /* fp[31] is fpcr */ +}; + +#ifdef __KERNEL__ + +#define user_mode(regs) (((regs)->ps & 8) != 0) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +extern void show_regs(struct pt_regs *); + +#define task_pt_regs(task) \ + ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) + +#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) + +#endif + +#endif diff --git a/arch/alpha/include/asm/reg.h b/arch/alpha/include/asm/reg.h new file mode 100644 index 00000000000..86ff916fb06 --- /dev/null +++ b/arch/alpha/include/asm/reg.h @@ -0,0 +1,52 @@ +#ifndef __reg_h__ +#define __reg_h__ + +/* + * Exception frame offsets. + */ +#define EF_V0 0 +#define EF_T0 1 +#define EF_T1 2 +#define EF_T2 3 +#define EF_T3 4 +#define EF_T4 5 +#define EF_T5 6 +#define EF_T6 7 +#define EF_T7 8 +#define EF_S0 9 +#define EF_S1 10 +#define EF_S2 11 +#define EF_S3 12 +#define EF_S4 13 +#define EF_S5 14 +#define EF_S6 15 +#define EF_A3 16 +#define EF_A4 17 +#define EF_A5 18 +#define EF_T8 19 +#define EF_T9 20 +#define EF_T10 21 +#define EF_T11 22 +#define EF_RA 23 +#define EF_T12 24 +#define EF_AT 25 +#define EF_SP 26 +#define EF_PS 27 +#define EF_PC 28 +#define EF_GP 29 +#define EF_A0 30 +#define EF_A1 31 +#define EF_A2 32 + +#define EF_SIZE (33*8) +#define HWEF_SIZE (6*8) /* size of PAL frame (PS-A2) */ + +#define EF_SSIZE (EF_SIZE - HWEF_SIZE) + +/* + * Map register number into core file offset. 
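+ *
+ * For example, given the base address "ubase" of such a register block,
+ * the saved PC and stack pointer can be read back (illustratively) as:
+ *
+ *        unsigned long pc = CORE_REG(EF_PC, ubase);
+ *        unsigned long sp = CORE_REG(EF_SP, ubase);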
+ */ +#define CORE_REG(reg, ubase) \ + (((unsigned long *)((unsigned long)(ubase)))[reg]) + +#endif /* __reg_h__ */ diff --git a/arch/alpha/include/asm/regdef.h b/arch/alpha/include/asm/regdef.h new file mode 100644 index 00000000000..142df9c4f8b --- /dev/null +++ b/arch/alpha/include/asm/regdef.h @@ -0,0 +1,44 @@ +#ifndef __alpha_regdef_h__ +#define __alpha_regdef_h__ + +#define v0 $0 /* function return value */ + +#define t0 $1 /* temporary registers (caller-saved) */ +#define t1 $2 +#define t2 $3 +#define t3 $4 +#define t4 $5 +#define t5 $6 +#define t6 $7 +#define t7 $8 + +#define s0 $9 /* saved-registers (callee-saved registers) */ +#define s1 $10 +#define s2 $11 +#define s3 $12 +#define s4 $13 +#define s5 $14 +#define s6 $15 +#define fp s6 /* frame-pointer (s6 in frame-less procedures) */ + +#define a0 $16 /* argument registers (caller-saved) */ +#define a1 $17 +#define a2 $18 +#define a3 $19 +#define a4 $20 +#define a5 $21 + +#define t8 $22 /* more temps (caller-saved) */ +#define t9 $23 +#define t10 $24 +#define t11 $25 +#define ra $26 /* return address register */ +#define t12 $27 + +#define pv t12 /* procedure-variable register */ +#define AT $at /* assembler temporary */ +#define gp $29 /* global pointer */ +#define sp $30 /* stack pointer */ +#define zero $31 /* reads as zero, writes are noops */ + +#endif /* __alpha_regdef_h__ */ diff --git a/arch/alpha/include/asm/resource.h b/arch/alpha/include/asm/resource.h new file mode 100644 index 00000000000..c10874ff597 --- /dev/null +++ b/arch/alpha/include/asm/resource.h @@ -0,0 +1,22 @@ +#ifndef _ALPHA_RESOURCE_H +#define _ALPHA_RESOURCE_H + +/* + * Alpha/Linux-specific ordering of these four resource limit IDs, + * the rest comes from the generic header: + */ +#define RLIMIT_NOFILE 6 /* max number of open files */ +#define RLIMIT_AS 7 /* address space limit */ +#define RLIMIT_NPROC 8 /* max number of processes */ +#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */ + +/* + * SuS says limits have to be unsigned. Fine, it's unsigned, but + * we retain the old value for compatibility, especially with DU. + * When you run into the 2^63 barrier, you call me. + */ +#define RLIM_INFINITY 0x7ffffffffffffffful + +#include + +#endif /* _ALPHA_RESOURCE_H */ diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h new file mode 100644 index 00000000000..4e854b1333e --- /dev/null +++ b/arch/alpha/include/asm/rtc.h @@ -0,0 +1,10 @@ +#ifndef _ALPHA_RTC_H +#define _ALPHA_RTC_H + +/* + * Alpha uses the default access methods for the RTC. + */ + +#include + +#endif diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h new file mode 100644 index 00000000000..1570c0b5433 --- /dev/null +++ b/arch/alpha/include/asm/rwsem.h @@ -0,0 +1,259 @@ +#ifndef _ALPHA_RWSEM_H +#define _ALPHA_RWSEM_H + +/* + * Written by Ivan Kokshaysky , 2001. 
+ * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h + */ + +#ifndef _LINUX_RWSEM_H +#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" +#endif + +#ifdef __KERNEL__ + +#include +#include +#include + +struct rwsem_waiter; + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +/* + * the semaphore definition + */ +struct rw_semaphore { + long count; +#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L +#define RWSEM_ACTIVE_BIAS 0x0000000000000001L +#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL +#define RWSEM_WAITING_BIAS (-0x0000000100000000L) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + spinlock_t wait_lock; + struct list_head wait_list; +}; + +#define __RWSEM_INITIALIZER(name) \ + { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait_list) } + +#define DECLARE_RWSEM(name) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = RWSEM_UNLOCKED_VALUE; + spin_lock_init(&sem->wait_lock); + INIT_LIST_HEAD(&sem->wait_list); +} + +static inline void __down_read(struct rw_semaphore *sem) +{ + long oldcount; +#ifndef CONFIG_SMP + oldcount = sem->count; + sem->count += RWSEM_ACTIVE_READ_BIAS; +#else + long temp; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%3,%2\n" + " stq_c %2,%1\n" + " beq %2,2f\n" + " mb\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) + :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); +#endif + if (unlikely(oldcount < 0)) + rwsem_down_read_failed(sem); +} + +/* + * trylock for reading -- returns 1 if successful, 0 if contention + */ +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + long old, new, res; + + res = sem->count; + do { + new = res + RWSEM_ACTIVE_READ_BIAS; + if (new <= 0) + break; + old = res; + res = cmpxchg(&sem->count, old, new); + } while (res != old); + return res >= 0 ? 
1 : 0; +} + +static inline void __down_write(struct rw_semaphore *sem) +{ + long oldcount; +#ifndef CONFIG_SMP + oldcount = sem->count; + sem->count += RWSEM_ACTIVE_WRITE_BIAS; +#else + long temp; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%3,%2\n" + " stq_c %2,%1\n" + " beq %2,2f\n" + " mb\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) + :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); +#endif + if (unlikely(oldcount)) + rwsem_down_write_failed(sem); +} + +/* + * trylock for writing -- returns 1 if successful, 0 if contention + */ +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + if (ret == RWSEM_UNLOCKED_VALUE) + return 1; + return 0; +} + +static inline void __up_read(struct rw_semaphore *sem) +{ + long oldcount; +#ifndef CONFIG_SMP + oldcount = sem->count; + sem->count -= RWSEM_ACTIVE_READ_BIAS; +#else + long temp; + __asm__ __volatile__( + " mb\n" + "1: ldq_l %0,%1\n" + " subq %0,%3,%2\n" + " stq_c %2,%1\n" + " beq %2,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) + :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); +#endif + if (unlikely(oldcount < 0)) + if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0) + rwsem_wake(sem); +} + +static inline void __up_write(struct rw_semaphore *sem) +{ + long count; +#ifndef CONFIG_SMP + sem->count -= RWSEM_ACTIVE_WRITE_BIAS; + count = sem->count; +#else + long temp; + __asm__ __volatile__( + " mb\n" + "1: ldq_l %0,%1\n" + " subq %0,%3,%2\n" + " stq_c %2,%1\n" + " beq %2,2f\n" + " subq %0,%3,%0\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (count), "=m" (sem->count), "=&r" (temp) + :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); +#endif + if (unlikely(count)) + if ((int)count == 0) + rwsem_wake(sem); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + long oldcount; +#ifndef CONFIG_SMP + oldcount = sem->count; + sem->count -= RWSEM_WAITING_BIAS; +#else + long temp; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%3,%2\n" + " stq_c %2,%1\n" + " beq %2,2f\n" + " mb\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) + :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory"); +#endif + if (unlikely(oldcount < 0)) + rwsem_downgrade_wake(sem); +} + +static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem) +{ +#ifndef CONFIG_SMP + sem->count += val; +#else + long temp; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%2,%0\n" + " stq_c %0,%1\n" + " beq %0,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (temp), "=m" (sem->count) + :"Ir" (val), "m" (sem->count)); +#endif +} + +static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem) +{ +#ifndef CONFIG_SMP + sem->count += val; + return sem->count; +#else + long ret, temp; + __asm__ __volatile__( + "1: ldq_l %0,%1\n" + " addq %0,%3,%2\n" + " addq %0,%3,%0\n" + " stq_c %2,%1\n" + " beq %2,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + :"=&r" (ret), "=m" (sem->count), "=&r" (temp) + :"Ir" (val), "m" (sem->count)); + + return ret; +#endif +} + +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return (sem->count != 0); +} + +#endif /* __KERNEL__ */ +#endif /* _ALPHA_RWSEM_H */ diff --git 
a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h new file mode 100644 index 00000000000..440747ca634 --- /dev/null +++ b/arch/alpha/include/asm/scatterlist.h @@ -0,0 +1,25 @@ +#ifndef _ALPHA_SCATTERLIST_H +#define _ALPHA_SCATTERLIST_H + +#include +#include + +struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif + unsigned long page_link; + unsigned int offset; + + unsigned int length; + + dma_addr_t dma_address; + __u32 dma_length; +}; + +#define sg_dma_address(sg) ((sg)->dma_address) +#define sg_dma_len(sg) ((sg)->dma_length) + +#define ISA_DMA_THRESHOLD (~0UL) + +#endif /* !(_ALPHA_SCATTERLIST_H) */ diff --git a/arch/alpha/include/asm/sections.h b/arch/alpha/include/asm/sections.h new file mode 100644 index 00000000000..43b40edd6e4 --- /dev/null +++ b/arch/alpha/include/asm/sections.h @@ -0,0 +1,7 @@ +#ifndef _ALPHA_SECTIONS_H +#define _ALPHA_SECTIONS_H + +/* nothing to see, move along */ +#include + +#endif diff --git a/arch/alpha/include/asm/segment.h b/arch/alpha/include/asm/segment.h new file mode 100644 index 00000000000..0453d97daae --- /dev/null +++ b/arch/alpha/include/asm/segment.h @@ -0,0 +1,6 @@ +#ifndef __ALPHA_SEGMENT_H +#define __ALPHA_SEGMENT_H + +/* Only here because we have some old header files that expect it.. */ + +#endif diff --git a/arch/alpha/include/asm/sembuf.h b/arch/alpha/include/asm/sembuf.h new file mode 100644 index 00000000000..7b38b153478 --- /dev/null +++ b/arch/alpha/include/asm/sembuf.h @@ -0,0 +1,22 @@ +#ifndef _ALPHA_SEMBUF_H +#define _ALPHA_SEMBUF_H + +/* + * The semid64_ds structure for alpha architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ALPHA_SEMBUF_H */ diff --git a/arch/alpha/include/asm/serial.h b/arch/alpha/include/asm/serial.h new file mode 100644 index 00000000000..9d263e8d8cc --- /dev/null +++ b/arch/alpha/include/asm/serial.h @@ -0,0 +1,29 @@ +/* + * include/asm-alpha/serial.h + */ + + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + * + * It'd be nice if someone built a serial card with a 24.576 MHz + * clock, since the 16550A is capable of handling a top speed of 1.5 + * megabits/second; but this requires the faster clock. 
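+ *
+ * With the standard 1.8432 MHz clock, the divide-by-16 below gives
+ * BASE_BAUD = 1843200 / 16 = 115200, so the ttyS0-ttyS3 entries in
+ * SERIAL_PORT_DFNS top out at 115200 baud; the hypothetical 24.576 MHz
+ * clock would raise that ceiling to 1.536 Mbaud.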
+ */ +#define BASE_BAUD ( 1843200 / 16 ) + +/* Standard COM flags (except for COM4, because of the 8514 problem) */ +#ifdef CONFIG_SERIAL_DETECT_IRQ +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) +#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) +#else +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) +#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF +#endif + +#define SERIAL_PORT_DFNS \ + /* UART CLK PORT IRQ FLAGS */ \ + { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ + { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ + { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ + { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h new file mode 100644 index 00000000000..2e023a4aa31 --- /dev/null +++ b/arch/alpha/include/asm/setup.h @@ -0,0 +1,6 @@ +#ifndef __ALPHA_SETUP_H +#define __ALPHA_SETUP_H + +#define COMMAND_LINE_SIZE 256 + +#endif diff --git a/arch/alpha/include/asm/sfp-machine.h b/arch/alpha/include/asm/sfp-machine.h new file mode 100644 index 00000000000..5fe63afbd47 --- /dev/null +++ b/arch/alpha/include/asm/sfp-machine.h @@ -0,0 +1,82 @@ +/* Machine-dependent software floating-point definitions. + Alpha kernel version. + Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Richard Henderson (rth@cygnus.com), + Jakub Jelinek (jakub@redhat.com) and + David S. Miller (davem@redhat.com). + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public License as + published by the Free Software Foundation; either version 2 of the + License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with the GNU C Library; see the file COPYING.LIB. If + not, write to the Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ + +#ifndef _SFP_MACHINE_H +#define _SFP_MACHINE_H + +#define _FP_W_TYPE_SIZE 64 +#define _FP_W_TYPE unsigned long +#define _FP_WS_TYPE signed long +#define _FP_I_TYPE long + +#define _FP_MUL_MEAT_S(R,X,Y) \ + _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y) +#define _FP_MUL_MEAT_D(R,X,Y) \ + _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) +#define _FP_MUL_MEAT_Q(R,X,Y) \ + _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) + +#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) +#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y) +#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y) + +#define _FP_NANFRAC_S _FP_QNANBIT_S +#define _FP_NANFRAC_D _FP_QNANBIT_D +#define _FP_NANFRAC_Q _FP_QNANBIT_Q +#define _FP_NANSIGN_S 1 +#define _FP_NANSIGN_D 1 +#define _FP_NANSIGN_Q 1 + +#define _FP_KEEPNANFRACP 1 + +/* Alpha Architecture Handbook, 4.7.10.4 sais that + * we should prefer any type of NaN in Fb, then Fa. + */ +#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ + do { \ + R##_s = Y##_s; \ + _FP_FRAC_COPY_##wc(R,X); \ + R##_c = FP_CLS_NAN; \ + } while (0) + +/* Obtain the current rounding mode. 
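+ *
+ * "mode" is expected to already hold the FPCR dynamic-rounding field
+ * shifted down by FPCR_DYN_SHIFT; a caller would typically prepare it
+ * along the lines of (assuming the usual FPCR_DYN_MASK constant from the
+ * Alpha FPU header):
+ *
+ *        mode = (fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT;
+ *
+ * so that it compares directly against FP_RND_NEAREST and friends below.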
*/ +#define FP_ROUNDMODE mode +#define FP_RND_NEAREST (FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT) +#define FP_RND_ZERO (FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT) +#define FP_RND_PINF (FPCR_DYN_PLUS >> FPCR_DYN_SHIFT) +#define FP_RND_MINF (FPCR_DYN_MINUS >> FPCR_DYN_SHIFT) + +/* Exception flags. */ +#define FP_EX_INVALID IEEE_TRAP_ENABLE_INV +#define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF +#define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF +#define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE +#define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE +#define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO + +#define FP_DENORM_ZERO (swcr & IEEE_MAP_DMZ) + +/* We write the results always */ +#define FP_INHIBIT_RESULTS 0 + +#endif diff --git a/arch/alpha/include/asm/shmbuf.h b/arch/alpha/include/asm/shmbuf.h new file mode 100644 index 00000000000..37ee84f0508 --- /dev/null +++ b/arch/alpha/include/asm/shmbuf.h @@ -0,0 +1,38 @@ +#ifndef _ALPHA_SHMBUF_H +#define _ALPHA_SHMBUF_H + +/* + * The shmid64_ds structure for alpha architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. of current attaches */ + unsigned long __unused1; + unsigned long __unused2; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _ALPHA_SHMBUF_H */ diff --git a/arch/alpha/include/asm/shmparam.h b/arch/alpha/include/asm/shmparam.h new file mode 100644 index 00000000000..cc901d58aeb --- /dev/null +++ b/arch/alpha/include/asm/shmparam.h @@ -0,0 +1,6 @@ +#ifndef _ASMAXP_SHMPARAM_H +#define _ASMAXP_SHMPARAM_H + +#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ + +#endif /* _ASMAXP_SHMPARAM_H */ diff --git a/arch/alpha/include/asm/sigcontext.h b/arch/alpha/include/asm/sigcontext.h new file mode 100644 index 00000000000..323cdb02619 --- /dev/null +++ b/arch/alpha/include/asm/sigcontext.h @@ -0,0 +1,34 @@ +#ifndef _ASMAXP_SIGCONTEXT_H +#define _ASMAXP_SIGCONTEXT_H + +struct sigcontext { + /* + * What should we have here? I'd probably better use the same + * stack layout as OSF/1, just in case we ever want to try + * running their binaries.. + * + * This is the basic layout, but I don't know if we'll ever + * actually fill in all the values.. 
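+ *
+ * Illustratively, an OSF/1-style handler handed this block can inspect
+ * the interrupted state with plain field reads, e.g.:
+ *
+ *        void handler(int sig, int code, struct sigcontext *sc)
+ *        {
+ *                unsigned long pc = sc->sc_pc;        (interrupted PC)
+ *                unsigned long sp = sc->sc_regs[30];  (user stack pointer)
+ *        }
+ *
+ * (the three-argument handler form is assumed here purely for illustration).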
+ */ + long sc_onstack; + long sc_mask; + long sc_pc; + long sc_ps; + long sc_regs[32]; + long sc_ownedfp; + long sc_fpregs[32]; + unsigned long sc_fpcr; + unsigned long sc_fp_control; + unsigned long sc_reserved1, sc_reserved2; + unsigned long sc_ssize; + char * sc_sbase; + unsigned long sc_traparg_a0; + unsigned long sc_traparg_a1; + unsigned long sc_traparg_a2; + unsigned long sc_fp_trap_pc; + unsigned long sc_fp_trigger_sum; + unsigned long sc_fp_trigger_inst; +}; + + +#endif diff --git a/arch/alpha/include/asm/siginfo.h b/arch/alpha/include/asm/siginfo.h new file mode 100644 index 00000000000..9822362a842 --- /dev/null +++ b/arch/alpha/include/asm/siginfo.h @@ -0,0 +1,9 @@ +#ifndef _ALPHA_SIGINFO_H +#define _ALPHA_SIGINFO_H + +#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) +#define __ARCH_SI_TRAPNO + +#include + +#endif diff --git a/arch/alpha/include/asm/signal.h b/arch/alpha/include/asm/signal.h new file mode 100644 index 00000000000..13c2305d35e --- /dev/null +++ b/arch/alpha/include/asm/signal.h @@ -0,0 +1,172 @@ +#ifndef _ASMAXP_SIGNAL_H +#define _ASMAXP_SIGNAL_H + +#include + +/* Avoid too many header ordering problems. */ +struct siginfo; + +#ifdef __KERNEL__ +/* Digital Unix defines 64 signals. Most things should be clean enough + to redefine this at will, if care is taken to make libc match. */ + +#define _NSIG 64 +#define _NSIG_BPW 64 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +#else +/* Here we must cater to libcs that poke about in kernel headers. */ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + + +/* + * Linux/AXP has different signal numbers that Linux/i386: I'm trying + * to make it OSF/1 binary compatible, at least for normal binaries. + */ +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGEMT 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGBUS 10 +#define SIGSEGV 11 +#define SIGSYS 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGURG 16 +#define SIGSTOP 17 +#define SIGTSTP 18 +#define SIGCONT 19 +#define SIGCHLD 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGIO 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGINFO 29 +#define SIGUSR1 30 +#define SIGUSR2 31 + +#define SIGPOLL SIGIO +#define SIGPWR SIGINFO +#define SIGIOT SIGABRT + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. 
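+ *
+ * A typical (illustrative) combination for a handler that wants siginfo
+ * delivery and transparently restarted syscalls is:
+ *
+ *        act.sa_flags = SA_SIGINFO | SA_RESTART;
+ *
+ * built from the individual bit values defined just below.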
+ */ + +#define SA_ONSTACK 0x00000001 +#define SA_RESTART 0x00000002 +#define SA_NOCLDSTOP 0x00000004 +#define SA_NODEFER 0x00000008 +#define SA_RESETHAND 0x00000010 +#define SA_NOCLDWAIT 0x00000020 +#define SA_SIGINFO 0x00000040 + +#define SA_ONESHOT SA_RESETHAND +#define SA_NOMASK SA_NODEFER + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 16384 + +#define SIG_BLOCK 1 /* for blocking signals */ +#define SIG_UNBLOCK 2 /* for unblocking signals */ +#define SIG_SETMASK 3 /* for setting the signal mask */ + +#include + +#ifdef __KERNEL__ +struct osf_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + int sa_flags; +}; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; + __sigrestore_t ka_restorer; +}; +#else +/* Here we must cater to libcs that poke about in kernel headers. */ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int, struct siginfo *, void *); + } _u; + sigset_t sa_mask; + int sa_flags; +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +/* sigstack(2) is deprecated, and will be withdrawn in a future version + of the X/Open CAE Specification. Use sigaltstack instead. It is only + implemented here for OSF/1 compatibility. */ + +struct sigstack { + void __user *ss_sp; + int ss_onstack; +}; + +#ifdef __KERNEL__ +#include + +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +#endif + +#endif diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h new file mode 100644 index 00000000000..544c69af816 --- /dev/null +++ b/arch/alpha/include/asm/smp.h @@ -0,0 +1,62 @@ +#ifndef __ASM_SMP_H +#define __ASM_SMP_H + +#include +#include +#include +#include + +/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. 
:-( */ + +static __inline__ unsigned char +__hard_smp_processor_id(void) +{ + register unsigned char __r0 __asm__("$0"); + __asm__ __volatile__( + "call_pal %1 #whami" + : "=r"(__r0) + :"i" (PAL_whami) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#ifdef CONFIG_SMP + +#include + +struct cpuinfo_alpha { + unsigned long loops_per_jiffy; + unsigned long last_asn; + int need_new_asn; + int asn_lock; + unsigned long ipi_count; + unsigned long prof_multiplier; + unsigned long prof_counter; + unsigned char mcheck_expected; + unsigned char mcheck_taken; + unsigned char mcheck_extra; +} __attribute__((aligned(64))); + +extern struct cpuinfo_alpha cpu_data[NR_CPUS]; + +#define PROC_CHANGE_PENALTY 20 + +#define hard_smp_processor_id() __hard_smp_processor_id() +#define raw_smp_processor_id() (current_thread_info()->cpu) + +extern int smp_num_cpus; +#define cpu_possible_map cpu_present_map + +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi(cpumask_t mask); + +#else /* CONFIG_SMP */ + +#define hard_smp_processor_id() 0 +#define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; }) + +#endif /* CONFIG_SMP */ + +#define NO_PROC_ID (-1) + +#endif diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h new file mode 100644 index 00000000000..a1057c2d95e --- /dev/null +++ b/arch/alpha/include/asm/socket.h @@ -0,0 +1,70 @@ +#ifndef _ASM_SOCKET_H +#define _ASM_SOCKET_H + +#include + +/* For setsockopt(2) */ +/* + * Note: we only bother about making the SOL_SOCKET options + * same as OSF/1, as that's all that "normal" programs are + * likely to set. We don't necessarily want to be binary + * compatible with _everything_. + */ +#define SOL_SOCKET 0xffff + +#define SO_DEBUG 0x0001 +#define SO_REUSEADDR 0x0004 +#define SO_KEEPALIVE 0x0008 +#define SO_DONTROUTE 0x0010 +#define SO_BROADCAST 0x0020 +#define SO_LINGER 0x0080 +#define SO_OOBINLINE 0x0100 +/* To add :#define SO_REUSEPORT 0x0200 */ + +#define SO_TYPE 0x1008 +#define SO_ERROR 0x1007 +#define SO_SNDBUF 0x1001 +#define SO_RCVBUF 0x1002 +#define SO_SNDBUFFORCE 0x100a +#define SO_RCVBUFFORCE 0x100b +#define SO_RCVLOWAT 0x1010 +#define SO_SNDLOWAT 0x1011 +#define SO_RCVTIMEO 0x1012 +#define SO_SNDTIMEO 0x1013 +#define SO_ACCEPTCONN 0x1014 + +/* linux-specific, might as well be the same as on i386 */ +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_BSDCOMPAT 14 + +#define SO_PASSCRED 17 +#define SO_PEERCRED 18 +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_PEERSEC 30 +#define SO_PASSSEC 34 +#define SO_TIMESTAMPNS 35 +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 19 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 20 +#define SO_SECURITY_ENCRYPTION_NETWORK 21 + +#define SO_MARK 36 + +/* O_NONBLOCK clashes with the bits used for socket types. Therefore we + * have to define SOCK_NONBLOCK to a different value here. + */ +#define SOCK_NONBLOCK 0x40000000 + +#endif /* _ASM_SOCKET_H */ diff --git a/arch/alpha/include/asm/sockios.h b/arch/alpha/include/asm/sockios.h new file mode 100644 index 00000000000..7932c7ab4a4 --- /dev/null +++ b/arch/alpha/include/asm/sockios.h @@ -0,0 +1,16 @@ +#ifndef _ASM_ALPHA_SOCKIOS_H +#define _ASM_ALPHA_SOCKIOS_H + +/* Socket-level I/O control calls. 
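+ *
+ * Typical (illustrative) userspace use, fetching the arrival time of the
+ * last packet received on socket "fd":
+ *
+ *        struct timeval tv;
+ *        ioctl(fd, SIOCGSTAMP, &tv);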
*/ + +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ + +#endif /* _ASM_ALPHA_SOCKIOS_H */ diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h new file mode 100644 index 00000000000..aeeb125f685 --- /dev/null +++ b/arch/alpha/include/asm/spinlock.h @@ -0,0 +1,173 @@ +#ifndef _ALPHA_SPINLOCK_H +#define _ALPHA_SPINLOCK_H + +#include +#include +#include + +/* + * Simple spin lock operations. There are two variants, one clears IRQ's + * on the local processor, one does not. + * + * We make no fairness assumptions. They have a cost. + */ + +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define __raw_spin_is_locked(x) ((x)->lock != 0) +#define __raw_spin_unlock_wait(x) \ + do { cpu_relax(); } while ((x)->lock) + +static inline void __raw_spin_unlock(raw_spinlock_t * lock) +{ + mb(); + lock->lock = 0; +} + +static inline void __raw_spin_lock(raw_spinlock_t * lock) +{ + long tmp; + + __asm__ __volatile__( + "1: ldl_l %0,%1\n" + " bne %0,2f\n" + " lda %0,1\n" + " stl_c %0,%1\n" + " beq %0,2f\n" + " mb\n" + ".subsection 2\n" + "2: ldl %0,%1\n" + " bne %0,2b\n" + " br 1b\n" + ".previous" + : "=&r" (tmp), "=m" (lock->lock) + : "m"(lock->lock) : "memory"); +} + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + return !test_and_set_bit(0, &lock->lock); +} + +/***********************************************************/ + +static inline int __raw_read_can_lock(raw_rwlock_t *lock) +{ + return (lock->lock & 1) == 0; +} + +static inline int __raw_write_can_lock(raw_rwlock_t *lock) +{ + return lock->lock == 0; +} + +static inline void __raw_read_lock(raw_rwlock_t *lock) +{ + long regx; + + __asm__ __volatile__( + "1: ldl_l %1,%0\n" + " blbs %1,6f\n" + " subl %1,2,%1\n" + " stl_c %1,%0\n" + " beq %1,6f\n" + " mb\n" + ".subsection 2\n" + "6: ldl %1,%0\n" + " blbs %1,6b\n" + " br 1b\n" + ".previous" + : "=m" (*lock), "=&r" (regx) + : "m" (*lock) : "memory"); +} + +static inline void __raw_write_lock(raw_rwlock_t *lock) +{ + long regx; + + __asm__ __volatile__( + "1: ldl_l %1,%0\n" + " bne %1,6f\n" + " lda %1,1\n" + " stl_c %1,%0\n" + " beq %1,6f\n" + " mb\n" + ".subsection 2\n" + "6: ldl %1,%0\n" + " bne %1,6b\n" + " br 1b\n" + ".previous" + : "=m" (*lock), "=&r" (regx) + : "m" (*lock) : "memory"); +} + +static inline int __raw_read_trylock(raw_rwlock_t * lock) +{ + long regx; + int success; + + __asm__ __volatile__( + "1: ldl_l %1,%0\n" + " lda %2,0\n" + " blbs %1,2f\n" + " subl %1,2,%2\n" + " stl_c %2,%0\n" + " beq %2,6f\n" + "2: mb\n" + ".subsection 2\n" + "6: br 1b\n" + ".previous" + : "=m" (*lock), "=&r" (regx), "=&r" (success) + : "m" (*lock) : "memory"); + + return success; +} + +static inline int __raw_write_trylock(raw_rwlock_t * lock) +{ + long regx; + int success; + + __asm__ __volatile__( + "1: ldl_l %1,%0\n" + " lda %2,0\n" + " bne %1,2f\n" + " lda %2,1\n" + " stl_c %2,%0\n" + " beq %2,6f\n" + "2: mb\n" + ".subsection 2\n" + "6: br 1b\n" + ".previous" + : "=m" (*lock), "=&r" (regx), "=&r" (success) + : "m" (*lock) : "memory"); + + return success; +} + +static inline void __raw_read_unlock(raw_rwlock_t * lock) +{ + long regx; + __asm__ __volatile__( + " mb\n" + "1: ldl_l %1,%0\n" + " addl %1,2,%1\n" + " stl_c %1,%0\n" + " beq %1,6f\n" + ".subsection 2\n" + "6: br 1b\n" + 
".previous" + : "=m" (*lock), "=&r" (regx) + : "m" (*lock) : "memory"); +} + +static inline void __raw_write_unlock(raw_rwlock_t * lock) +{ + mb(); + lock->lock = 0; +} + +#define _raw_spin_relax(lock) cpu_relax() +#define _raw_read_relax(lock) cpu_relax() +#define _raw_write_relax(lock) cpu_relax() + +#endif /* _ALPHA_SPINLOCK_H */ diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h new file mode 100644 index 00000000000..8141eb5ebf0 --- /dev/null +++ b/arch/alpha/include/asm/spinlock_types.h @@ -0,0 +1,20 @@ +#ifndef _ALPHA_SPINLOCK_TYPES_H +#define _ALPHA_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + volatile unsigned int lock; +} raw_spinlock_t; + +#define __RAW_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int lock; +} raw_rwlock_t; + +#define __RAW_RW_LOCK_UNLOCKED { 0 } + +#endif diff --git a/arch/alpha/include/asm/stat.h b/arch/alpha/include/asm/stat.h new file mode 100644 index 00000000000..07ad3e6b3f3 --- /dev/null +++ b/arch/alpha/include/asm/stat.h @@ -0,0 +1,48 @@ +#ifndef _ALPHA_STAT_H +#define _ALPHA_STAT_H + +struct stat { + unsigned int st_dev; + unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_rdev; + long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; + unsigned int st_blksize; + unsigned int st_blocks; + unsigned int st_flags; + unsigned int st_gen; +}; + +/* The stat64 structure increases the size of dev_t, blkcnt_t, adds + nanosecond resolution times, and padding for expansion. */ + +struct stat64 { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_rdev; + long st_size; + unsigned long st_blocks; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_blksize; + unsigned int st_nlink; + unsigned int __pad0; + + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + long __unused[3]; +}; + +#endif diff --git a/arch/alpha/include/asm/statfs.h b/arch/alpha/include/asm/statfs.h new file mode 100644 index 00000000000..ad15830baef --- /dev/null +++ b/arch/alpha/include/asm/statfs.h @@ -0,0 +1,6 @@ +#ifndef _ALPHA_STATFS_H +#define _ALPHA_STATFS_H + +#include + +#endif diff --git a/arch/alpha/include/asm/string.h b/arch/alpha/include/asm/string.h new file mode 100644 index 00000000000..b02b8a28294 --- /dev/null +++ b/arch/alpha/include/asm/string.h @@ -0,0 +1,66 @@ +#ifndef __ALPHA_STRING_H__ +#define __ALPHA_STRING_H__ + +#ifdef __KERNEL__ + +/* + * GCC of any recent vintage doesn't do stupid things with bcopy. + * EGCS 1.1 knows all about expanding memcpy inline, others don't. + * + * Similarly for a memset with data = 0. + */ + +#define __HAVE_ARCH_MEMCPY +extern void * memcpy(void *, const void *, size_t); +#define __HAVE_ARCH_MEMMOVE +extern void * memmove(void *, const void *, size_t); + +/* For backward compatibility with modules. Unused otherwise. */ +extern void * __memcpy(void *, const void *, size_t); + +#define memcpy __builtin_memcpy + +#define __HAVE_ARCH_MEMSET +extern void * __constant_c_memset(void *, unsigned long, size_t); +extern void * __memset(void *, int, size_t); +extern void * memset(void *, int, size_t); + +#define memset(s, c, n) \ +(__builtin_constant_p(c) \ + ? (__builtin_constant_p(n) && (c) == 0 \ + ? 
__builtin_memset((s),0,(n)) \ + : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \ + : __memset((s),(c),(n))) + +#define __HAVE_ARCH_STRCPY +extern char * strcpy(char *,const char *); +#define __HAVE_ARCH_STRNCPY +extern char * strncpy(char *, const char *, size_t); +#define __HAVE_ARCH_STRCAT +extern char * strcat(char *, const char *); +#define __HAVE_ARCH_STRNCAT +extern char * strncat(char *, const char *, size_t); +#define __HAVE_ARCH_STRCHR +extern char * strchr(const char *,int); +#define __HAVE_ARCH_STRRCHR +extern char * strrchr(const char *,int); +#define __HAVE_ARCH_STRLEN +extern size_t strlen(const char *); +#define __HAVE_ARCH_MEMCHR +extern void * memchr(const void *, int, size_t); + +/* The following routine is like memset except that it writes 16-bit + aligned values. The DEST and COUNT parameters must be even for + correct operation. */ + +#define __HAVE_ARCH_MEMSETW +extern void * __memsetw(void *dest, unsigned short, size_t count); + +#define memsetw(s, c, n) \ +(__builtin_constant_p(c) \ + ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \ + : __memsetw((s),(c),(n))) + +#endif /* __KERNEL__ */ + +#endif /* __ALPHA_STRING_H__ */ diff --git a/arch/alpha/include/asm/suspend.h b/arch/alpha/include/asm/suspend.h new file mode 100644 index 00000000000..c7042d57585 --- /dev/null +++ b/arch/alpha/include/asm/suspend.h @@ -0,0 +1,6 @@ +#ifndef __ALPHA_SUSPEND_H +#define __ALPHA_SUSPEND_H + +/* Dummy include. */ + +#endif /* __ALPHA_SUSPEND_H */ diff --git a/arch/alpha/include/asm/sysinfo.h b/arch/alpha/include/asm/sysinfo.h new file mode 100644 index 00000000000..086aba284df --- /dev/null +++ b/arch/alpha/include/asm/sysinfo.h @@ -0,0 +1,39 @@ +/* + * include/asm-alpha/sysinfo.h + */ + +#ifndef __ASM_ALPHA_SYSINFO_H +#define __ASM_ALPHA_SYSINFO_H + +/* This defines the subset of the OSF/1 getsysinfo/setsysinfo calls + that we support. */ + +#define GSI_UACPROC 8 +#define GSI_IEEE_FP_CONTROL 45 +#define GSI_IEEE_STATE_AT_SIGNAL 46 +#define GSI_PROC_TYPE 60 +#define GSI_GET_HWRPB 101 + +#define SSI_NVPAIRS 1 +#define SSI_IEEE_FP_CONTROL 14 +#define SSI_IEEE_STATE_AT_SIGNAL 15 +#define SSI_IEEE_IGNORE_STATE_AT_SIGNAL 16 +#define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */ + +#define SSIN_UACPROC 6 + +#define UAC_BITMASK 7 +#define UAC_NOPRINT 1 +#define UAC_NOFIX 2 +#define UAC_SIGBUS 4 + + +#ifdef __KERNEL__ + +/* This is the shift that is applied to the UAC bits as stored in the + per-thread flags. See thread_info.h. */ +#define UAC_SHIFT 6 + +#endif + +#endif /* __ASM_ALPHA_SYSINFO_H */ diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h new file mode 100644 index 00000000000..afe20fa58c9 --- /dev/null +++ b/arch/alpha/include/asm/system.h @@ -0,0 +1,829 @@ +#ifndef __ALPHA_SYSTEM_H +#define __ALPHA_SYSTEM_H + +#include +#include +#include + +/* + * System defines.. Note that this is included both from .c and .S + * files, so it does only defines, not any C code. + */ + +/* + * We leave one page for the initial stack page, and one page for + * the initial process structure. Also, the console eats 3 MB for + * the initial bootloader (one of which we can reclaim later). + */ +#define BOOT_PCB 0x20000000 +#define BOOT_ADDR 0x20000000 +/* Remove when official MILO sources have ELF support: */ +#define BOOT_SIZE (16*1024) + +#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS +#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. 
*/ +#else +#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ +#endif + +#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) +#define SWAPPER_PGD KERNEL_START +#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) +#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) +#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) +#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) + +#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) + +/* + * This is setup by the secondary bootstrap loader. Because + * the zero page is zeroed out as soon as the vm system is + * initialized, we need to copy things out into a more permanent + * place. + */ +#define PARAM ZERO_PGE +#define COMMAND_LINE ((char*)(PARAM + 0x0000)) +#define INITRD_START (*(unsigned long *) (PARAM+0x100)) +#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) + +#ifndef __ASSEMBLY__ +#include +#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */ + +/* + * This is the logout header that should be common to all platforms + * (assuming they are running OSF/1 PALcode, I guess). + */ +struct el_common { + unsigned int size; /* size in bytes of logout area */ + unsigned int sbz1 : 30; /* should be zero */ + unsigned int err2 : 1; /* second error */ + unsigned int retry : 1; /* retry flag */ + unsigned int proc_offset; /* processor-specific offset */ + unsigned int sys_offset; /* system-specific offset */ + unsigned int code; /* machine check code */ + unsigned int frame_rev; /* frame revision */ +}; + +/* Machine Check Frame for uncorrectable errors (Large format) + * --- This is used to log uncorrectable errors such as + * double bit ECC errors. + * --- These errors are detected by both processor and systems. + */ +struct el_common_EV5_uncorrectable_mcheck { + unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ + unsigned long paltemp[24]; /* PAL TEMP REGS. */ + unsigned long exc_addr; /* Address of excepting instruction*/ + unsigned long exc_sum; /* Summary of arithmetic traps. */ + unsigned long exc_mask; /* Exception mask (from exc_sum). */ + unsigned long pal_base; /* Base address for PALcode. */ + unsigned long isr; /* Interrupt Status Reg. */ + unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */ + unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity + <12> set TAG parity*/ + unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1: + <2> Data error in bank 0 + <3> Data error in bank 1 + <4> Tag error in bank 0 + <5> Tag error in bank 1 */ + unsigned long va; /* Effective VA of fault or miss. */ + unsigned long mm_stat; /* Holds the reason for D-stream + fault or D-cache parity errors */ + unsigned long sc_addr; /* Address that was being accessed + when EV5 detected Secondary cache + failure. */ + unsigned long sc_stat; /* Helps determine if the error was + TAG/Data parity(Secondary Cache)*/ + unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */ + unsigned long ei_addr; /* Physical address of any transfer + that is logged in EV5 EI_STAT */ + unsigned long fill_syndrome; /* For correcting ECC errors. */ + unsigned long ei_stat; /* Helps identify reason of any + processor uncorrectable error + at its external interface. 
*/ + unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/ +}; + +struct el_common_EV6_mcheck { + unsigned int FrameSize; /* Bytes, including this field */ + unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */ + unsigned int CpuOffset; /* Offset to CPU-specific info */ + unsigned int SystemOffset; /* Offset to system-specific info */ + unsigned int MCHK_Code; + unsigned int MCHK_Frame_Rev; + unsigned long I_STAT; /* EV6 Internal Processor Registers */ + unsigned long DC_STAT; /* (See the 21264 Spec) */ + unsigned long C_ADDR; + unsigned long DC1_SYNDROME; + unsigned long DC0_SYNDROME; + unsigned long C_STAT; + unsigned long C_STS; + unsigned long MM_STAT; + unsigned long EXC_ADDR; + unsigned long IER_CM; + unsigned long ISUM; + unsigned long RESERVED0; + unsigned long PAL_BASE; + unsigned long I_CTL; + unsigned long PCTX; +}; + +extern void halt(void) __attribute__((noreturn)); +#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) + +#define switch_to(P,N,L) \ + do { \ + (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \ + check_mmu_context(); \ + } while (0) + +struct task_struct; +extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); + +#define imb() \ +__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") + +#define draina() \ +__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory") + +enum implver_enum { + IMPLVER_EV4, + IMPLVER_EV5, + IMPLVER_EV6 +}; + +#ifdef CONFIG_ALPHA_GENERIC +#define implver() \ +({ unsigned long __implver; \ + __asm__ ("implver %0" : "=r"(__implver)); \ + (enum implver_enum) __implver; }) +#else +/* Try to eliminate some dead code. */ +#ifdef CONFIG_ALPHA_EV4 +#define implver() IMPLVER_EV4 +#endif +#ifdef CONFIG_ALPHA_EV5 +#define implver() IMPLVER_EV5 +#endif +#if defined(CONFIG_ALPHA_EV6) +#define implver() IMPLVER_EV6 +#endif +#endif + +enum amask_enum { + AMASK_BWX = (1UL << 0), + AMASK_FIX = (1UL << 1), + AMASK_CIX = (1UL << 2), + AMASK_MAX = (1UL << 8), + AMASK_PRECISE_TRAP = (1UL << 9), +}; + +#define amask(mask) \ +({ unsigned long __amask, __input = (mask); \ + __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \ + __amask; }) + +#define __CALL_PAL_R0(NAME, TYPE) \ +extern inline TYPE NAME(void) \ +{ \ + register TYPE __r0 __asm__("$0"); \ + __asm__ __volatile__( \ + "call_pal %1 # " #NAME \ + :"=r" (__r0) \ + :"i" (PAL_ ## NAME) \ + :"$1", "$16", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_PAL_W1(NAME, TYPE0) \ +extern inline void NAME(TYPE0 arg0) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "call_pal %1 # "#NAME \ + : "=r"(__r16) \ + : "i"(PAL_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \ +extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "call_pal %2 # "#NAME \ + : "=r"(__r16), "=r"(__r17) \ + : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \ +extern inline RTYPE NAME(TYPE0 arg0) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "call_pal %2 # "#NAME \ + : "=r"(__r16), "=r"(__r0) \ + : "i"(PAL_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define 
__CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \ +extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "call_pal %3 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ + : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +__CALL_PAL_W1(cflush, unsigned long); +__CALL_PAL_R0(rdmces, unsigned long); +__CALL_PAL_R0(rdps, unsigned long); +__CALL_PAL_R0(rdusp, unsigned long); +__CALL_PAL_RW1(swpipl, unsigned long, unsigned long); +__CALL_PAL_R0(whami, unsigned long); +__CALL_PAL_W2(wrent, void*, unsigned long); +__CALL_PAL_W1(wripir, unsigned long); +__CALL_PAL_W1(wrkgp, unsigned long); +__CALL_PAL_W1(wrmces, unsigned long); +__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); +__CALL_PAL_W1(wrusp, unsigned long); +__CALL_PAL_W1(wrvptptr, unsigned long); + +#define IPL_MIN 0 +#define IPL_SW0 1 +#define IPL_SW1 2 +#define IPL_DEV0 3 +#define IPL_DEV1 4 +#define IPL_TIMER 5 +#define IPL_PERF 6 +#define IPL_POWERFAIL 6 +#define IPL_MCHECK 7 +#define IPL_MAX 7 + +#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK +#undef IPL_MIN +#define IPL_MIN __min_ipl +extern int __min_ipl; +#endif + +#define getipl() (rdps() & 7) +#define setipl(ipl) ((void) swpipl(ipl)) + +#define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) +#define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) +#define local_save_flags(flags) ((flags) = rdps()) +#define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) +#define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) + +#define irqs_disabled() (getipl() == IPL_MAX) + +/* + * TB routines.. + */ +#define __tbi(nr,arg,arg1...) \ +({ \ + register unsigned long __r16 __asm__("$16") = (nr); \ + register unsigned long __r17 __asm__("$17"); arg; \ + __asm__ __volatile__( \ + "call_pal %3 #__tbi" \ + :"=r" (__r16),"=r" (__r17) \ + :"0" (__r16),"i" (PAL_tbi) ,##arg1 \ + :"$0", "$1", "$22", "$23", "$24", "$25"); \ +}) + +#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17)) +#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17)) +#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17)) +#define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) +#define tbiap() __tbi(-1, /* no second argument */) +#define tbia() __tbi(-2, /* no second argument */) + +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). 
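+ *
+ * Typical (illustrative) use, claiming a one-word flag:
+ *
+ *        unsigned long old = xchg(&flag, 1UL);
+ *        if (old == 0)
+ *                ... this CPU set the flag first ...
+ *
+ * where "flag" may be any naturally aligned 1-, 2-, 4- or 8-byte object
+ * accepted by the size switch in __xchg() below.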
+ */ + +static inline unsigned long +__xchg_u8(volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " insbl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extbl %2,%4,%0\n" + " mskbl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +__xchg_u16(volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " inswl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extwl %2,%4,%0\n" + " mskwl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +__xchg_u32(volatile int *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldl_l %0,%4\n" + " bis $31,%3,%1\n" + " stl_c %1,%2\n" + " beq %1,2f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +__xchg_u64(volatile long *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldq_l %0,%4\n" + " bis $31,%3,%1\n" + " stq_c %1,%2\n" + " beq %1,2f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid xchg(). 
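+   (For instance, an xchg() on a 3-byte object falls through to the
+   default case of the size switch below, references this undefined
+   symbol, and is caught at link time rather than at run time.)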
*/ +extern void __xchg_called_with_bad_pointer(void); + +#define __xchg(ptr, x, size) \ +({ \ + unsigned long __xchg__res; \ + volatile void *__xchg__ptr = (ptr); \ + switch (size) { \ + case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \ + case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \ + case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \ + case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \ + default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ + } \ + __xchg__res; \ +}) + +#define xchg(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ + }) + +static inline unsigned long +__xchg_u8_local(volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " insbl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extbl %2,%4,%0\n" + " mskbl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +__xchg_u16_local(volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4,7,%3\n" + " inswl %1,%4,%1\n" + "1: ldq_l %2,0(%3)\n" + " extwl %2,%4,%0\n" + " mskwl %2,%4,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%3)\n" + " beq %2,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +__xchg_u32_local(volatile int *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldl_l %0,%4\n" + " bis $31,%3,%1\n" + " stl_c %1,%2\n" + " beq %1,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +__xchg_u64_local(volatile long *m, unsigned long val) +{ + unsigned long dummy; + + __asm__ __volatile__( + "1: ldq_l %0,%4\n" + " bis $31,%3,%1\n" + " stq_c %1,%2\n" + " beq %1,2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +#define __xchg_local(ptr, x, size) \ +({ \ + unsigned long __xchg__res; \ + volatile void *__xchg__ptr = (ptr); \ + switch (size) { \ + case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \ + case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \ + case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \ + case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \ + default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ + } \ + __xchg__res; \ +}) + +#define xchg_local(ptr,x) \ + ({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ + }) + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
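+ *
+ * The usual (illustrative) retry idiom built on top of cmpxchg() is:
+ *
+ *        old = *counter;
+ *        for (;;) {
+ *                prev = cmpxchg(counter, old, old + 1);
+ *                if (prev == old)
+ *                        break;
+ *                old = prev;
+ *        }
+ *
+ * which bumps *counter atomically; only the winning store pays for the
+ * barrier discussed above.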
+ */ + +#define __HAVE_ARCH_CMPXCHG 1 + +static inline unsigned long +__cmpxchg_u8(volatile char *m, long old, long new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " insbl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extbl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskbl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +__cmpxchg_u16(volatile short *m, long old, long new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " inswl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extwl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskwl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +__cmpxchg_u32(volatile int *m, int old, int new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldl_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stl_c %1,%2\n" + " beq %1,3f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldq_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stq_c %1,%2\n" + " beq %1,3f\n" +#ifdef CONFIG_SMP + " mb\n" +#endif + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +/* This function doesn't exist, so you'll get a linker error + if something tries to do an invalid cmpxchg(). 
*/ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) +{ + switch (size) { + case 1: + return __cmpxchg_u8(ptr, old, new); + case 2: + return __cmpxchg_u16(ptr, old, new); + case 4: + return __cmpxchg_u32(ptr, old, new); + case 8: + return __cmpxchg_u64(ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) +#define cmpxchg64(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg((ptr), (o), (n)); \ + }) + +static inline unsigned long +__cmpxchg_u8_local(volatile char *m, long old, long new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " insbl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extbl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskbl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +__cmpxchg_u16_local(volatile short *m, long old, long new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5,7,%4\n" + " inswl %1,%5,%1\n" + "1: ldq_l %2,0(%4)\n" + " extwl %2,%5,%0\n" + " cmpeq %0,%6,%3\n" + " beq %3,2f\n" + " mskwl %2,%5,%2\n" + " or %1,%2,%2\n" + " stq_c %2,0(%4)\n" + " beq %2,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +__cmpxchg_u32_local(volatile int *m, int old, int new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldl_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stl_c %1,%2\n" + " beq %1,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp; + + __asm__ __volatile__( + "1: ldq_l %0,%5\n" + " cmpeq %0,%3,%1\n" + " beq %1,2f\n" + " mov %4,%1\n" + " stq_c %1,%2\n" + " beq %1,3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static __always_inline unsigned long +__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, + int size) +{ + switch (size) { + case 1: + return __cmpxchg_u8_local(ptr, old, new); + case 2: + return __cmpxchg_u16_local(ptr, old, new); + case 4: + return __cmpxchg_u32_local(ptr, old, new); + case 8: + return __cmpxchg_u64_local(ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#define cmpxchg_local(ptr, o, n) \ + ({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr))); \ + }) +#define cmpxchg64_local(ptr, o, n) \ + ({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + cmpxchg_local((ptr), 
(o), (n)); \ + }) + + +#endif /* __ASSEMBLY__ */ + +#define arch_align_stack(x) (x) + +#endif diff --git a/arch/alpha/include/asm/termbits.h b/arch/alpha/include/asm/termbits.h new file mode 100644 index 00000000000..ad854a4a3af --- /dev/null +++ b/arch/alpha/include/asm/termbits.h @@ -0,0 +1,200 @@ +#ifndef _ALPHA_TERMBITS_H +#define _ALPHA_TERMBITS_H + +#include + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +/* + * termios type and macro definitions. Be careful about adding stuff + * to this file since it's used in GNU libc and there are strict rules + * concerning namespace pollution. + */ + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* Alpha has matching termios and ktermios */ + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VEOF 0 +#define VEOL 1 +#define VEOL2 2 +#define VERASE 3 +#define VWERASE 4 +#define VKILL 5 +#define VREPRINT 6 +#define VSWTC 7 +#define VINTR 8 +#define VQUIT 9 +#define VSUSP 10 +#define VSTART 12 +#define VSTOP 13 +#define VLNEXT 14 +#define VDISCARD 15 +#define VMIN 16 +#define VTIME 17 + +/* c_iflag bits */ +#define IGNBRK 0000001 +#define BRKINT 0000002 +#define IGNPAR 0000004 +#define PARMRK 0000010 +#define INPCK 0000020 +#define ISTRIP 0000040 +#define INLCR 0000100 +#define IGNCR 0000200 +#define ICRNL 0000400 +#define IXON 0001000 +#define IXOFF 0002000 +#define IXANY 0004000 +#define IUCLC 0010000 +#define IMAXBEL 0020000 +#define IUTF8 0040000 + +/* c_oflag bits */ +#define OPOST 0000001 +#define ONLCR 0000002 +#define OLCUC 0000004 + +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 + +#define OFILL 00000100 +#define OFDEL 00000200 +#define NLDLY 00001400 +#define NL0 00000000 +#define NL1 00000400 +#define NL2 00001000 +#define NL3 00001400 +#define TABDLY 00006000 +#define TAB0 00000000 +#define TAB1 00002000 +#define TAB2 00004000 +#define TAB3 00006000 +#define CRDLY 00030000 +#define CR0 00000000 +#define CR1 00010000 +#define CR2 00020000 +#define CR3 00030000 +#define FFDLY 00040000 +#define FF0 00000000 +#define FF1 00040000 +#define BSDLY 00100000 +#define BS0 00000000 +#define BS1 00100000 +#define VTDLY 00200000 +#define VT0 00000000 +#define VT1 00200000 +#define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. 
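Stepping back to the cmpxchg()/cmpxchg_local() helpers completed at the top of this hunk, a minimal usage sketch may help; the helper and variable names below are illustrative and not part of the patch.

/* Illustrative only: a lock-free add built on the cmpxchg() macro above. */
static inline void example_lockfree_add(unsigned long *p, unsigned long v)
{
	unsigned long old;

	do {
		old = *p;				/* snapshot the current value */
	} while (cmpxchg(p, old, old + v) != old);	/* retry if another CPU raced us */
}

The value returned by cmpxchg() is what the ldq_l observed, so comparing it against the snapshot tells the caller whether the update actually took effect.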
*/ + +/* c_cflag bit meaning */ +#define CBAUD 0000037 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CBAUDEX 0000000 +#define B57600 00020 +#define B115200 00021 +#define B230400 00022 +#define B460800 00023 +#define B500000 00024 +#define B576000 00025 +#define B921600 00026 +#define B1000000 00027 +#define B1152000 00030 +#define B1500000 00031 +#define B2000000 00032 +#define B2500000 00033 +#define B3000000 00034 +#define B3500000 00035 +#define B4000000 00036 + +#define CSIZE 00001400 +#define CS5 00000000 +#define CS6 00000400 +#define CS7 00001000 +#define CS8 00001400 + +#define CSTOPB 00002000 +#define CREAD 00004000 +#define PARENB 00010000 +#define PARODD 00020000 +#define HUPCL 00040000 + +#define CLOCAL 00100000 +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define ISIG 0x00000080 +#define ICANON 0x00000100 +#define XCASE 0x00004000 +#define ECHO 0x00000008 +#define ECHOE 0x00000002 +#define ECHOK 0x00000004 +#define ECHONL 0x00000010 +#define NOFLSH 0x80000000 +#define TOSTOP 0x00400000 +#define ECHOCTL 0x00000040 +#define ECHOPRT 0x00000020 +#define ECHOKE 0x00000001 +#define FLUSHO 0x00800000 +#define PENDIN 0x20000000 +#define IEXTEN 0x00000400 + +/* Values for the ACTION argument to `tcflow'. */ +#define TCOOFF 0 +#define TCOON 1 +#define TCIOFF 2 +#define TCION 3 + +/* Values for the QUEUE_SELECTOR argument to `tcflush'. */ +#define TCIFLUSH 0 +#define TCOFLUSH 1 +#define TCIOFLUSH 2 + +/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _ALPHA_TERMBITS_H */ diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h new file mode 100644 index 00000000000..fa13716a11c --- /dev/null +++ b/arch/alpha/include/asm/termios.h @@ -0,0 +1,146 @@ +#ifndef _ALPHA_TERMIOS_H +#define _ALPHA_TERMIOS_H + +#include +#include + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + short sg_flags; +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* + * c_cc characters in the termio structure. Oh, how I love being + * backwardly compatible. Notice that character 4 and 5 are + * interpreted differently depending on whether ICANON is set in + * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise + * as _VMIN and V_TIME. This is for compatibility with OSF/1 (which + * is compatible with sysV)... 
+ */ +#define _VINTR 0 +#define _VQUIT 1 +#define _VERASE 2 +#define _VKILL 3 +#define _VEOF 4 +#define _VMIN 4 +#define _VEOL 5 +#define _VTIME 5 +#define _VEOL2 6 +#define _VSWTC 7 + +#ifdef __KERNEL__ +/* eof=^D eol=\0 eol2=\0 erase=del + werase=^W kill=^U reprint=^R sxtc=\0 + intr=^C quit=^\ susp=^Z + start=^Q stop=^S lnext=^V discard=^U + vmin=\1 vtime=\0 +*/ +#define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ + +#define user_termio_to_kernel_termios(a_termios, u_termio) \ +({ \ + struct ktermios *k_termios = (a_termios); \ + struct termio k_termio; \ + int canon, ret; \ + \ + ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio)); \ + if (!ret) { \ + /* Overwrite only the low bits. */ \ + *(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; \ + *(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; \ + *(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; \ + *(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; \ + canon = k_termio.c_lflag & ICANON; \ + \ + k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR]; \ + k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT]; \ + k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE]; \ + k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL]; \ + k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2]; \ + k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC]; \ + k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; \ + k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; \ + } \ + ret; \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. + * + * Note the "fun" _VMIN overloading. + */ +#define kernel_termios_to_user_termio(u_termio, a_termios) \ +({ \ + struct ktermios *k_termios = (a_termios); \ + struct termio k_termio; \ + int canon; \ + \ + k_termio.c_iflag = k_termios->c_iflag; \ + k_termio.c_oflag = k_termios->c_oflag; \ + k_termio.c_cflag = k_termios->c_cflag; \ + canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON; \ + \ + k_termio.c_line = k_termios->c_line; \ + k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR]; \ + k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT]; \ + k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE]; \ + k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL]; \ + k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN]; \ + k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ? 
VEOL : VTIME]; \ + k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2]; \ + k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC]; \ + \ + copy_to_user(u_termio, &k_termio, sizeof(k_termio)); \ +}) + +#define user_termios_to_kernel_termios(k, u) \ + copy_from_user(k, u, sizeof(struct termios)) + +#define kernel_termios_to_user_termios(u, k) \ + copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __KERNEL__ */ + +#endif /* _ALPHA_TERMIOS_H */ diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h new file mode 100644 index 00000000000..15fda434442 --- /dev/null +++ b/arch/alpha/include/asm/thread_info.h @@ -0,0 +1,114 @@ +#ifndef _ALPHA_THREAD_INFO_H +#define _ALPHA_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include +#include +#endif + +#ifndef __ASSEMBLY__ +struct thread_info { + struct pcb_struct pcb; /* palcode state */ + + struct task_struct *task; /* main task structure */ + unsigned int flags; /* low level flags */ + unsigned int ieee_state; /* see fpu.h */ + + struct exec_domain *exec_domain; /* execution domain */ + mm_segment_t addr_limit; /* thread address space */ + unsigned cpu; /* current CPU */ + int preempt_count; /* 0 => preemptable, <0 => BUG */ + + int bpt_nsaved; + unsigned long bpt_addr[2]; /* breakpoint handling */ + unsigned int bpt_insn[2]; + + struct restart_block restart_block; +}; + +/* + * Macros/functions for gaining access to the thread information structure. + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* How to get the thread information struct from C. */ +register struct thread_info *__current_thread_info __asm__("$8"); +#define current_thread_info() __current_thread_info + +/* Thread information allocation. */ +#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE (2*PAGE_SIZE) + +#endif /* __ASSEMBLY__ */ + +#define PREEMPT_ACTIVE 0x40000000 + +/* + * Thread information flags: + * - these are process state flags and used from assembly + * - pending work-to-be-done flags come first to fit in and immediate operand. + * + * TIF_SYSCALL_TRACE is known to be 0 via blbs. 
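The note above about the flag layout is easiest to see in C; a sketch of the equivalent tests, using the flag bits defined just below (the helper names are illustrative, not part of the patch):

/* Conceptual C version of the assembly-side checks; illustrative only. */
static inline int example_work_pending(struct thread_info *ti)
{
	/* low-order bits, so the mask fits an Alpha immediate operand */
	return ti->flags & ((1 << TIF_SIGPENDING) | (1 << TIF_NEED_RESCHED));
}

static inline int example_syscall_traced(struct thread_info *ti)
{
	/* TIF_SYSCALL_TRACE is bit 0, so a single blbs suffices in assembly */
	return ti->flags & 1;
}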
+ */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_POLLING_NRFLAG 3 /* poll_idle is polling NEED_RESCHED */ +#define TIF_DIE_IF_KERNEL 4 /* dik recursion lock */ +#define TIF_UAC_NOPRINT 5 /* see sysinfo.h */ +#define TIF_UAC_NOFIX 6 +#define TIF_UAC_SIGBUS 7 +#define TIF_MEMDIE 8 +#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ + +#define _TIF_SYSCALL_TRACE (1<flags = ((task_thread_info(task)->flags & \ + ~ALPHA_UAC_MASK) \ + | (((value) << ALPHA_UAC_SHIFT) & (1<flags & (1 << TIF_UAC_NOPRINT))\ + >> ALPHA_UAC_SHIFT \ + | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\ + >> (ALPHA_UAC_SHIFT + 1) \ + | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\ + >> (ALPHA_UAC_SHIFT - 1), \ + (int __user *)(value)); \ + }) + +#endif /* __KERNEL__ */ +#endif /* _ALPHA_THREAD_INFO_H */ diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h new file mode 100644 index 00000000000..afa0c45e3e9 --- /dev/null +++ b/arch/alpha/include/asm/timex.h @@ -0,0 +1,31 @@ +/* + * linux/include/asm-alpha/timex.h + * + * ALPHA architecture timex specifications + */ +#ifndef _ASMALPHA_TIMEX_H +#define _ASMALPHA_TIMEX_H + +/* With only one or two oddballs, we use the RTC as the ticker, selecting + the 32.768kHz reference clock, which nicely divides down to our HZ. */ +#define CLOCK_TICK_RATE 32768 + +/* + * Standard way to access the cycle counter. + * Currently only used on SMP for scheduling. + * + * Only the low 32 bits are available as a continuously counting entity. + * But this only means we'll force a reschedule every 8 seconds or so, + * which isn't an evil thing. + */ + +typedef unsigned int cycles_t; + +static inline cycles_t get_cycles (void) +{ + cycles_t ret; + __asm__ __volatile__ ("rpcc %0" : "=r"(ret)); + return ret; +} + +#endif diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h new file mode 100644 index 00000000000..c13636575fb --- /dev/null +++ b/arch/alpha/include/asm/tlb.h @@ -0,0 +1,15 @@ +#ifndef _ALPHA_TLB_H +#define _ALPHA_TLB_H + +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) +#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include + +#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) +#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) + +#endif diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h new file mode 100644 index 00000000000..9d87aaa08c0 --- /dev/null +++ b/arch/alpha/include/asm/tlbflush.h @@ -0,0 +1,151 @@ +#ifndef _ALPHA_TLBFLUSH_H +#define _ALPHA_TLBFLUSH_H + +#include +#include +#include + +#ifndef __EXTERN_INLINE +#define __EXTERN_INLINE extern inline +#define __MMU_EXTERN_INLINE +#endif + +extern void __load_new_mm_context(struct mm_struct *); + + +/* Use a few helper functions to hide the ugly broken ASN + numbers on early Alphas (ev4 and ev45). */ + +__EXTERN_INLINE void +ev4_flush_tlb_current(struct mm_struct *mm) +{ + __load_new_mm_context(mm); + tbiap(); +} + +__EXTERN_INLINE void +ev5_flush_tlb_current(struct mm_struct *mm) +{ + __load_new_mm_context(mm); +} + +/* Flush just one page in the current TLB set. We need to be very + careful about the icache here, there is no way to invalidate a + specific icache page. 
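Returning to the rpcc-based get_cycles() in timex.h above: since only the low 32 bits count continuously, interval measurements rely on unsigned wraparound. A sketch, with the helper name being illustrative:

/* Illustrative only: cycles_t is 32 bits wide, so plain unsigned
   subtraction stays correct across a single counter wrap. */
static inline cycles_t example_cycles_since(cycles_t start)
{
	return get_cycles() - start;	/* arithmetic is modulo 2^32 */
}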
*/ + +__EXTERN_INLINE void +ev4_flush_tlb_current_page(struct mm_struct * mm, + struct vm_area_struct *vma, + unsigned long addr) +{ + int tbi_flag = 2; + if (vma->vm_flags & VM_EXEC) { + __load_new_mm_context(mm); + tbi_flag = 3; + } + tbi(tbi_flag, addr); +} + +__EXTERN_INLINE void +ev5_flush_tlb_current_page(struct mm_struct * mm, + struct vm_area_struct *vma, + unsigned long addr) +{ + if (vma->vm_flags & VM_EXEC) + __load_new_mm_context(mm); + else + tbi(2, addr); +} + + +#ifdef CONFIG_ALPHA_GENERIC +# define flush_tlb_current alpha_mv.mv_flush_tlb_current +# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page +#else +# ifdef CONFIG_ALPHA_EV4 +# define flush_tlb_current ev4_flush_tlb_current +# define flush_tlb_current_page ev4_flush_tlb_current_page +# else +# define flush_tlb_current ev5_flush_tlb_current +# define flush_tlb_current_page ev5_flush_tlb_current_page +# endif +#endif + +#ifdef __MMU_EXTERN_INLINE +#undef __EXTERN_INLINE +#undef __MMU_EXTERN_INLINE +#endif + +/* Flush current user mapping. */ +static inline void +flush_tlb(void) +{ + flush_tlb_current(current->active_mm); +} + +/* Flush someone else's user mapping. */ +static inline void +flush_tlb_other(struct mm_struct *mm) +{ + unsigned long *mmc = &mm->context[smp_processor_id()]; + /* Check it's not zero first to avoid cacheline ping pong + when possible. */ + if (*mmc) *mmc = 0; +} + +#ifndef CONFIG_SMP +/* Flush everything (kernel mapping may also have changed + due to vmalloc/vfree). */ +static inline void flush_tlb_all(void) +{ + tbia(); +} + +/* Flush a specified user mapping. */ +static inline void +flush_tlb_mm(struct mm_struct *mm) +{ + if (mm == current->active_mm) + flush_tlb_current(mm); + else + flush_tlb_other(mm); +} + +/* Page-granular tlb flush. */ +static inline void +flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + struct mm_struct *mm = vma->vm_mm; + + if (mm == current->active_mm) + flush_tlb_current_page(mm, vma, addr); + else + flush_tlb_other(mm); +} + +/* Flush a specified range of user mapping. On the Alpha we flush + the whole user tlb. 
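A usage sketch for the page-granular flush defined above; the helper and the set_pte() call are assumptions about typical calling code, not part of this patch.

/* Illustrative only: after changing one user PTE, drop the stale translation. */
static void example_update_pte(struct vm_area_struct *vma, unsigned long addr,
			       pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);		/* install the new translation (assumed accessor) */
	flush_tlb_page(vma, addr);	/* invalidate the old TLB entry for addr */
}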
*/ +static inline void +flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + flush_tlb_mm(vma->vm_mm); +} + +#else /* CONFIG_SMP */ + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *); +extern void flush_tlb_page(struct vm_area_struct *, unsigned long); +extern void flush_tlb_range(struct vm_area_struct *, unsigned long, + unsigned long); + +#endif /* CONFIG_SMP */ + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + flush_tlb_all(); +} + +#endif /* _ALPHA_TLBFLUSH_H */ diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h new file mode 100644 index 00000000000..149532e162c --- /dev/null +++ b/arch/alpha/include/asm/topology.h @@ -0,0 +1,47 @@ +#ifndef _ASM_ALPHA_TOPOLOGY_H +#define _ASM_ALPHA_TOPOLOGY_H + +#include +#include +#include + +#ifdef CONFIG_NUMA +static inline int cpu_to_node(int cpu) +{ + int node; + + if (!alpha_mv.cpuid_to_nid) + return 0; + + node = alpha_mv.cpuid_to_nid(cpu); + +#ifdef DEBUG_NUMA + BUG_ON(node < 0); +#endif + + return node; +} + +static inline cpumask_t node_to_cpumask(int node) +{ + cpumask_t node_cpu_mask = CPU_MASK_NONE; + int cpu; + + for_each_online_cpu(cpu) { + if (cpu_to_node(cpu) == node) + cpu_set(cpu, node_cpu_mask); + } + +#ifdef DEBUG_NUMA + printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask); +#endif + + return node_cpu_mask; +} + +#define pcibus_to_cpumask(bus) (cpu_online_map) + +#endif /* !CONFIG_NUMA */ +# include + +#endif /* _ASM_ALPHA_TOPOLOGY_H */ diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h new file mode 100644 index 00000000000..c1541353cce --- /dev/null +++ b/arch/alpha/include/asm/types.h @@ -0,0 +1,33 @@ +#ifndef _ALPHA_TYPES_H +#define _ALPHA_TYPES_H + +/* + * This file is never included by application software unless + * explicitly requested (e.g., via linux/types.h) in which case the + * application is Linux specific so (user-) name space pollution is + * not a major issue. However, for interoperability, libraries still + * need to be careful to avoid a name clashes. + */ +#include + +#ifndef __ASSEMBLY__ + +typedef unsigned int umode_t; + +#endif /* __ASSEMBLY__ */ + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#define BITS_PER_LONG 64 + +#ifndef __ASSEMBLY__ + +typedef u64 dma_addr_t; +typedef u64 dma64_addr_t; + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ALPHA_TYPES_H */ diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h new file mode 100644 index 00000000000..22de3b434a2 --- /dev/null +++ b/arch/alpha/include/asm/uaccess.h @@ -0,0 +1,511 @@ +#ifndef __ALPHA_UACCESS_H +#define __ALPHA_UACCESS_H + +#include +#include + + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * Or at least it did once upon a time. Nowadays it is a mask that + * defines which bits of the address space are off limits. This is a + * wee bit faster than the above. + * + * For historical reasons, these macros are grossly misnamed. 
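The KERNEL_DS/USER_DS limits defined just below are switched with the usual get_fs()/set_fs() pattern when kernel code wants its own buffers to pass the user-pointer checks; a hedged sketch, with the callback and its arguments purely illustrative:

static void example_with_kernel_ds(void (*do_read)(void __user *buf, long len),
				   void *kbuf, long len)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);			/* kernel addresses now pass access_ok() */
	do_read((__force void __user *)kbuf, len);
	set_fs(old_fs);				/* always restore the previous limit */
}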
+ */ + +#define KERNEL_DS ((mm_segment_t) { 0UL }) +#define USER_DS ((mm_segment_t) { -0x40000000000UL }) + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +#define get_fs() (current_thread_info()->addr_limit) +#define get_ds() (KERNEL_DS) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a,b) ((a).seg == (b).seg) + +/* + * Is a address valid? This does a straightforward calculation rather + * than tests. + * + * Address valid if: + * - "addr" doesn't have any high-bits set + * - AND "size" doesn't have any high-bits set + * - AND "addr+size" doesn't have any high-bits set + * - OR we are in kernel mode. + */ +#define __access_ok(addr,size,segment) \ + (((segment).seg & (addr | size | (addr+size))) == 0) + +#define access_ok(type,addr,size) \ +({ \ + __chk_user_ptr(addr); \ + __access_ok(((unsigned long)(addr)),(size),get_fs()); \ +}) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * As the alpha uses the same address space for kernel and user + * data, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) + * + * Careful to not + * (a) re-use the arguments for side effects (sizeof/typeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x,ptr) \ + __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) +#define get_user(x,ptr) \ + __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) + +/* + * The "__xxx" versions do not do address space checking, useful when + * doing multiple accesses to the same area (the programmer has to do the + * checks by hand with "access_ok()") + */ +#define __put_user(x,ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __get_user(x,ptr) \ + __get_user_nocheck((x),(ptr),sizeof(*(ptr))) + +/* + * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to + * encode the bits we need for resolving the exception. See the + * more extensive comments with fixup_inline_exception below for + * more information. 
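Going back to the __access_ok() test defined earlier in this header, a worked example: USER_DS.seg is -0x40000000000UL, i.e. every bit at and above 2^42 set, so one AND classifies the whole addr/size range. The wrapper function below exists only to hold the annotated calls.

static inline void example_access_ok(void)
{
	int ok;

	ok = __access_ok(0x000003ff00000000UL, 4096, USER_DS);   /* 1: everything below 2^42   */
	ok = __access_ok(0xfffffc0000000000UL, 4096, USER_DS);   /* 0: high bits set, rejected */
	ok = __access_ok(0xfffffc0000000000UL, 4096, KERNEL_DS); /* 1: KERNEL_DS.seg is 0      */
	(void)ok;
}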
+ */ + +extern void __get_user_unknown(void); + +#define __get_user_nocheck(x,ptr,size) \ +({ \ + long __gu_err = 0; \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: __get_user_8(ptr); break; \ + case 2: __get_user_16(ptr); break; \ + case 4: __get_user_32(ptr); break; \ + case 8: __get_user_64(ptr); break; \ + default: __get_user_unknown(); break; \ + } \ + (x) = (__typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x,ptr,size,segment) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ + __gu_err = 0; \ + switch (size) { \ + case 1: __get_user_8(__gu_addr); break; \ + case 2: __get_user_16(__gu_addr); break; \ + case 4: __get_user_32(__gu_addr); break; \ + case 8: __get_user_64(__gu_addr); break; \ + default: __get_user_unknown(); break; \ + } \ + } \ + (x) = (__typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_64(addr) \ + __asm__("1: ldq %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_32(addr) \ + __asm__("1: ldl %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#ifdef __alpha_bwx__ +/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ + +#define __get_user_16(addr) \ + __asm__("1: ldwu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_8(addr) \ + __asm__("1: ldbu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) +#else +/* Unfortunately, we can't get an unaligned access trap for the sub-word + load, so we have to do a general unaligned operation. 
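A short usage sketch for the checked accessors defined earlier in this header; the function and its purpose are illustrative.

/* Read one int from user space, double it, write it back; both
   accessors return 0 on success or -EFAULT on a bad pointer. */
static long example_double_user_int(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val * 2, uptr);
}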
*/ + +#define __get_user_16(addr) \ +{ \ + long __gu_tmp; \ + __asm__("1: ldq_u %0,0(%3)\n" \ + "2: ldq_u %1,1(%3)\n" \ + " extwl %0,%3,%0\n" \ + " extwh %1,%3,%1\n" \ + " or %0,%1,%0\n" \ + "3:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda %0, 3b-1b(%2)\n" \ + " .long 2b - .\n" \ + " lda %0, 3b-2b(%2)\n" \ + ".previous" \ + : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \ + : "r"(addr), "2"(__gu_err)); \ +} + +#define __get_user_8(addr) \ + __asm__("1: ldq_u %0,0(%2)\n" \ + " extbl %0,%2,%0\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=&r"(__gu_val), "=r"(__gu_err) \ + : "r"(addr), "1"(__gu_err)) +#endif + +extern void __put_user_unknown(void); + +#define __put_user_nocheck(x,ptr,size) \ +({ \ + long __pu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: __put_user_8(x,ptr); break; \ + case 2: __put_user_16(x,ptr); break; \ + case 4: __put_user_32(x,ptr); break; \ + case 8: __put_user_64(x,ptr); break; \ + default: __put_user_unknown(); break; \ + } \ + __pu_err; \ +}) + +#define __put_user_check(x,ptr,size,segment) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ + __pu_err = 0; \ + switch (size) { \ + case 1: __put_user_8(x,__pu_addr); break; \ + case 2: __put_user_16(x,__pu_addr); break; \ + case 4: __put_user_32(x,__pu_addr); break; \ + case 8: __put_user_64(x,__pu_addr); break; \ + default: __put_user_unknown(); break; \ + } \ + } \ + __pu_err; \ +}) + +/* + * The "__put_user_xx()" macros tell gcc they read from memory + * instead of writing: this is because they do not write to + * any memory gcc knows about, so there are no aliasing issues + */ +#define __put_user_64(x,addr) \ +__asm__ __volatile__("1: stq %r2,%1\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda $31,2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) + +#define __put_user_32(x,addr) \ +__asm__ __volatile__("1: stl %r2,%1\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda $31,2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#ifdef __alpha_bwx__ +/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ + +#define __put_user_16(x,addr) \ +__asm__ __volatile__("1: stw %r2,%1\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda $31,2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_8(x,addr) \ +__asm__ __volatile__("1: stb %r2,%1\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda $31,2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) +#else +/* Unfortunately, we can't get an unaligned access trap for the sub-word + write, so we have to do a general unaligned operation. 
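The ldq_u/extwl/extwh sequence used by the non-BWX __get_user_16() above (and mirrored by the insert/mask store sequence that follows) has a compact C equivalent; this is a conceptual sketch only, since the real macros operate on user addresses and also wire up __ex_table fixups:

static inline unsigned int example_load_u16_unaligned(unsigned long addr)
{
	unsigned long lo = *(unsigned long *)(addr & ~7UL);	   /* ldq_u addr   */
	unsigned long hi = *(unsigned long *)((addr + 1) & ~7UL); /* ldq_u addr+1 */
	unsigned long o  = addr & 7;

	return ((lo >> (8 * o)) |			 /* extwl: low piece  */
		(o ? hi << (64 - 8 * o) : 0)) & 0xffff;	 /* extwh: high piece */
}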
*/ + +#define __put_user_16(x,addr) \ +{ \ + long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ + __asm__ __volatile__( \ + "1: ldq_u %2,1(%5)\n" \ + "2: ldq_u %1,0(%5)\n" \ + " inswh %6,%5,%4\n" \ + " inswl %6,%5,%3\n" \ + " mskwh %2,%5,%2\n" \ + " mskwl %1,%5,%1\n" \ + " or %2,%4,%2\n" \ + " or %1,%3,%1\n" \ + "3: stq_u %2,1(%5)\n" \ + "4: stq_u %1,0(%5)\n" \ + "5:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda $31, 5b-1b(%0)\n" \ + " .long 2b - .\n" \ + " lda $31, 5b-2b(%0)\n" \ + " .long 3b - .\n" \ + " lda $31, 5b-3b(%0)\n" \ + " .long 4b - .\n" \ + " lda $31, 5b-4b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err), "=&r"(__pu_tmp1), \ + "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ + "=&r"(__pu_tmp4) \ + : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ +} + +#define __put_user_8(x,addr) \ +{ \ + long __pu_tmp1, __pu_tmp2; \ + __asm__ __volatile__( \ + "1: ldq_u %1,0(%4)\n" \ + " insbl %3,%4,%2\n" \ + " mskbl %1,%4,%1\n" \ + " or %1,%2,%1\n" \ + "2: stq_u %1,0(%4)\n" \ + "3:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " lda $31, 3b-1b(%0)\n" \ + " .long 2b - .\n" \ + " lda $31, 3b-2b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err), \ + "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ + : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ +} +#endif + + +/* + * Complex access routines + */ + +/* This little bit of silliness is to get the GP loaded for a function + that ordinarily wouldn't. Otherwise we could have it done by the macro + directly, which can be optimized the linker. */ +#ifdef MODULE +#define __module_address(sym) "r"(sym), +#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym +#else +#define __module_address(sym) +#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" +#endif + +extern void __copy_user(void); + +extern inline long +__copy_tofrom_user_nocheck(void *to, const void *from, long len) +{ + register void * __cu_to __asm__("$6") = to; + register const void * __cu_from __asm__("$7") = from; + register long __cu_len __asm__("$0") = len; + + __asm__ __volatile__( + __module_call(28, 3, __copy_user) + : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) + : __module_address(__copy_user) + "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) + : "$1","$2","$3","$4","$5","$28","memory"); + + return __cu_len; +} + +extern inline long +__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) +{ + if (__access_ok((unsigned long)validate, len, get_fs())) + len = __copy_tofrom_user_nocheck(to, from, len); + return len; +} + +#define __copy_to_user(to,from,n) \ +({ \ + __chk_user_ptr(to); \ + __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ +}) +#define __copy_from_user(to,from,n) \ +({ \ + __chk_user_ptr(from); \ + __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ +}) + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + + +extern inline long +copy_to_user(void __user *to, const void *from, long n) +{ + return __copy_tofrom_user((__force void *)to, from, n, to); +} + +extern inline long +copy_from_user(void *to, const void __user *from, long n) +{ + return __copy_tofrom_user(to, (__force void *)from, n, from); +} + +extern void __do_clear_user(void); + +extern inline long +__clear_user(void __user *to, long len) +{ + register void __user * __cl_to __asm__("$6") = to; + register long __cl_len __asm__("$0") = len; + __asm__ __volatile__( + __module_call(28, 2, __do_clear_user) + : "=r"(__cl_len), "=r"(__cl_to) + : 
__module_address(__do_clear_user) + "0"(__cl_len), "1"(__cl_to) + : "$1","$2","$3","$4","$5","$28","memory"); + return __cl_len; +} + +extern inline long +clear_user(void __user *to, long len) +{ + if (__access_ok((unsigned long)to, len, get_fs())) + len = __clear_user(to, len); + return len; +} + +#undef __module_address +#undef __module_call + +/* Returns: -EFAULT if exception before terminator, N if the entire + buffer filled, else strlen. */ + +extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); + +extern inline long +strncpy_from_user(char *to, const char __user *from, long n) +{ + long ret = -EFAULT; + if (__access_ok((unsigned long)from, 0, get_fs())) + ret = __strncpy_from_user(to, from, n); + return ret; +} + +/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ +extern long __strlen_user(const char __user *); + +extern inline long strlen_user(const char __user *str) +{ + return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; +} + +/* Returns: 0 if exception before NUL or reaching the supplied limit (N), + * a value greater than N if the limit would be exceeded, else strlen. */ +extern long __strnlen_user(const char __user *, long); + +extern inline long strnlen_user(const char __user *str, long n) +{ + return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; +} + +/* + * About the exception table: + * + * - insn is a 32-bit pc-relative offset from the faulting insn. + * - nextinsn is a 16-bit offset off of the faulting instruction + * (not off of the *next* instruction as branches are). + * - errreg is the register in which to place -EFAULT. + * - valreg is the final target register for the load sequence + * and will be zeroed. + * + * Either errreg or valreg may be $31, in which case nothing happens. + * + * The exception fixup information "just so happens" to be arranged + * as in a MEM format instruction. 
This lets us emit our three + * values like so: + * + * lda valreg, nextinsn(errreg) + * + */ + +struct exception_table_entry +{ + signed int insn; + union exception_fixup { + unsigned unit; + struct { + signed int nextinsn : 16; + unsigned int errreg : 5; + unsigned int valreg : 5; + } bits; + } fixup; +}; + +/* Returns the new pc */ +#define fixup_exception(map_reg, fixup, pc) \ +({ \ + if ((fixup)->fixup.bits.valreg != 31) \ + map_reg((fixup)->fixup.bits.valreg) = 0; \ + if ((fixup)->fixup.bits.errreg != 31) \ + map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \ + (pc) + (fixup)->fixup.bits.nextinsn; \ +}) + + +#endif /* __ALPHA_UACCESS_H */ diff --git a/arch/alpha/include/asm/ucontext.h b/arch/alpha/include/asm/ucontext.h new file mode 100644 index 00000000000..47578ab4215 --- /dev/null +++ b/arch/alpha/include/asm/ucontext.h @@ -0,0 +1,13 @@ +#ifndef _ASMAXP_UCONTEXT_H +#define _ASMAXP_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + old_sigset_t uc_osf_sigmask; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* !_ASMAXP_UCONTEXT_H */ diff --git a/arch/alpha/include/asm/unaligned.h b/arch/alpha/include/asm/unaligned.h new file mode 100644 index 00000000000..3787c60aed3 --- /dev/null +++ b/arch/alpha/include/asm/unaligned.h @@ -0,0 +1,11 @@ +#ifndef _ASM_ALPHA_UNALIGNED_H +#define _ASM_ALPHA_UNALIGNED_H + +#include +#include +#include + +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le + +#endif /* _ASM_ALPHA_UNALIGNED_H */ diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h new file mode 100644 index 00000000000..5b5c1748594 --- /dev/null +++ b/arch/alpha/include/asm/unistd.h @@ -0,0 +1,464 @@ +#ifndef _ALPHA_UNISTD_H +#define _ALPHA_UNISTD_H + +#define __NR_osf_syscall 0 /* not implemented */ +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_osf_old_open 5 /* not implemented */ +#define __NR_close 6 +#define __NR_osf_wait4 7 +#define __NR_osf_old_creat 8 /* not implemented */ +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_osf_execve 11 /* not implemented */ +#define __NR_chdir 12 +#define __NR_fchdir 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_chown 16 +#define __NR_brk 17 +#define __NR_osf_getfsstat 18 /* not implemented */ +#define __NR_lseek 19 +#define __NR_getxpid 20 +#define __NR_osf_mount 21 +#define __NR_umount 22 +#define __NR_setuid 23 +#define __NR_getxuid 24 +#define __NR_exec_with_loader 25 /* not implemented */ +#define __NR_ptrace 26 +#define __NR_osf_nrecvmsg 27 /* not implemented */ +#define __NR_osf_nsendmsg 28 /* not implemented */ +#define __NR_osf_nrecvfrom 29 /* not implemented */ +#define __NR_osf_naccept 30 /* not implemented */ +#define __NR_osf_ngetpeername 31 /* not implemented */ +#define __NR_osf_ngetsockname 32 /* not implemented */ +#define __NR_access 33 +#define __NR_osf_chflags 34 /* not implemented */ +#define __NR_osf_fchflags 35 /* not implemented */ +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_osf_old_stat 38 /* not implemented */ +#define __NR_setpgid 39 +#define __NR_osf_old_lstat 40 /* not implemented */ +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_osf_set_program_attributes 43 +#define __NR_osf_profil 44 /* not implemented */ +#define __NR_open 45 +#define __NR_osf_old_sigaction 46 /* not implemented */ +#define __NR_getxgid 47 +#define __NR_osf_sigprocmask 48 +#define 
__NR_osf_getlogin 49 /* not implemented */ +#define __NR_osf_setlogin 50 /* not implemented */ +#define __NR_acct 51 +#define __NR_sigpending 52 + +#define __NR_ioctl 54 +#define __NR_osf_reboot 55 /* not implemented */ +#define __NR_osf_revoke 56 /* not implemented */ +#define __NR_symlink 57 +#define __NR_readlink 58 +#define __NR_execve 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_osf_old_fstat 62 /* not implemented */ +#define __NR_getpgrp 63 +#define __NR_getpagesize 64 +#define __NR_osf_mremap 65 /* not implemented */ +#define __NR_vfork 66 +#define __NR_stat 67 +#define __NR_lstat 68 +#define __NR_osf_sbrk 69 /* not implemented */ +#define __NR_osf_sstk 70 /* not implemented */ +#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */ +#define __NR_osf_old_vadvise 72 /* not implemented */ +#define __NR_munmap 73 +#define __NR_mprotect 74 +#define __NR_madvise 75 +#define __NR_vhangup 76 +#define __NR_osf_kmodcall 77 /* not implemented */ +#define __NR_osf_mincore 78 /* not implemented */ +#define __NR_getgroups 79 +#define __NR_setgroups 80 +#define __NR_osf_old_getpgrp 81 /* not implemented */ +#define __NR_setpgrp 82 /* BSD alias for setpgid */ +#define __NR_osf_setitimer 83 +#define __NR_osf_old_wait 84 /* not implemented */ +#define __NR_osf_table 85 /* not implemented */ +#define __NR_osf_getitimer 86 +#define __NR_gethostname 87 +#define __NR_sethostname 88 +#define __NR_getdtablesize 89 +#define __NR_dup2 90 +#define __NR_fstat 91 +#define __NR_fcntl 92 +#define __NR_osf_select 93 +#define __NR_poll 94 +#define __NR_fsync 95 +#define __NR_setpriority 96 +#define __NR_socket 97 +#define __NR_connect 98 +#define __NR_accept 99 +#define __NR_getpriority 100 +#define __NR_send 101 +#define __NR_recv 102 +#define __NR_sigreturn 103 +#define __NR_bind 104 +#define __NR_setsockopt 105 +#define __NR_listen 106 +#define __NR_osf_plock 107 /* not implemented */ +#define __NR_osf_old_sigvec 108 /* not implemented */ +#define __NR_osf_old_sigblock 109 /* not implemented */ +#define __NR_osf_old_sigsetmask 110 /* not implemented */ +#define __NR_sigsuspend 111 +#define __NR_osf_sigstack 112 +#define __NR_recvmsg 113 +#define __NR_sendmsg 114 +#define __NR_osf_old_vtrace 115 /* not implemented */ +#define __NR_osf_gettimeofday 116 +#define __NR_osf_getrusage 117 +#define __NR_getsockopt 118 + +#define __NR_readv 120 +#define __NR_writev 121 +#define __NR_osf_settimeofday 122 +#define __NR_fchown 123 +#define __NR_fchmod 124 +#define __NR_recvfrom 125 +#define __NR_setreuid 126 +#define __NR_setregid 127 +#define __NR_rename 128 +#define __NR_truncate 129 +#define __NR_ftruncate 130 +#define __NR_flock 131 +#define __NR_setgid 132 +#define __NR_sendto 133 +#define __NR_shutdown 134 +#define __NR_socketpair 135 +#define __NR_mkdir 136 +#define __NR_rmdir 137 +#define __NR_osf_utimes 138 +#define __NR_osf_old_sigreturn 139 /* not implemented */ +#define __NR_osf_adjtime 140 /* not implemented */ +#define __NR_getpeername 141 +#define __NR_osf_gethostid 142 /* not implemented */ +#define __NR_osf_sethostid 143 /* not implemented */ +#define __NR_getrlimit 144 +#define __NR_setrlimit 145 +#define __NR_osf_old_killpg 146 /* not implemented */ +#define __NR_setsid 147 +#define __NR_quotactl 148 +#define __NR_osf_oldquota 149 /* not implemented */ +#define __NR_getsockname 150 + +#define __NR_osf_pid_block 153 /* not implemented */ +#define __NR_osf_pid_unblock 154 /* not implemented */ + +#define __NR_sigaction 156 +#define __NR_osf_sigwaitprim 157 /* not implemented */ 
+#define __NR_osf_nfssvc 158 /* not implemented */ +#define __NR_osf_getdirentries 159 +#define __NR_osf_statfs 160 +#define __NR_osf_fstatfs 161 + +#define __NR_osf_asynch_daemon 163 /* not implemented */ +#define __NR_osf_getfh 164 /* not implemented */ +#define __NR_osf_getdomainname 165 +#define __NR_setdomainname 166 + +#define __NR_osf_exportfs 169 /* not implemented */ + +#define __NR_osf_alt_plock 181 /* not implemented */ + +#define __NR_osf_getmnt 184 /* not implemented */ + +#define __NR_osf_alt_sigpending 187 /* not implemented */ +#define __NR_osf_alt_setsid 188 /* not implemented */ + +#define __NR_osf_swapon 199 +#define __NR_msgctl 200 +#define __NR_msgget 201 +#define __NR_msgrcv 202 +#define __NR_msgsnd 203 +#define __NR_semctl 204 +#define __NR_semget 205 +#define __NR_semop 206 +#define __NR_osf_utsname 207 +#define __NR_lchown 208 +#define __NR_osf_shmat 209 +#define __NR_shmctl 210 +#define __NR_shmdt 211 +#define __NR_shmget 212 +#define __NR_osf_mvalid 213 /* not implemented */ +#define __NR_osf_getaddressconf 214 /* not implemented */ +#define __NR_osf_msleep 215 /* not implemented */ +#define __NR_osf_mwakeup 216 /* not implemented */ +#define __NR_msync 217 +#define __NR_osf_signal 218 /* not implemented */ +#define __NR_osf_utc_gettime 219 /* not implemented */ +#define __NR_osf_utc_adjtime 220 /* not implemented */ + +#define __NR_osf_security 222 /* not implemented */ +#define __NR_osf_kloadcall 223 /* not implemented */ + +#define __NR_getpgid 233 +#define __NR_getsid 234 +#define __NR_sigaltstack 235 +#define __NR_osf_waitid 236 /* not implemented */ +#define __NR_osf_priocntlset 237 /* not implemented */ +#define __NR_osf_sigsendset 238 /* not implemented */ +#define __NR_osf_set_speculative 239 /* not implemented */ +#define __NR_osf_msfs_syscall 240 /* not implemented */ +#define __NR_osf_sysinfo 241 +#define __NR_osf_uadmin 242 /* not implemented */ +#define __NR_osf_fuser 243 /* not implemented */ +#define __NR_osf_proplist_syscall 244 +#define __NR_osf_ntp_adjtime 245 /* not implemented */ +#define __NR_osf_ntp_gettime 246 /* not implemented */ +#define __NR_osf_pathconf 247 /* not implemented */ +#define __NR_osf_fpathconf 248 /* not implemented */ + +#define __NR_osf_uswitch 250 /* not implemented */ +#define __NR_osf_usleep_thread 251 +#define __NR_osf_audcntl 252 /* not implemented */ +#define __NR_osf_audgen 253 /* not implemented */ +#define __NR_sysfs 254 +#define __NR_osf_subsys_info 255 /* not implemented */ +#define __NR_osf_getsysinfo 256 +#define __NR_osf_setsysinfo 257 +#define __NR_osf_afs_syscall 258 /* not implemented */ +#define __NR_osf_swapctl 259 /* not implemented */ +#define __NR_osf_memcntl 260 /* not implemented */ +#define __NR_osf_fdatasync 261 /* not implemented */ + +/* + * Ignore legacy syscalls that we don't use. 
+ */ +#define __IGNORE_alarm +#define __IGNORE_creat +#define __IGNORE_getegid +#define __IGNORE_geteuid +#define __IGNORE_getgid +#define __IGNORE_getpid +#define __IGNORE_getppid +#define __IGNORE_getuid +#define __IGNORE_pause +#define __IGNORE_time +#define __IGNORE_utime + +/* + * Linux-specific system calls begin at 300 + */ +#define __NR_bdflush 300 +#define __NR_sethae 301 +#define __NR_mount 302 +#define __NR_old_adjtimex 303 +#define __NR_swapoff 304 +#define __NR_getdents 305 +#define __NR_create_module 306 +#define __NR_init_module 307 +#define __NR_delete_module 308 +#define __NR_get_kernel_syms 309 +#define __NR_syslog 310 +#define __NR_reboot 311 +#define __NR_clone 312 +#define __NR_uselib 313 +#define __NR_mlock 314 +#define __NR_munlock 315 +#define __NR_mlockall 316 +#define __NR_munlockall 317 +#define __NR_sysinfo 318 +#define __NR__sysctl 319 +/* 320 was sys_idle. */ +#define __NR_oldumount 321 +#define __NR_swapon 322 +#define __NR_times 323 +#define __NR_personality 324 +#define __NR_setfsuid 325 +#define __NR_setfsgid 326 +#define __NR_ustat 327 +#define __NR_statfs 328 +#define __NR_fstatfs 329 +#define __NR_sched_setparam 330 +#define __NR_sched_getparam 331 +#define __NR_sched_setscheduler 332 +#define __NR_sched_getscheduler 333 +#define __NR_sched_yield 334 +#define __NR_sched_get_priority_max 335 +#define __NR_sched_get_priority_min 336 +#define __NR_sched_rr_get_interval 337 +#define __NR_afs_syscall 338 +#define __NR_uname 339 +#define __NR_nanosleep 340 +#define __NR_mremap 341 +#define __NR_nfsservctl 342 +#define __NR_setresuid 343 +#define __NR_getresuid 344 +#define __NR_pciconfig_read 345 +#define __NR_pciconfig_write 346 +#define __NR_query_module 347 +#define __NR_prctl 348 +#define __NR_pread64 349 +#define __NR_pwrite64 350 +#define __NR_rt_sigreturn 351 +#define __NR_rt_sigaction 352 +#define __NR_rt_sigprocmask 353 +#define __NR_rt_sigpending 354 +#define __NR_rt_sigtimedwait 355 +#define __NR_rt_sigqueueinfo 356 +#define __NR_rt_sigsuspend 357 +#define __NR_select 358 +#define __NR_gettimeofday 359 +#define __NR_settimeofday 360 +#define __NR_getitimer 361 +#define __NR_setitimer 362 +#define __NR_utimes 363 +#define __NR_getrusage 364 +#define __NR_wait4 365 +#define __NR_adjtimex 366 +#define __NR_getcwd 367 +#define __NR_capget 368 +#define __NR_capset 369 +#define __NR_sendfile 370 +#define __NR_setresgid 371 +#define __NR_getresgid 372 +#define __NR_dipc 373 +#define __NR_pivot_root 374 +#define __NR_mincore 375 +#define __NR_pciconfig_iobase 376 +#define __NR_getdents64 377 +#define __NR_gettid 378 +#define __NR_readahead 379 +/* 380 is unused */ +#define __NR_tkill 381 +#define __NR_setxattr 382 +#define __NR_lsetxattr 383 +#define __NR_fsetxattr 384 +#define __NR_getxattr 385 +#define __NR_lgetxattr 386 +#define __NR_fgetxattr 387 +#define __NR_listxattr 388 +#define __NR_llistxattr 389 +#define __NR_flistxattr 390 +#define __NR_removexattr 391 +#define __NR_lremovexattr 392 +#define __NR_fremovexattr 393 +#define __NR_futex 394 +#define __NR_sched_setaffinity 395 +#define __NR_sched_getaffinity 396 +#define __NR_tuxcall 397 +#define __NR_io_setup 398 +#define __NR_io_destroy 399 +#define __NR_io_getevents 400 +#define __NR_io_submit 401 +#define __NR_io_cancel 402 +#define __NR_exit_group 405 +#define __NR_lookup_dcookie 406 +#define __NR_epoll_create 407 +#define __NR_epoll_ctl 408 +#define __NR_epoll_wait 409 +/* Feb 2007: These three sys_epoll defines shouldn't be here but culling + * them would break userspace apps ... 
we'll kill them off in 2010 :) */ +#define __NR_sys_epoll_create __NR_epoll_create +#define __NR_sys_epoll_ctl __NR_epoll_ctl +#define __NR_sys_epoll_wait __NR_epoll_wait +#define __NR_remap_file_pages 410 +#define __NR_set_tid_address 411 +#define __NR_restart_syscall 412 +#define __NR_fadvise64 413 +#define __NR_timer_create 414 +#define __NR_timer_settime 415 +#define __NR_timer_gettime 416 +#define __NR_timer_getoverrun 417 +#define __NR_timer_delete 418 +#define __NR_clock_settime 419 +#define __NR_clock_gettime 420 +#define __NR_clock_getres 421 +#define __NR_clock_nanosleep 422 +#define __NR_semtimedop 423 +#define __NR_tgkill 424 +#define __NR_stat64 425 +#define __NR_lstat64 426 +#define __NR_fstat64 427 +#define __NR_vserver 428 +#define __NR_mbind 429 +#define __NR_get_mempolicy 430 +#define __NR_set_mempolicy 431 +#define __NR_mq_open 432 +#define __NR_mq_unlink 433 +#define __NR_mq_timedsend 434 +#define __NR_mq_timedreceive 435 +#define __NR_mq_notify 436 +#define __NR_mq_getsetattr 437 +#define __NR_waitid 438 +#define __NR_add_key 439 +#define __NR_request_key 440 +#define __NR_keyctl 441 +#define __NR_ioprio_set 442 +#define __NR_ioprio_get 443 +#define __NR_inotify_init 444 +#define __NR_inotify_add_watch 445 +#define __NR_inotify_rm_watch 446 +#define __NR_fdatasync 447 +#define __NR_kexec_load 448 +#define __NR_migrate_pages 449 +#define __NR_openat 450 +#define __NR_mkdirat 451 +#define __NR_mknodat 452 +#define __NR_fchownat 453 +#define __NR_futimesat 454 +#define __NR_fstatat64 455 +#define __NR_unlinkat 456 +#define __NR_renameat 457 +#define __NR_linkat 458 +#define __NR_symlinkat 459 +#define __NR_readlinkat 460 +#define __NR_fchmodat 461 +#define __NR_faccessat 462 +#define __NR_pselect6 463 +#define __NR_ppoll 464 +#define __NR_unshare 465 +#define __NR_set_robust_list 466 +#define __NR_get_robust_list 467 +#define __NR_splice 468 +#define __NR_sync_file_range 469 +#define __NR_tee 470 +#define __NR_vmsplice 471 +#define __NR_move_pages 472 +#define __NR_getcpu 473 +#define __NR_epoll_pwait 474 +#define __NR_utimensat 475 +#define __NR_signalfd 476 +#define __NR_timerfd 477 +#define __NR_eventfd 478 + +#ifdef __KERNEL__ + +#define NR_SYSCALLS 479 + +#define __ARCH_WANT_IPC_PARSE_VERSION +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING + +/* "Conditional" syscalls. What we want is + + __attribute__((weak,alias("sys_ni_syscall"))) + + but that raises the problem of what type to give the symbol. If we use + a prototype, it'll conflict with the definition given in this file and + others. If we use __typeof, we discover that not all symbols actually + have declarations. If we use no prototype, then we get warnings from + -Wstrict-prototypes. Ho hum. 
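The cond_syscall() define that follows sidesteps the prototype problem by emitting a weak assembler alias; for a hypothetical sys_foo it expands, via string concatenation, to:

cond_syscall(sys_foo);
/* ...becomes... */
asm(".weak\tsys_foo\n" "sys_foo = sys_ni_syscall");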
*/ + +#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall") + +#endif /* __KERNEL__ */ +#endif /* _ALPHA_UNISTD_H */ diff --git a/arch/alpha/include/asm/user.h b/arch/alpha/include/asm/user.h new file mode 100644 index 00000000000..a4eb6a4ca8d --- /dev/null +++ b/arch/alpha/include/asm/user.h @@ -0,0 +1,53 @@ +#ifndef _ALPHA_USER_H +#define _ALPHA_USER_H + +#include +#include + +#include +#include + +/* + * Core file format: The core file is written in such a way that gdb + * can understand it and provide useful information to the user (under + * linux we use the `trad-core' bfd, NOT the osf-core). The file contents + * are as follows: + * + * upage: 1 page consisting of a user struct that tells gdb + * what is present in the file. Directly after this is a + * copy of the task_struct, which is currently not used by gdb, + * but it may come in handy at some point. All of the registers + * are stored as part of the upage. The upage should always be + * only one page long. + * data: The data segment follows next. We use current->end_text to + * current->brk to pick up all of the user variables, plus any memory + * that may have been sbrk'ed. No attempt is made to determine if a + * page is demand-zero or if a page is totally unused, we just cover + * the entire range. All of the addresses are rounded in such a way + * that an integral number of pages is written. + * stack: We need the stack information in order to get a meaningful + * backtrace. We need to write the data from usp to + * current->start_stack, so we round each of these in order to be able + * to write an integer number of pages. + */ +struct user { + unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */ + size_t u_tsize; /* text size (pages) */ + size_t u_dsize; /* data size (pages) */ + size_t u_ssize; /* stack size (pages) */ + unsigned long start_code; /* text starting address */ + unsigned long start_data; /* data starting address */ + unsigned long start_stack; /* stack starting address */ + long int signal; /* signal causing core dump */ + unsigned long u_ar0; /* help gdb find registers */ + unsigned long magic; /* identifies a core file */ + char u_comm[32]; /* user command name */ +}; + +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_DATA_START_ADDR (u.start_data) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif /* _ALPHA_USER_H */ diff --git a/arch/alpha/include/asm/vga.h b/arch/alpha/include/asm/vga.h new file mode 100644 index 00000000000..c00106bac52 --- /dev/null +++ b/arch/alpha/include/asm/vga.h @@ -0,0 +1,82 @@ +/* + * Access to VGA videoram + * + * (c) 1998 Martin Mares + */ + +#ifndef _LINUX_ASM_VGA_H_ +#define _LINUX_ASM_VGA_H_ + +#include + +#define VT_BUF_HAVE_RW +#define VT_BUF_HAVE_MEMSETW +#define VT_BUF_HAVE_MEMCPYW + +static inline void scr_writew(u16 val, volatile u16 *addr) +{ + if (__is_ioaddr(addr)) + __raw_writew(val, (volatile u16 __iomem *) addr); + else + *addr = val; +} + +static inline u16 scr_readw(volatile const u16 *addr) +{ + if (__is_ioaddr(addr)) + return __raw_readw((volatile const u16 __iomem *) addr); + else + return *addr; +} + +static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) +{ + if (__is_ioaddr(s)) + memsetw_io((u16 __iomem *) s, c, count); + else + memsetw(s, c, count); +} + +/* Do not trust that the usage will be correct; analyze the arguments. */ +extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count); + +/* ??? 
These are currently only used for downloading character sets. As + such, they don't need memory barriers. Is this all they are intended + to be used for? */ +#define vga_readb(a) readb((u8 __iomem *)(a)) +#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a)) + +#ifdef CONFIG_VGA_HOSE +#include +#include + +extern struct pci_controller *pci_vga_hose; + +# define __is_port_vga(a) \ + (((a) >= 0x3b0) && ((a) < 0x3e0) && \ + ((a) != 0x3b3) && ((a) != 0x3d3)) + +# define __is_mem_vga(a) \ + (((a) >= 0xa0000) && ((a) <= 0xc0000)) + +# define FIXUP_IOADDR_VGA(a) do { \ + if (pci_vga_hose && __is_port_vga(a)) \ + (a) += pci_vga_hose->io_space->start; \ + } while(0) + +# define FIXUP_MEMADDR_VGA(a) do { \ + if (pci_vga_hose && __is_mem_vga(a)) \ + (a) += pci_vga_hose->mem_space->start; \ + } while(0) + +#else /* CONFIG_VGA_HOSE */ +# define pci_vga_hose 0 +# define __is_port_vga(a) 0 +# define __is_mem_vga(a) 0 +# define FIXUP_IOADDR_VGA(a) +# define FIXUP_MEMADDR_VGA(a) +#endif /* CONFIG_VGA_HOSE */ + +#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s)) + +#endif diff --git a/arch/alpha/include/asm/xor.h b/arch/alpha/include/asm/xor.h new file mode 100644 index 00000000000..5ee1c2bc049 --- /dev/null +++ b/arch/alpha/include/asm/xor.h @@ -0,0 +1,855 @@ +/* + * include/asm-alpha/xor.h + * + * Optimized RAID-5 checksumming functions for alpha EV5 and EV6 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
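The hand-scheduled routines below all reduce to a plain xor loop over 64-byte lines; here is a C reference for xor_alpha_2() as a conceptual sketch, not the code the kernel actually runs:

static void example_xor_2(unsigned long bytes, unsigned long *p1,
			  unsigned long *p2)
{
	long lines = bytes / 64;	/* srl $16, 6, $16 */

	do {
		int i;

		for (i = 0; i < 8; i++)
			p1[i] ^= p2[i];	/* eight ldq/xor/stq groups per line */
		p1 += 8;
		p2 += 8;
	} while (--lines > 0);		/* subq/bgt back to the loop head */
}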
+ */ + +extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *); +extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *, + unsigned long *); +extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *); +extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *, unsigned long *); + +extern void xor_alpha_prefetch_2(unsigned long, unsigned long *, + unsigned long *); +extern void xor_alpha_prefetch_3(unsigned long, unsigned long *, + unsigned long *, unsigned long *); +extern void xor_alpha_prefetch_4(unsigned long, unsigned long *, + unsigned long *, unsigned long *, + unsigned long *); +extern void xor_alpha_prefetch_5(unsigned long, unsigned long *, + unsigned long *, unsigned long *, + unsigned long *, unsigned long *); + +asm(" \n\ + .text \n\ + .align 3 \n\ + .ent xor_alpha_2 \n\ +xor_alpha_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +2: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,8($17) \n\ + ldq $3,8($18) \n\ + \n\ + ldq $4,16($17) \n\ + ldq $5,16($18) \n\ + ldq $6,24($17) \n\ + ldq $7,24($18) \n\ + \n\ + ldq $19,32($17) \n\ + ldq $20,32($18) \n\ + ldq $21,40($17) \n\ + ldq $22,40($18) \n\ + \n\ + ldq $23,48($17) \n\ + ldq $24,48($18) \n\ + ldq $25,56($17) \n\ + xor $0,$1,$0 # 7 cycles from $1 load \n\ + \n\ + ldq $27,56($18) \n\ + xor $2,$3,$2 \n\ + stq $0,0($17) \n\ + xor $4,$5,$4 \n\ + \n\ + stq $2,8($17) \n\ + xor $6,$7,$6 \n\ + stq $4,16($17) \n\ + xor $19,$20,$19 \n\ + \n\ + stq $6,24($17) \n\ + xor $21,$22,$21 \n\ + stq $19,32($17) \n\ + xor $23,$24,$23 \n\ + \n\ + stq $21,40($17) \n\ + xor $25,$27,$25 \n\ + stq $23,48($17) \n\ + subq $16,1,$16 \n\ + \n\ + stq $25,56($17) \n\ + addq $17,64,$17 \n\ + addq $18,64,$18 \n\ + bgt $16,2b \n\ + \n\ + ret \n\ + .end xor_alpha_2 \n\ + \n\ + .align 3 \n\ + .ent xor_alpha_3 \n\ +xor_alpha_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +3: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,0($19) \n\ + ldq $3,8($17) \n\ + \n\ + ldq $4,8($18) \n\ + ldq $6,16($17) \n\ + ldq $7,16($18) \n\ + ldq $21,24($17) \n\ + \n\ + ldq $22,24($18) \n\ + ldq $24,32($17) \n\ + ldq $25,32($18) \n\ + ldq $5,8($19) \n\ + \n\ + ldq $20,16($19) \n\ + ldq $23,24($19) \n\ + ldq $27,32($19) \n\ + nop \n\ + \n\ + xor $0,$1,$1 # 8 cycles from $0 load \n\ + xor $3,$4,$4 # 6 cycles from $4 load \n\ + xor $6,$7,$7 # 6 cycles from $7 load \n\ + xor $21,$22,$22 # 5 cycles from $22 load \n\ + \n\ + xor $1,$2,$2 # 9 cycles from $2 load \n\ + xor $24,$25,$25 # 5 cycles from $25 load \n\ + stq $2,0($17) \n\ + xor $4,$5,$5 # 6 cycles from $5 load \n\ + \n\ + stq $5,8($17) \n\ + xor $7,$20,$20 # 7 cycles from $20 load \n\ + stq $20,16($17) \n\ + xor $22,$23,$23 # 7 cycles from $23 load \n\ + \n\ + stq $23,24($17) \n\ + xor $25,$27,$27 # 7 cycles from $27 load \n\ + stq $27,32($17) \n\ + nop \n\ + \n\ + ldq $0,40($17) \n\ + ldq $1,40($18) \n\ + ldq $3,48($17) \n\ + ldq $4,48($18) \n\ + \n\ + ldq $6,56($17) \n\ + ldq $7,56($18) \n\ + ldq $2,40($19) \n\ + ldq $5,48($19) \n\ + \n\ + ldq $20,56($19) \n\ + xor $0,$1,$1 # 4 cycles from $1 load \n\ + xor $3,$4,$4 # 5 cycles from $4 load \n\ + xor $6,$7,$7 # 5 cycles from $7 load \n\ + \n\ + xor $1,$2,$2 # 4 cycles from $2 load \n\ + xor $4,$5,$5 # 5 cycles from $5 load \n\ + stq $2,40($17) \n\ + xor $7,$20,$20 # 4 cycles from $20 load \n\ + \n\ + stq $5,48($17) \n\ + subq $16,1,$16 \n\ + stq $20,56($17) \n\ + addq $19,64,$19 \n\ + \n\ + addq $18,64,$18 \n\ + 
addq $17,64,$17 \n\ + bgt $16,3b \n\ + ret \n\ + .end xor_alpha_3 \n\ + \n\ + .align 3 \n\ + .ent xor_alpha_4 \n\ +xor_alpha_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +4: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,0($19) \n\ + ldq $3,0($20) \n\ + \n\ + ldq $4,8($17) \n\ + ldq $5,8($18) \n\ + ldq $6,8($19) \n\ + ldq $7,8($20) \n\ + \n\ + ldq $21,16($17) \n\ + ldq $22,16($18) \n\ + ldq $23,16($19) \n\ + ldq $24,16($20) \n\ + \n\ + ldq $25,24($17) \n\ + xor $0,$1,$1 # 6 cycles from $1 load \n\ + ldq $27,24($18) \n\ + xor $2,$3,$3 # 6 cycles from $3 load \n\ + \n\ + ldq $0,24($19) \n\ + xor $1,$3,$3 \n\ + ldq $1,24($20) \n\ + xor $4,$5,$5 # 7 cycles from $5 load \n\ + \n\ + stq $3,0($17) \n\ + xor $6,$7,$7 \n\ + xor $21,$22,$22 # 7 cycles from $22 load \n\ + xor $5,$7,$7 \n\ + \n\ + stq $7,8($17) \n\ + xor $23,$24,$24 # 7 cycles from $24 load \n\ + ldq $2,32($17) \n\ + xor $22,$24,$24 \n\ + \n\ + ldq $3,32($18) \n\ + ldq $4,32($19) \n\ + ldq $5,32($20) \n\ + xor $25,$27,$27 # 8 cycles from $27 load \n\ + \n\ + ldq $6,40($17) \n\ + ldq $7,40($18) \n\ + ldq $21,40($19) \n\ + ldq $22,40($20) \n\ + \n\ + stq $24,16($17) \n\ + xor $0,$1,$1 # 9 cycles from $1 load \n\ + xor $2,$3,$3 # 5 cycles from $3 load \n\ + xor $27,$1,$1 \n\ + \n\ + stq $1,24($17) \n\ + xor $4,$5,$5 # 5 cycles from $5 load \n\ + ldq $23,48($17) \n\ + ldq $24,48($18) \n\ + \n\ + ldq $25,48($19) \n\ + xor $3,$5,$5 \n\ + ldq $27,48($20) \n\ + ldq $0,56($17) \n\ + \n\ + ldq $1,56($18) \n\ + ldq $2,56($19) \n\ + xor $6,$7,$7 # 8 cycles from $6 load \n\ + ldq $3,56($20) \n\ + \n\ + stq $5,32($17) \n\ + xor $21,$22,$22 # 8 cycles from $22 load \n\ + xor $7,$22,$22 \n\ + xor $23,$24,$24 # 5 cycles from $24 load \n\ + \n\ + stq $22,40($17) \n\ + xor $25,$27,$27 # 5 cycles from $27 load \n\ + xor $24,$27,$27 \n\ + xor $0,$1,$1 # 5 cycles from $1 load \n\ + \n\ + stq $27,48($17) \n\ + xor $2,$3,$3 # 4 cycles from $3 load \n\ + xor $1,$3,$3 \n\ + subq $16,1,$16 \n\ + \n\ + stq $3,56($17) \n\ + addq $20,64,$20 \n\ + addq $19,64,$19 \n\ + addq $18,64,$18 \n\ + \n\ + addq $17,64,$17 \n\ + bgt $16,4b \n\ + ret \n\ + .end xor_alpha_4 \n\ + \n\ + .align 3 \n\ + .ent xor_alpha_5 \n\ +xor_alpha_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +5: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,0($19) \n\ + ldq $3,0($20) \n\ + \n\ + ldq $4,0($21) \n\ + ldq $5,8($17) \n\ + ldq $6,8($18) \n\ + ldq $7,8($19) \n\ + \n\ + ldq $22,8($20) \n\ + ldq $23,8($21) \n\ + ldq $24,16($17) \n\ + ldq $25,16($18) \n\ + \n\ + ldq $27,16($19) \n\ + xor $0,$1,$1 # 6 cycles from $1 load \n\ + ldq $28,16($20) \n\ + xor $2,$3,$3 # 6 cycles from $3 load \n\ + \n\ + ldq $0,16($21) \n\ + xor $1,$3,$3 \n\ + ldq $1,24($17) \n\ + xor $3,$4,$4 # 7 cycles from $4 load \n\ + \n\ + stq $4,0($17) \n\ + xor $5,$6,$6 # 7 cycles from $6 load \n\ + xor $7,$22,$22 # 7 cycles from $22 load \n\ + xor $6,$23,$23 # 7 cycles from $23 load \n\ + \n\ + ldq $2,24($18) \n\ + xor $22,$23,$23 \n\ + ldq $3,24($19) \n\ + xor $24,$25,$25 # 8 cycles from $25 load \n\ + \n\ + stq $23,8($17) \n\ + xor $25,$27,$27 # 8 cycles from $27 load \n\ + ldq $4,24($20) \n\ + xor $28,$0,$0 # 7 cycles from $0 load \n\ + \n\ + ldq $5,24($21) \n\ + xor $27,$0,$0 \n\ + ldq $6,32($17) \n\ + ldq $7,32($18) \n\ + \n\ + stq $0,16($17) \n\ + xor $1,$2,$2 # 6 cycles from $2 load \n\ + ldq $22,32($19) \n\ + xor $3,$4,$4 # 4 cycles from $4 load \n\ + \n\ + ldq $23,32($20) \n\ + xor $2,$4,$4 \n\ + ldq $24,32($21) \n\ + ldq $25,40($17) \n\ + \n\ + ldq $27,40($18) \n\ + ldq 
$28,40($19) \n\ + ldq $0,40($20) \n\ + xor $4,$5,$5 # 7 cycles from $5 load \n\ + \n\ + stq $5,24($17) \n\ + xor $6,$7,$7 # 7 cycles from $7 load \n\ + ldq $1,40($21) \n\ + ldq $2,48($17) \n\ + \n\ + ldq $3,48($18) \n\ + xor $7,$22,$22 # 7 cycles from $22 load \n\ + ldq $4,48($19) \n\ + xor $23,$24,$24 # 6 cycles from $24 load \n\ + \n\ + ldq $5,48($20) \n\ + xor $22,$24,$24 \n\ + ldq $6,48($21) \n\ + xor $25,$27,$27 # 7 cycles from $27 load \n\ + \n\ + stq $24,32($17) \n\ + xor $27,$28,$28 # 8 cycles from $28 load \n\ + ldq $7,56($17) \n\ + xor $0,$1,$1 # 6 cycles from $1 load \n\ + \n\ + ldq $22,56($18) \n\ + ldq $23,56($19) \n\ + ldq $24,56($20) \n\ + ldq $25,56($21) \n\ + \n\ + xor $28,$1,$1 \n\ + xor $2,$3,$3 # 9 cycles from $3 load \n\ + xor $3,$4,$4 # 9 cycles from $4 load \n\ + xor $5,$6,$6 # 8 cycles from $6 load \n\ + \n\ + stq $1,40($17) \n\ + xor $4,$6,$6 \n\ + xor $7,$22,$22 # 7 cycles from $22 load \n\ + xor $23,$24,$24 # 6 cycles from $24 load \n\ + \n\ + stq $6,48($17) \n\ + xor $22,$24,$24 \n\ + subq $16,1,$16 \n\ + xor $24,$25,$25 # 8 cycles from $25 load \n\ + \n\ + stq $25,56($17) \n\ + addq $21,64,$21 \n\ + addq $20,64,$20 \n\ + addq $19,64,$19 \n\ + \n\ + addq $18,64,$18 \n\ + addq $17,64,$17 \n\ + bgt $16,5b \n\ + ret \n\ + .end xor_alpha_5 \n\ + \n\ + .align 3 \n\ + .ent xor_alpha_prefetch_2 \n\ +xor_alpha_prefetch_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + ldq $31, 0($17) \n\ + ldq $31, 0($18) \n\ + \n\ + ldq $31, 64($17) \n\ + ldq $31, 64($18) \n\ + \n\ + ldq $31, 128($17) \n\ + ldq $31, 128($18) \n\ + \n\ + ldq $31, 192($17) \n\ + ldq $31, 192($18) \n\ + .align 4 \n\ +2: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,8($17) \n\ + ldq $3,8($18) \n\ + \n\ + ldq $4,16($17) \n\ + ldq $5,16($18) \n\ + ldq $6,24($17) \n\ + ldq $7,24($18) \n\ + \n\ + ldq $19,32($17) \n\ + ldq $20,32($18) \n\ + ldq $21,40($17) \n\ + ldq $22,40($18) \n\ + \n\ + ldq $23,48($17) \n\ + ldq $24,48($18) \n\ + ldq $25,56($17) \n\ + ldq $27,56($18) \n\ + \n\ + ldq $31,256($17) \n\ + xor $0,$1,$0 # 8 cycles from $1 load \n\ + ldq $31,256($18) \n\ + xor $2,$3,$2 \n\ + \n\ + stq $0,0($17) \n\ + xor $4,$5,$4 \n\ + stq $2,8($17) \n\ + xor $6,$7,$6 \n\ + \n\ + stq $4,16($17) \n\ + xor $19,$20,$19 \n\ + stq $6,24($17) \n\ + xor $21,$22,$21 \n\ + \n\ + stq $19,32($17) \n\ + xor $23,$24,$23 \n\ + stq $21,40($17) \n\ + xor $25,$27,$25 \n\ + \n\ + stq $23,48($17) \n\ + subq $16,1,$16 \n\ + stq $25,56($17) \n\ + addq $17,64,$17 \n\ + \n\ + addq $18,64,$18 \n\ + bgt $16,2b \n\ + ret \n\ + .end xor_alpha_prefetch_2 \n\ + \n\ + .align 3 \n\ + .ent xor_alpha_prefetch_3 \n\ +xor_alpha_prefetch_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + ldq $31, 0($17) \n\ + ldq $31, 0($18) \n\ + ldq $31, 0($19) \n\ + \n\ + ldq $31, 64($17) \n\ + ldq $31, 64($18) \n\ + ldq $31, 64($19) \n\ + \n\ + ldq $31, 128($17) \n\ + ldq $31, 128($18) \n\ + ldq $31, 128($19) \n\ + \n\ + ldq $31, 192($17) \n\ + ldq $31, 192($18) \n\ + ldq $31, 192($19) \n\ + .align 4 \n\ +3: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,0($19) \n\ + ldq $3,8($17) \n\ + \n\ + ldq $4,8($18) \n\ + ldq $6,16($17) \n\ + ldq $7,16($18) \n\ + ldq $21,24($17) \n\ + \n\ + ldq $22,24($18) \n\ + ldq $24,32($17) \n\ + ldq $25,32($18) \n\ + ldq $5,8($19) \n\ + \n\ + ldq $20,16($19) \n\ + ldq $23,24($19) \n\ + ldq $27,32($19) \n\ + nop \n\ + \n\ + xor $0,$1,$1 # 8 cycles from $0 load \n\ + xor $3,$4,$4 # 7 cycles from $4 load \n\ + xor $6,$7,$7 # 6 cycles from $7 load \n\ + xor $21,$22,$22 # 5 cycles from $22 load \n\ + \n\ + xor 
$1,$2,$2 # 9 cycles from $2 load \n\ + xor $24,$25,$25 # 5 cycles from $25 load \n\ + stq $2,0($17) \n\ + xor $4,$5,$5 # 6 cycles from $5 load \n\ + \n\ + stq $5,8($17) \n\ + xor $7,$20,$20 # 7 cycles from $20 load \n\ + stq $20,16($17) \n\ + xor $22,$23,$23 # 7 cycles from $23 load \n\ + \n\ + stq $23,24($17) \n\ + xor $25,$27,$27 # 7 cycles from $27 load \n\ + stq $27,32($17) \n\ + nop \n\ + \n\ + ldq $0,40($17) \n\ + ldq $1,40($18) \n\ + ldq $3,48($17) \n\ + ldq $4,48($18) \n\ + \n\ + ldq $6,56($17) \n\ + ldq $7,56($18) \n\ + ldq $2,40($19) \n\ + ldq $5,48($19) \n\ + \n\ + ldq $20,56($19) \n\ + ldq $31,256($17) \n\ + ldq $31,256($18) \n\ + ldq $31,256($19) \n\ + \n\ + xor $0,$1,$1 # 6 cycles from $1 load \n\ + xor $3,$4,$4 # 5 cycles from $4 load \n\ + xor $6,$7,$7 # 5 cycles from $7 load \n\ + xor $1,$2,$2 # 4 cycles from $2 load \n\ + \n\ + xor $4,$5,$5 # 5 cycles from $5 load \n\ + xor $7,$20,$20 # 4 cycles from $20 load \n\ + stq $2,40($17) \n\ + subq $16,1,$16 \n\ + \n\ + stq $5,48($17) \n\ + addq $19,64,$19 \n\ + stq $20,56($17) \n\ + addq $18,64,$18 \n\ + \n\ + addq $17,64,$17 \n\ + bgt $16,3b \n\ + ret \n\ + .end xor_alpha_prefetch_3 \n\ + \n\ + .align 3 \n\ + .ent xor_alpha_prefetch_4 \n\ +xor_alpha_prefetch_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + ldq $31, 0($17) \n\ + ldq $31, 0($18) \n\ + ldq $31, 0($19) \n\ + ldq $31, 0($20) \n\ + \n\ + ldq $31, 64($17) \n\ + ldq $31, 64($18) \n\ + ldq $31, 64($19) \n\ + ldq $31, 64($20) \n\ + \n\ + ldq $31, 128($17) \n\ + ldq $31, 128($18) \n\ + ldq $31, 128($19) \n\ + ldq $31, 128($20) \n\ + \n\ + ldq $31, 192($17) \n\ + ldq $31, 192($18) \n\ + ldq $31, 192($19) \n\ + ldq $31, 192($20) \n\ + .align 4 \n\ +4: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,0($19) \n\ + ldq $3,0($20) \n\ + \n\ + ldq $4,8($17) \n\ + ldq $5,8($18) \n\ + ldq $6,8($19) \n\ + ldq $7,8($20) \n\ + \n\ + ldq $21,16($17) \n\ + ldq $22,16($18) \n\ + ldq $23,16($19) \n\ + ldq $24,16($20) \n\ + \n\ + ldq $25,24($17) \n\ + xor $0,$1,$1 # 6 cycles from $1 load \n\ + ldq $27,24($18) \n\ + xor $2,$3,$3 # 6 cycles from $3 load \n\ + \n\ + ldq $0,24($19) \n\ + xor $1,$3,$3 \n\ + ldq $1,24($20) \n\ + xor $4,$5,$5 # 7 cycles from $5 load \n\ + \n\ + stq $3,0($17) \n\ + xor $6,$7,$7 \n\ + xor $21,$22,$22 # 7 cycles from $22 load \n\ + xor $5,$7,$7 \n\ + \n\ + stq $7,8($17) \n\ + xor $23,$24,$24 # 7 cycles from $24 load \n\ + ldq $2,32($17) \n\ + xor $22,$24,$24 \n\ + \n\ + ldq $3,32($18) \n\ + ldq $4,32($19) \n\ + ldq $5,32($20) \n\ + xor $25,$27,$27 # 8 cycles from $27 load \n\ + \n\ + ldq $6,40($17) \n\ + ldq $7,40($18) \n\ + ldq $21,40($19) \n\ + ldq $22,40($20) \n\ + \n\ + stq $24,16($17) \n\ + xor $0,$1,$1 # 9 cycles from $1 load \n\ + xor $2,$3,$3 # 5 cycles from $3 load \n\ + xor $27,$1,$1 \n\ + \n\ + stq $1,24($17) \n\ + xor $4,$5,$5 # 5 cycles from $5 load \n\ + ldq $23,48($17) \n\ + xor $3,$5,$5 \n\ + \n\ + ldq $24,48($18) \n\ + ldq $25,48($19) \n\ + ldq $27,48($20) \n\ + ldq $0,56($17) \n\ + \n\ + ldq $1,56($18) \n\ + ldq $2,56($19) \n\ + ldq $3,56($20) \n\ + xor $6,$7,$7 # 8 cycles from $6 load \n\ + \n\ + ldq $31,256($17) \n\ + xor $21,$22,$22 # 8 cycles from $22 load \n\ + ldq $31,256($18) \n\ + xor $7,$22,$22 \n\ + \n\ + ldq $31,256($19) \n\ + xor $23,$24,$24 # 6 cycles from $24 load \n\ + ldq $31,256($20) \n\ + xor $25,$27,$27 # 6 cycles from $27 load \n\ + \n\ + stq $5,32($17) \n\ + xor $24,$27,$27 \n\ + xor $0,$1,$1 # 7 cycles from $1 load \n\ + xor $2,$3,$3 # 6 cycles from $3 load \n\ + \n\ + stq $22,40($17) \n\ + xor $1,$3,$3 \n\ + stq 
$27,48($17) \n\ + subq $16,1,$16 \n\ + \n\ + stq $3,56($17) \n\ + addq $20,64,$20 \n\ + addq $19,64,$19 \n\ + addq $18,64,$18 \n\ + \n\ + addq $17,64,$17 \n\ + bgt $16,4b \n\ + ret \n\ + .end xor_alpha_prefetch_4 \n\ + \n\ + .align 3 \n\ + .ent xor_alpha_prefetch_5 \n\ +xor_alpha_prefetch_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + ldq $31, 0($17) \n\ + ldq $31, 0($18) \n\ + ldq $31, 0($19) \n\ + ldq $31, 0($20) \n\ + ldq $31, 0($21) \n\ + \n\ + ldq $31, 64($17) \n\ + ldq $31, 64($18) \n\ + ldq $31, 64($19) \n\ + ldq $31, 64($20) \n\ + ldq $31, 64($21) \n\ + \n\ + ldq $31, 128($17) \n\ + ldq $31, 128($18) \n\ + ldq $31, 128($19) \n\ + ldq $31, 128($20) \n\ + ldq $31, 128($21) \n\ + \n\ + ldq $31, 192($17) \n\ + ldq $31, 192($18) \n\ + ldq $31, 192($19) \n\ + ldq $31, 192($20) \n\ + ldq $31, 192($21) \n\ + .align 4 \n\ +5: \n\ + ldq $0,0($17) \n\ + ldq $1,0($18) \n\ + ldq $2,0($19) \n\ + ldq $3,0($20) \n\ + \n\ + ldq $4,0($21) \n\ + ldq $5,8($17) \n\ + ldq $6,8($18) \n\ + ldq $7,8($19) \n\ + \n\ + ldq $22,8($20) \n\ + ldq $23,8($21) \n\ + ldq $24,16($17) \n\ + ldq $25,16($18) \n\ + \n\ + ldq $27,16($19) \n\ + xor $0,$1,$1 # 6 cycles from $1 load \n\ + ldq $28,16($20) \n\ + xor $2,$3,$3 # 6 cycles from $3 load \n\ + \n\ + ldq $0,16($21) \n\ + xor $1,$3,$3 \n\ + ldq $1,24($17) \n\ + xor $3,$4,$4 # 7 cycles from $4 load \n\ + \n\ + stq $4,0($17) \n\ + xor $5,$6,$6 # 7 cycles from $6 load \n\ + xor $7,$22,$22 # 7 cycles from $22 load \n\ + xor $6,$23,$23 # 7 cycles from $23 load \n\ + \n\ + ldq $2,24($18) \n\ + xor $22,$23,$23 \n\ + ldq $3,24($19) \n\ + xor $24,$25,$25 # 8 cycles from $25 load \n\ + \n\ + stq $23,8($17) \n\ + xor $25,$27,$27 # 8 cycles from $27 load \n\ + ldq $4,24($20) \n\ + xor $28,$0,$0 # 7 cycles from $0 load \n\ + \n\ + ldq $5,24($21) \n\ + xor $27,$0,$0 \n\ + ldq $6,32($17) \n\ + ldq $7,32($18) \n\ + \n\ + stq $0,16($17) \n\ + xor $1,$2,$2 # 6 cycles from $2 load \n\ + ldq $22,32($19) \n\ + xor $3,$4,$4 # 4 cycles from $4 load \n\ + \n\ + ldq $23,32($20) \n\ + xor $2,$4,$4 \n\ + ldq $24,32($21) \n\ + ldq $25,40($17) \n\ + \n\ + ldq $27,40($18) \n\ + ldq $28,40($19) \n\ + ldq $0,40($20) \n\ + xor $4,$5,$5 # 7 cycles from $5 load \n\ + \n\ + stq $5,24($17) \n\ + xor $6,$7,$7 # 7 cycles from $7 load \n\ + ldq $1,40($21) \n\ + ldq $2,48($17) \n\ + \n\ + ldq $3,48($18) \n\ + xor $7,$22,$22 # 7 cycles from $22 load \n\ + ldq $4,48($19) \n\ + xor $23,$24,$24 # 6 cycles from $24 load \n\ + \n\ + ldq $5,48($20) \n\ + xor $22,$24,$24 \n\ + ldq $6,48($21) \n\ + xor $25,$27,$27 # 7 cycles from $27 load \n\ + \n\ + stq $24,32($17) \n\ + xor $27,$28,$28 # 8 cycles from $28 load \n\ + ldq $7,56($17) \n\ + xor $0,$1,$1 # 6 cycles from $1 load \n\ + \n\ + ldq $22,56($18) \n\ + ldq $23,56($19) \n\ + ldq $24,56($20) \n\ + ldq $25,56($21) \n\ + \n\ + ldq $31,256($17) \n\ + xor $28,$1,$1 \n\ + ldq $31,256($18) \n\ + xor $2,$3,$3 # 9 cycles from $3 load \n\ + \n\ + ldq $31,256($19) \n\ + xor $3,$4,$4 # 9 cycles from $4 load \n\ + ldq $31,256($20) \n\ + xor $5,$6,$6 # 8 cycles from $6 load \n\ + \n\ + stq $1,40($17) \n\ + xor $4,$6,$6 \n\ + xor $7,$22,$22 # 7 cycles from $22 load \n\ + xor $23,$24,$24 # 6 cycles from $24 load \n\ + \n\ + stq $6,48($17) \n\ + xor $22,$24,$24 \n\ + ldq $31,256($21) \n\ + xor $24,$25,$25 # 8 cycles from $25 load \n\ + \n\ + stq $25,56($17) \n\ + subq $16,1,$16 \n\ + addq $21,64,$21 \n\ + addq $20,64,$20 \n\ + \n\ + addq $19,64,$19 \n\ + addq $18,64,$18 \n\ + addq $17,64,$17 \n\ + bgt $16,5b \n\ + \n\ + ret \n\ + .end xor_alpha_prefetch_5 \n\ +"); + 
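Illustrative usage sketch (not part of the diff above): the do_N routines follow the xor_blocks calling convention assumed here -- the first argument is the byte count, which the assembly shifts right by 6 and so must be a multiple of 64, and the first buffer serves as both source and destination.

	/* minimal sketch under that assumption: a[i] ^= b[i] over 512 bytes */
	static unsigned long a[512 / sizeof(unsigned long)];
	static unsigned long b[512 / sizeof(unsigned long)];

	static void xor_example(void)
	{
		xor_alpha_2(sizeof(a), a, b);	/* 512 is a multiple of 64 */
	}
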
+static struct xor_block_template xor_block_alpha = { + .name = "alpha", + .do_2 = xor_alpha_2, + .do_3 = xor_alpha_3, + .do_4 = xor_alpha_4, + .do_5 = xor_alpha_5, +}; + +static struct xor_block_template xor_block_alpha_prefetch = { + .name = "alpha prefetch", + .do_2 = xor_alpha_prefetch_2, + .do_3 = xor_alpha_prefetch_3, + .do_4 = xor_alpha_prefetch_4, + .do_5 = xor_alpha_prefetch_5, +}; + +/* For grins, also test the generic routines. */ +#include + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_alpha); \ + xor_speed(&xor_block_alpha_prefetch); \ + } while (0) + +/* Force the use of alpha_prefetch if EV6, as it is significantly + faster in the cold cache case. */ +#define XOR_SELECT_TEMPLATE(FASTEST) \ + (implver() == IMPLVER_EV6 ? &xor_block_alpha_prefetch : FASTEST) diff --git a/include/asm-alpha/8253pit.h b/include/asm-alpha/8253pit.h deleted file mode 100644 index fef5c1450e4..00000000000 --- a/include/asm-alpha/8253pit.h +++ /dev/null @@ -1,10 +0,0 @@ -/* - * 8253/8254 Programmable Interval Timer - */ - -#ifndef _8253PIT_H -#define _8253PIT_H - -#define PIT_TICK_RATE 1193180UL - -#endif diff --git a/include/asm-alpha/Kbuild b/include/asm-alpha/Kbuild deleted file mode 100644 index b7c8f188b31..00000000000 --- a/include/asm-alpha/Kbuild +++ /dev/null @@ -1,11 +0,0 @@ -include include/asm-generic/Kbuild.asm - -header-y += gentrap.h -header-y += regdef.h -header-y += pal.h -header-y += reg.h - -unifdef-y += console.h -unifdef-y += fpu.h -unifdef-y += sysinfo.h -unifdef-y += compiler.h diff --git a/include/asm-alpha/a.out-core.h b/include/asm-alpha/a.out-core.h deleted file mode 100644 index 9e33e92e524..00000000000 --- a/include/asm-alpha/a.out-core.h +++ /dev/null @@ -1,80 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include - -/* - * Fill in the user structure for an ECOFF core dump. - */ -static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump) -{ - /* switch stack follows right below pt_regs: */ - struct switch_stack * sw = ((struct switch_stack *) pt) - 1; - - dump->magic = CMAGIC; - dump->start_code = current->mm->start_code; - dump->start_data = current->mm->start_data; - dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); - dump->u_tsize = ((current->mm->end_code - dump->start_code) - >> PAGE_SHIFT); - dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) - >> PAGE_SHIFT); - dump->u_ssize = (current->mm->start_stack - dump->start_stack - + PAGE_SIZE-1) >> PAGE_SHIFT; - - /* - * We store the registers in an order/format that is - * compatible with DEC Unix/OSF/1 as this makes life easier - * for gdb. 
- */ - dump->regs[EF_V0] = pt->r0; - dump->regs[EF_T0] = pt->r1; - dump->regs[EF_T1] = pt->r2; - dump->regs[EF_T2] = pt->r3; - dump->regs[EF_T3] = pt->r4; - dump->regs[EF_T4] = pt->r5; - dump->regs[EF_T5] = pt->r6; - dump->regs[EF_T6] = pt->r7; - dump->regs[EF_T7] = pt->r8; - dump->regs[EF_S0] = sw->r9; - dump->regs[EF_S1] = sw->r10; - dump->regs[EF_S2] = sw->r11; - dump->regs[EF_S3] = sw->r12; - dump->regs[EF_S4] = sw->r13; - dump->regs[EF_S5] = sw->r14; - dump->regs[EF_S6] = sw->r15; - dump->regs[EF_A3] = pt->r19; - dump->regs[EF_A4] = pt->r20; - dump->regs[EF_A5] = pt->r21; - dump->regs[EF_T8] = pt->r22; - dump->regs[EF_T9] = pt->r23; - dump->regs[EF_T10] = pt->r24; - dump->regs[EF_T11] = pt->r25; - dump->regs[EF_RA] = pt->r26; - dump->regs[EF_T12] = pt->r27; - dump->regs[EF_AT] = pt->r28; - dump->regs[EF_SP] = rdusp(); - dump->regs[EF_PS] = pt->ps; - dump->regs[EF_PC] = pt->pc; - dump->regs[EF_GP] = pt->gp; - dump->regs[EF_A0] = pt->r16; - dump->regs[EF_A1] = pt->r17; - dump->regs[EF_A2] = pt->r18; - memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/include/asm-alpha/a.out.h b/include/asm-alpha/a.out.h deleted file mode 100644 index 02ce8473870..00000000000 --- a/include/asm-alpha/a.out.h +++ /dev/null @@ -1,102 +0,0 @@ -#ifndef __ALPHA_A_OUT_H__ -#define __ALPHA_A_OUT_H__ - -#include - -/* - * OSF/1 ECOFF header structs. ECOFF files consist of: - * - a file header (struct filehdr), - * - an a.out header (struct aouthdr), - * - one or more section headers (struct scnhdr). - * The filhdr's "f_nscns" field contains the - * number of section headers. - */ - -struct filehdr -{ - /* OSF/1 "file" header */ - __u16 f_magic, f_nscns; - __u32 f_timdat; - __u64 f_symptr; - __u32 f_nsyms; - __u16 f_opthdr, f_flags; -}; - -struct aouthdr -{ - __u64 info; /* after that it looks quite normal.. */ - __u64 tsize; - __u64 dsize; - __u64 bsize; - __u64 entry; - __u64 text_start; /* with a few additions that actually make sense */ - __u64 data_start; - __u64 bss_start; - __u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */ - __u64 gpvalue; -}; - -struct scnhdr -{ - char s_name[8]; - __u64 s_paddr; - __u64 s_vaddr; - __u64 s_size; - __u64 s_scnptr; - __u64 s_relptr; - __u64 s_lnnoptr; - __u16 s_nreloc; - __u16 s_nlnno; - __u32 s_flags; -}; - -struct exec -{ - /* OSF/1 "file" header */ - struct filehdr fh; - struct aouthdr ah; -}; - -/* - * Define's so that the kernel exec code can access the a.out header - * fields... - */ -#define a_info ah.info -#define a_text ah.tsize -#define a_data ah.dsize -#define a_bss ah.bsize -#define a_entry ah.entry -#define a_textstart ah.text_start -#define a_datastart ah.data_start -#define a_bssstart ah.bss_start -#define a_gprmask ah.gprmask -#define a_fprmask ah.fprmask -#define a_gpvalue ah.gpvalue - -#define N_TXTADDR(x) ((x).a_textstart) -#define N_DATADDR(x) ((x).a_datastart) -#define N_BSSADDR(x) ((x).a_bssstart) -#define N_DRSIZE(x) 0 -#define N_TRSIZE(x) 0 -#define N_SYMSIZE(x) 0 - -#define AOUTHSZ sizeof(struct aouthdr) -#define SCNHSZ sizeof(struct scnhdr) -#define SCNROUND 16 - -#define N_TXTOFF(x) \ - ((long) N_MAGIC(x) == ZMAGIC ? 0 : \ - (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1)) - -#ifdef __KERNEL__ - -/* Assume that start addresses below 4G belong to a TASO application. - Unfortunately, there is no proper bit in the exec header to check. 
- Worse, we have to notice the start address before swapping to use - /sbin/loader, which of course is _not_ a TASO application. */ -#define SET_AOUT_PERSONALITY(BFPM, EX) \ - set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000L \ - ? ADDR_LIMIT_32BIT : 0) | PER_OSF4)) - -#endif /* __KERNEL__ */ -#endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h deleted file mode 100644 index 26c17913529..00000000000 --- a/include/asm-alpha/agp.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef AGP_H -#define AGP_H 1 - -#include - -/* dummy for now */ - -#define map_page_into_agp(page) -#define unmap_page_from_agp(page) -#define flush_agp_cache() mb() - -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - -/* GATT allocation. Returns/accepts GATT kernel virtual address. */ -#define alloc_gatt_pages(order) \ - ((char *)__get_free_pages(GFP_KERNEL, (order))) -#define free_gatt_pages(table, order) \ - free_pages((unsigned long)(table), (order)) - -#endif diff --git a/include/asm-alpha/agp_backend.h b/include/asm-alpha/agp_backend.h deleted file mode 100644 index 55dd44a2cea..00000000000 --- a/include/asm-alpha/agp_backend.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _ALPHA_AGP_BACKEND_H -#define _ALPHA_AGP_BACKEND_H 1 - -typedef union _alpha_agp_mode { - struct { - u32 rate : 3; - u32 reserved0 : 1; - u32 fw : 1; - u32 fourgb : 1; - u32 reserved1 : 2; - u32 enable : 1; - u32 sba : 1; - u32 reserved2 : 14; - u32 rq : 8; - } bits; - u32 lw; -} alpha_agp_mode; - -typedef struct _alpha_agp_info { - struct pci_controller *hose; - struct { - dma_addr_t bus_base; - unsigned long size; - void *sysdata; - } aperture; - alpha_agp_mode capability; - alpha_agp_mode mode; - void *private; - struct alpha_agp_ops *ops; -} alpha_agp_info; - -struct alpha_agp_ops { - int (*setup)(alpha_agp_info *); - void (*cleanup)(alpha_agp_info *); - int (*configure)(alpha_agp_info *); - int (*bind)(alpha_agp_info *, off_t, struct agp_memory *); - int (*unbind)(alpha_agp_info *, off_t, struct agp_memory *); - unsigned long (*translate)(alpha_agp_info *, dma_addr_t); -}; - - -#endif /* _ALPHA_AGP_BACKEND_H */ diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h deleted file mode 100644 index ca88e54dec9..00000000000 --- a/include/asm-alpha/atomic.h +++ /dev/null @@ -1,267 +0,0 @@ -#ifndef _ALPHA_ATOMIC_H -#define _ALPHA_ATOMIC_H - -#include -#include - -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc... - * - * But use these as seldom as possible since they are much slower - * than regular operations. - */ - - -/* - * Counter is volatile to make sure gcc doesn't try to be clever - * and move things around on us. We need to use _exactly_ the address - * the user gave us, not some alias that contains the same information. - */ -typedef struct { volatile int counter; } atomic_t; -typedef struct { volatile long counter; } atomic64_t; - -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) - -#define atomic_read(v) ((v)->counter + 0) -#define atomic64_read(v) ((v)->counter + 0) - -#define atomic_set(v,i) ((v)->counter = (i)) -#define atomic64_set(v,i) ((v)->counter = (i)) - -/* - * To get proper branch prediction for the main line, we must branch - * forward to code at the end of this object's .text section, then - * branch back to restart the operation. 
- */ - -static __inline__ void atomic_add(int i, atomic_t * v) -{ - unsigned long temp; - __asm__ __volatile__( - "1: ldl_l %0,%1\n" - " addl %0,%2,%0\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter) - :"Ir" (i), "m" (v->counter)); -} - -static __inline__ void atomic64_add(long i, atomic64_t * v) -{ - unsigned long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%2,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter) - :"Ir" (i), "m" (v->counter)); -} - -static __inline__ void atomic_sub(int i, atomic_t * v) -{ - unsigned long temp; - __asm__ __volatile__( - "1: ldl_l %0,%1\n" - " subl %0,%2,%0\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter) - :"Ir" (i), "m" (v->counter)); -} - -static __inline__ void atomic64_sub(long i, atomic64_t * v) -{ - unsigned long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " subq %0,%2,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter) - :"Ir" (i), "m" (v->counter)); -} - - -/* - * Same as above, but return the result value - */ -static inline int atomic_add_return(int i, atomic_t *v) -{ - long temp, result; - smp_mb(); - __asm__ __volatile__( - "1: ldl_l %0,%1\n" - " addl %0,%3,%2\n" - " addl %0,%3,%0\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter), "=&r" (result) - :"Ir" (i), "m" (v->counter) : "memory"); - smp_mb(); - return result; -} - -static __inline__ long atomic64_add_return(long i, atomic64_t * v) -{ - long temp, result; - smp_mb(); - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " addq %0,%3,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter), "=&r" (result) - :"Ir" (i), "m" (v->counter) : "memory"); - smp_mb(); - return result; -} - -static __inline__ long atomic_sub_return(int i, atomic_t * v) -{ - long temp, result; - smp_mb(); - __asm__ __volatile__( - "1: ldl_l %0,%1\n" - " subl %0,%3,%2\n" - " subl %0,%3,%0\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter), "=&r" (result) - :"Ir" (i), "m" (v->counter) : "memory"); - smp_mb(); - return result; -} - -static __inline__ long atomic64_sub_return(long i, atomic64_t * v) -{ - long temp, result; - smp_mb(); - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " subq %0,%3,%2\n" - " subq %0,%3,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (v->counter), "=&r" (result) - :"Ir" (i), "m" (v->counter) : "memory"); - smp_mb(); - return result; -} - -#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) -#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) - -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -/** - * atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. 
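Usage sketch (illustrative, not from the diff): the usual consumer of this primitive is the atomic_inc_not_zero() wrapper defined a few lines further down, i.e. "take a reference only if the object still has one":

	/* hypothetical caller, assuming the atomic_t API shown in this header */
	static int try_get_object(atomic_t *refcount)
	{
		/* non-zero on success, 0 if the count had already dropped to 0 */
		return atomic_inc_not_zero(refcount);	/* atomic_add_unless(refcount, 1, 0) */
	}
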
- */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c != (u); -} - -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -/** - * atomic64_add_unless - add unless the number is a given value - * @v: pointer of type atomic64_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. - */ -static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) -{ - long c, old; - c = atomic64_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic64_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c != (u); -} - -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) - -#define atomic_dec_return(v) atomic_sub_return(1,(v)) -#define atomic64_dec_return(v) atomic64_sub_return(1,(v)) - -#define atomic_inc_return(v) atomic_add_return(1,(v)) -#define atomic64_inc_return(v) atomic64_add_return(1,(v)) - -#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) -#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) - -#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) -#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0) - -#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) -#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) - -#define atomic_inc(v) atomic_add(1,(v)) -#define atomic64_inc(v) atomic64_add(1,(v)) - -#define atomic_dec(v) atomic_sub(1,(v)) -#define atomic64_dec(v) atomic64_sub(1,(v)) - -#define smp_mb__before_atomic_dec() smp_mb() -#define smp_mb__after_atomic_dec() smp_mb() -#define smp_mb__before_atomic_inc() smp_mb() -#define smp_mb__after_atomic_inc() smp_mb() - -#include -#endif /* _ALPHA_ATOMIC_H */ diff --git a/include/asm-alpha/auxvec.h b/include/asm-alpha/auxvec.h deleted file mode 100644 index e96fe880e31..00000000000 --- a/include/asm-alpha/auxvec.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef __ASM_ALPHA_AUXVEC_H -#define __ASM_ALPHA_AUXVEC_H - -/* Reserve these numbers for any future use of a VDSO. */ -#if 0 -#define AT_SYSINFO 32 -#define AT_SYSINFO_EHDR 33 -#endif - -/* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the - value is -1, then the cache doesn't exist. Otherwise: - - bit 0-3: Cache set-associativity; 0 means fully associative. - bit 4-7: Log2 of cacheline size. - bit 8-31: Size of the entire cache >> 8. - bit 32-63: Reserved. 
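A hypothetical userspace decode of one of the AT_*_CACHESHAPE values defined just below, following the bit layout described above (the helper name and the use of printf are illustrative, not from the diff):

	#include <stdio.h>

	static void decode_cacheshape(long shape)
	{
		if (shape == -1) {
			printf("cache not present\n");
			return;
		}
		unsigned int assoc = shape & 0xf;			/* 0 = fully associative  */
		unsigned int line  = 1u << ((shape >> 4) & 0xf);	/* bytes per cache line   */
		unsigned long size = ((shape >> 8) & 0xffffffUL) << 8;	/* bits 8-31 hold size>>8 */

		if (assoc)
			printf("%lu bytes, %u-byte lines, %u-way\n", size, line, assoc);
		else
			printf("%lu bytes, %u-byte lines, fully associative\n", size, line);
	}
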
-*/ - -#define AT_L1I_CACHESHAPE 34 -#define AT_L1D_CACHESHAPE 35 -#define AT_L2_CACHESHAPE 36 -#define AT_L3_CACHESHAPE 37 - -#endif /* __ASM_ALPHA_AUXVEC_H */ diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h deleted file mode 100644 index ac78eba909b..00000000000 --- a/include/asm-alpha/barrier.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __BARRIER_H -#define __BARRIER_H - -#include - -#define mb() \ -__asm__ __volatile__("mb": : :"memory") - -#define rmb() \ -__asm__ __volatile__("mb": : :"memory") - -#define wmb() \ -__asm__ __volatile__("wmb": : :"memory") - -#define read_barrier_depends() \ -__asm__ __volatile__("mb": : :"memory") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) -#endif - -#define set_mb(var, value) \ -do { var = value; mb(); } while (0) - -#endif /* __BARRIER_H */ diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h deleted file mode 100644 index 15f3ae25c51..00000000000 --- a/include/asm-alpha/bitops.h +++ /dev/null @@ -1,466 +0,0 @@ -#ifndef _ALPHA_BITOPS_H -#define _ALPHA_BITOPS_H - -#ifndef _LINUX_BITOPS_H -#error only can be included directly -#endif - -#include -#include - -/* - * Copyright 1994, Linus Torvalds. - */ - -/* - * These have to be done with inline assembly: that way the bit-setting - * is guaranteed to be atomic. All bit operations return 0 if the bit - * was cleared before the operation and != 0 if it was not. - * - * To get proper branch prediction for the main line, we must branch - * forward to code at the end of this object's .text section, then - * branch back to restart the operation. - * - * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). - */ - -static inline void -set_bit(unsigned long nr, volatile void * addr) -{ - unsigned long temp; - int *m = ((int *) addr) + (nr >> 5); - - __asm__ __volatile__( - "1: ldl_l %0,%3\n" - " bis %0,%2,%0\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*m) - :"Ir" (1UL << (nr & 31)), "m" (*m)); -} - -/* - * WARNING: non atomic version. - */ -static inline void -__set_bit(unsigned long nr, volatile void * addr) -{ - int *m = ((int *) addr) + (nr >> 5); - - *m |= 1 << (nr & 31); -} - -#define smp_mb__before_clear_bit() smp_mb() -#define smp_mb__after_clear_bit() smp_mb() - -static inline void -clear_bit(unsigned long nr, volatile void * addr) -{ - unsigned long temp; - int *m = ((int *) addr) + (nr >> 5); - - __asm__ __volatile__( - "1: ldl_l %0,%3\n" - " bic %0,%2,%0\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*m) - :"Ir" (1UL << (nr & 31)), "m" (*m)); -} - -static inline void -clear_bit_unlock(unsigned long nr, volatile void * addr) -{ - smp_mb(); - clear_bit(nr, addr); -} - -/* - * WARNING: non atomic version. 
- */ -static __inline__ void -__clear_bit(unsigned long nr, volatile void * addr) -{ - int *m = ((int *) addr) + (nr >> 5); - - *m &= ~(1 << (nr & 31)); -} - -static inline void -__clear_bit_unlock(unsigned long nr, volatile void * addr) -{ - smp_mb(); - __clear_bit(nr, addr); -} - -static inline void -change_bit(unsigned long nr, volatile void * addr) -{ - unsigned long temp; - int *m = ((int *) addr) + (nr >> 5); - - __asm__ __volatile__( - "1: ldl_l %0,%3\n" - " xor %0,%2,%0\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*m) - :"Ir" (1UL << (nr & 31)), "m" (*m)); -} - -/* - * WARNING: non atomic version. - */ -static __inline__ void -__change_bit(unsigned long nr, volatile void * addr) -{ - int *m = ((int *) addr) + (nr >> 5); - - *m ^= 1 << (nr & 31); -} - -static inline int -test_and_set_bit(unsigned long nr, volatile void *addr) -{ - unsigned long oldbit; - unsigned long temp; - int *m = ((int *) addr) + (nr >> 5); - - __asm__ __volatile__( -#ifdef CONFIG_SMP - " mb\n" -#endif - "1: ldl_l %0,%4\n" - " and %0,%3,%2\n" - " bne %2,2f\n" - " xor %0,%3,%0\n" - " stl_c %0,%1\n" - " beq %0,3f\n" - "2:\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "3: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*m), "=&r" (oldbit) - :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); - - return oldbit != 0; -} - -static inline int -test_and_set_bit_lock(unsigned long nr, volatile void *addr) -{ - unsigned long oldbit; - unsigned long temp; - int *m = ((int *) addr) + (nr >> 5); - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " and %0,%3,%2\n" - " bne %2,2f\n" - " xor %0,%3,%0\n" - " stl_c %0,%1\n" - " beq %0,3f\n" - "2:\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "3: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*m), "=&r" (oldbit) - :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); - - return oldbit != 0; -} - -/* - * WARNING: non atomic version. - */ -static inline int -__test_and_set_bit(unsigned long nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - int *m = ((int *) addr) + (nr >> 5); - int old = *m; - - *m = old | mask; - return (old & mask) != 0; -} - -static inline int -test_and_clear_bit(unsigned long nr, volatile void * addr) -{ - unsigned long oldbit; - unsigned long temp; - int *m = ((int *) addr) + (nr >> 5); - - __asm__ __volatile__( -#ifdef CONFIG_SMP - " mb\n" -#endif - "1: ldl_l %0,%4\n" - " and %0,%3,%2\n" - " beq %2,2f\n" - " xor %0,%3,%0\n" - " stl_c %0,%1\n" - " beq %0,3f\n" - "2:\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "3: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*m), "=&r" (oldbit) - :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); - - return oldbit != 0; -} - -/* - * WARNING: non atomic version. 
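Illustrative note (not from the diff): all of the bit helpers above share the same indexing -- bit nr lives in 32-bit word nr >> 5 under mask 1 << (nr & 31), so e.g. nr = 37 selects word 1, bit 5:

	/* hypothetical read-only example mirroring test_bit()'s arithmetic */
	static int bit_37_is_set(const void *addr)
	{
		const int *m = (const int *)addr + (37 >> 5);	/* word 1 */
		return (*m >> (37 & 31)) & 1;			/* bit 5  */
	}
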
- */ -static inline int -__test_and_clear_bit(unsigned long nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - int *m = ((int *) addr) + (nr >> 5); - int old = *m; - - *m = old & ~mask; - return (old & mask) != 0; -} - -static inline int -test_and_change_bit(unsigned long nr, volatile void * addr) -{ - unsigned long oldbit; - unsigned long temp; - int *m = ((int *) addr) + (nr >> 5); - - __asm__ __volatile__( -#ifdef CONFIG_SMP - " mb\n" -#endif - "1: ldl_l %0,%4\n" - " and %0,%3,%2\n" - " xor %0,%3,%0\n" - " stl_c %0,%1\n" - " beq %0,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "3: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (*m), "=&r" (oldbit) - :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); - - return oldbit != 0; -} - -/* - * WARNING: non atomic version. - */ -static __inline__ int -__test_and_change_bit(unsigned long nr, volatile void * addr) -{ - unsigned long mask = 1 << (nr & 0x1f); - int *m = ((int *) addr) + (nr >> 5); - int old = *m; - - *m = old ^ mask; - return (old & mask) != 0; -} - -static inline int -test_bit(int nr, const volatile void * addr) -{ - return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; -} - -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. - * - * Do a binary search on the bits. Due to the nature of large - * constants on the alpha, it is worthwhile to split the search. - */ -static inline unsigned long ffz_b(unsigned long x) -{ - unsigned long sum, x1, x2, x4; - - x = ~x & -~x; /* set first 0 bit, clear others */ - x1 = x & 0xAA; - x2 = x & 0xCC; - x4 = x & 0xF0; - sum = x2 ? 2 : 0; - sum += (x4 != 0) * 4; - sum += (x1 != 0); - - return sum; -} - -static inline unsigned long ffz(unsigned long word) -{ -#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) - /* Whee. EV67 can calculate it directly. */ - return __kernel_cttz(~word); -#else - unsigned long bits, qofs, bofs; - - bits = __kernel_cmpbge(word, ~0UL); - qofs = ffz_b(bits); - bits = __kernel_extbl(word, qofs); - bofs = ffz_b(bits); - - return qofs*8 + bofs; -#endif -} - -/* - * __ffs = Find First set bit in word. Undefined if no set bit exists. - */ -static inline unsigned long __ffs(unsigned long word) -{ -#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) - /* Whee. EV67 can calculate it directly. */ - return __kernel_cttz(word); -#else - unsigned long bits, qofs, bofs; - - bits = __kernel_cmpbge(0, word); - qofs = ffz_b(bits); - bits = __kernel_extbl(word, qofs); - bofs = ffz_b(~bits); - - return qofs*8 + bofs; -#endif -} - -#ifdef __KERNEL__ - -/* - * ffs: find first bit set. This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above __ffs. - */ - -static inline int ffs(int word) -{ - int result = __ffs(word) + 1; - return word ? result : 0; -} - -/* - * fls: find last bit set. 
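Worked example (illustrative, not from the diff) of the position-encoding trick used by ffz_b() above: for x = 0xf7 the first zero is bit 3; ~x & -~x isolates it as 0x08, which intersects the 0xCC and 0xAA masks but not 0xF0, so the sum comes out as 2 + 0 + 1 = 3:

	static unsigned long ffz_b_demo(void)
	{
		unsigned long x = 0xf7;		/* bit 3 is the first zero bit */
		x = ~x & -~x;			/* x == 0x08                   */
		return (x & 0xCC ? 2 : 0)
		     + ((x & 0xF0) != 0) * 4
		     + ((x & 0xAA) != 0);	/* == 3                        */
	}
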
- */ -#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) -static inline int fls64(unsigned long word) -{ - return 64 - __kernel_ctlz(word); -} -#else -extern const unsigned char __flsm1_tab[256]; - -static inline int fls64(unsigned long x) -{ - unsigned long t, a, r; - - t = __kernel_cmpbge (x, 0x0101010101010101UL); - a = __flsm1_tab[t]; - t = __kernel_extbl (x, a); - r = a*8 + __flsm1_tab[t] + (x != 0); - - return r; -} -#endif - -static inline unsigned long __fls(unsigned long x) -{ - return fls64(x) - 1; -} - -static inline int fls(int x) -{ - return fls64((unsigned int) x); -} - -/* - * hweightN: returns the hamming weight (i.e. the number - * of bits set) of a N-bit word - */ - -#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) -/* Whee. EV67 can calculate it directly. */ -static inline unsigned long hweight64(unsigned long w) -{ - return __kernel_ctpop(w); -} - -static inline unsigned int hweight32(unsigned int w) -{ - return hweight64(w); -} - -static inline unsigned int hweight16(unsigned int w) -{ - return hweight64(w & 0xffff); -} - -static inline unsigned int hweight8(unsigned int w) -{ - return hweight64(w & 0xff); -} -#else -#include -#endif - -#endif /* __KERNEL__ */ - -#include - -#ifdef __KERNEL__ - -/* - * Every architecture must define this function. It's the fastest - * way of searching a 140-bit bitmap where the first 100 bits are - * unlikely to be set. It's guaranteed that at least one of the 140 - * bits is set. - */ -static inline unsigned long -sched_find_first_bit(unsigned long b[3]) -{ - unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; - unsigned long ofs; - - ofs = (b1 ? 64 : 128); - b1 = (b1 ? b1 : b2); - ofs = (b0 ? 0 : ofs); - b0 = (b0 ? b0 : b1); - - return __ffs(b0) + ofs; -} - -#include - -#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) - -#include - -#endif /* __KERNEL__ */ - -#endif /* _ALPHA_BITOPS_H */ diff --git a/include/asm-alpha/bug.h b/include/asm-alpha/bug.h deleted file mode 100644 index 695a5ee4b5d..00000000000 --- a/include/asm-alpha/bug.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _ALPHA_BUG_H -#define _ALPHA_BUG_H - -#include - -#ifdef CONFIG_BUG -#include - -/* ??? Would be nice to use .gprel32 here, but we can't be sure that the - function loaded the GP, so this could fail in modules. */ -static inline void ATTRIB_NORET __BUG(const char *file, int line) -{ - __asm__ __volatile__( - "call_pal %0 # bugchk\n\t" - ".long %1\n\t.8byte %2" - : : "i" (PAL_bugchk), "i"(line), "i"(file)); - for ( ; ; ) - ; -} - -#define BUG() __BUG(__FILE__, __LINE__) - -#define HAVE_ARCH_BUG -#endif - -#include - -#endif diff --git a/include/asm-alpha/bugs.h b/include/asm-alpha/bugs.h deleted file mode 100644 index 78030d1c7e7..00000000000 --- a/include/asm-alpha/bugs.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * include/asm-alpha/bugs.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -/* - * I don't know of any alpha bugs yet.. 
Nice chip - */ - -static void check_bugs(void) -{ -} diff --git a/include/asm-alpha/byteorder.h b/include/asm-alpha/byteorder.h deleted file mode 100644 index 58e958fc7f1..00000000000 --- a/include/asm-alpha/byteorder.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef _ALPHA_BYTEORDER_H -#define _ALPHA_BYTEORDER_H - -#include -#include -#include - -#ifdef __GNUC__ - -static inline __attribute_const__ __u32 __arch__swab32(__u32 x) -{ - /* - * Unfortunately, we can't use the 6 instruction sequence - * on ev6 since the latency of the UNPKBW is 3, which is - * pretty hard to hide. Just in case a future implementation - * has a lower latency, here's the sequence (also by Mike Burrows) - * - * UNPKBW a0, v0 v0: 00AA00BB00CC00DD - * SLL v0, 24, a0 a0: BB00CC00DD000000 - * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD - * EXTWL a0, 6, v0 v0: 000000000000BBAA - * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000 - * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA - */ - - __u64 t0, t1, t2, t3; - - t0 = __kernel_inslh(x, 7); /* t0 : 0000000000AABBCC */ - t1 = __kernel_inswl(x, 3); /* t1 : 000000CCDD000000 */ - t1 |= t0; /* t1 : 000000CCDDAABBCC */ - t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */ - t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */ - t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */ - t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */ - - return t1; -} - -#define __arch__swab32 __arch__swab32 - -#endif /* __GNUC__ */ - -#define __BYTEORDER_HAS_U64__ - -#include - -#endif /* _ALPHA_BYTEORDER_H */ diff --git a/include/asm-alpha/cache.h b/include/asm-alpha/cache.h deleted file mode 100644 index f199e69a5d0..00000000000 --- a/include/asm-alpha/cache.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * include/asm-alpha/cache.h - */ -#ifndef __ARCH_ALPHA_CACHE_H -#define __ARCH_ALPHA_CACHE_H - - -/* Bytes per L1 (data) cache line. */ -#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) -# define L1_CACHE_BYTES 64 -# define L1_CACHE_SHIFT 6 -#else -/* Both EV4 and EV5 are write-through, read-allocate, - direct-mapped, physical. -*/ -# define L1_CACHE_BYTES 32 -# define L1_CACHE_SHIFT 5 -#endif - -#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) -#define SMP_CACHE_BYTES L1_CACHE_BYTES - -#endif diff --git a/include/asm-alpha/cacheflush.h b/include/asm-alpha/cacheflush.h deleted file mode 100644 index b686cc7fc44..00000000000 --- a/include/asm-alpha/cacheflush.h +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef _ALPHA_CACHEFLUSH_H -#define _ALPHA_CACHEFLUSH_H - -#include - -/* Caches aren't brain-dead on the Alpha. */ -#define flush_cache_all() do { } while (0) -#define flush_cache_mm(mm) do { } while (0) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma, start, end) do { } while (0) -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define flush_dcache_page(page) do { } while (0) -#define flush_dcache_mmap_lock(mapping) do { } while (0) -#define flush_dcache_mmap_unlock(mapping) do { } while (0) -#define flush_cache_vmap(start, end) do { } while (0) -#define flush_cache_vunmap(start, end) do { } while (0) - -/* Note that the following two definitions are _highly_ dependent - on the contexts in which they are used in the kernel. I personally - think it is criminal how loosely defined these macros are. */ - -/* We need to flush the kernel's icache after loading modules. The - only other use of this macro is in load_aout_interp which is not - used on Alpha. - - Note that this definition should *not* be used for userspace - icache flushing. While functional, it is _way_ overkill. 
The - icache is tagged with ASNs and it suffices to allocate a new ASN - for the process. */ -#ifndef CONFIG_SMP -#define flush_icache_range(start, end) imb() -#else -#define flush_icache_range(start, end) smp_imb() -extern void smp_imb(void); -#endif - -/* We need to flush the userspace icache after setting breakpoints in - ptrace. - - Instead of indiscriminately using imb, take advantage of the fact - that icache entries are tagged with the ASN and load a new mm context. */ -/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ - -#ifndef CONFIG_SMP -extern void __load_new_mm_context(struct mm_struct *); -static inline void -flush_icache_user_range(struct vm_area_struct *vma, struct page *page, - unsigned long addr, int len) -{ - if (vma->vm_flags & VM_EXEC) { - struct mm_struct *mm = vma->vm_mm; - if (current->active_mm == mm) - __load_new_mm_context(mm); - else - mm->context[smp_processor_id()] = 0; - } -} -#else -extern void flush_icache_user_range(struct vm_area_struct *vma, - struct page *page, unsigned long addr, int len); -#endif - -/* This is used only in do_no_page and do_swap_page. */ -#define flush_icache_page(vma, page) \ - flush_icache_user_range((vma), (page), 0, 0) - -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ -do { memcpy(dst, src, len); \ - flush_icache_user_range(vma, page, vaddr, len); \ -} while (0) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) - -#endif /* _ALPHA_CACHEFLUSH_H */ diff --git a/include/asm-alpha/checksum.h b/include/asm-alpha/checksum.h deleted file mode 100644 index d3854bbf0a9..00000000000 --- a/include/asm-alpha/checksum.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _ALPHA_CHECKSUM_H -#define _ALPHA_CHECKSUM_H - -#include - -/* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. 
- */ -extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); - -/* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ -extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum); - -__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, unsigned short proto, - __wsum sum); - -/* - * computes the checksum of a memory block at buff, length len, - * and adds in "sum" (32-bit) - * - * returns a 32-bit number suitable for feeding into itself - * or csum_tcpudp_magic - * - * this function must be called with even lengths, except - * for the last fragment, which may be odd - * - * it's best to have buff aligned on a 32-bit boundary - */ -extern __wsum csum_partial(const void *buff, int len, __wsum sum); - -/* - * the same as csum_partial, but copies from src while it - * checksums - * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ -__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp); - -__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); - - -/* - * this routine is used for miscellaneous IP-like checksums, mainly - * in icmp.c - */ - -extern __sum16 ip_compute_csum(const void *buff, int len); - -/* - * Fold a partial checksum without adding pseudo headers - */ - -static inline __sum16 csum_fold(__wsum csum) -{ - u32 sum = (__force u32)csum; - sum = (sum & 0xffff) + (sum >> 16); - sum = (sum & 0xffff) + (sum >> 16); - return (__force __sum16)~sum; -} - -#define _HAVE_ARCH_IPV6_CSUM -extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum); -#endif diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h deleted file mode 100644 index da6bb199839..00000000000 --- a/include/asm-alpha/compiler.h +++ /dev/null @@ -1,130 +0,0 @@ -#ifndef __ALPHA_COMPILER_H -#define __ALPHA_COMPILER_H - -/* - * Herein are macros we use when describing various patterns we want to GCC. - * In all cases we can get better schedules out of the compiler if we hide - * as little as possible inside inline assembly. However, we want to be - * able to know what we'll get out before giving up inline assembly. Thus - * these tests and macros. 
- */ - -#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 -# define __kernel_insbl(val, shift) __builtin_alpha_insbl(val, shift) -# define __kernel_inswl(val, shift) __builtin_alpha_inswl(val, shift) -# define __kernel_insql(val, shift) __builtin_alpha_insql(val, shift) -# define __kernel_inslh(val, shift) __builtin_alpha_inslh(val, shift) -# define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift) -# define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift) -# define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b) -#else -# define __kernel_insbl(val, shift) \ - ({ unsigned long __kir; \ - __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ - __kir; }) -# define __kernel_inswl(val, shift) \ - ({ unsigned long __kir; \ - __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ - __kir; }) -# define __kernel_insql(val, shift) \ - ({ unsigned long __kir; \ - __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ - __kir; }) -# define __kernel_inslh(val, shift) \ - ({ unsigned long __kir; \ - __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ - __kir; }) -# define __kernel_extbl(val, shift) \ - ({ unsigned long __kir; \ - __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ - __kir; }) -# define __kernel_extwl(val, shift) \ - ({ unsigned long __kir; \ - __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ - __kir; }) -# define __kernel_cmpbge(a, b) \ - ({ unsigned long __kir; \ - __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ - __kir; }) -#endif - -#ifdef __alpha_cix__ -# if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 -# define __kernel_cttz(x) __builtin_ctzl(x) -# define __kernel_ctlz(x) __builtin_clzl(x) -# define __kernel_ctpop(x) __builtin_popcountl(x) -# else -# define __kernel_cttz(x) \ - ({ unsigned long __kir; \ - __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \ - __kir; }) -# define __kernel_ctlz(x) \ - ({ unsigned long __kir; \ - __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ - __kir; }) -# define __kernel_ctpop(x) \ - ({ unsigned long __kir; \ - __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ - __kir; }) -# endif -#else -# define __kernel_cttz(x) \ - ({ unsigned long __kir; \ - __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \ - __kir; }) -# define __kernel_ctlz(x) \ - ({ unsigned long __kir; \ - __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ - __kir; }) -# define __kernel_ctpop(x) \ - ({ unsigned long __kir; \ - __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ - __kir; }) -#endif - - -/* - * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX - * extension is enabled. Previous versions did not define anything - * we could test during compilation -- too bad, so sad. 
- */ - -#if defined(__alpha_bwx__) -#define __kernel_ldbu(mem) (mem) -#define __kernel_ldwu(mem) (mem) -#define __kernel_stb(val,mem) ((mem) = (val)) -#define __kernel_stw(val,mem) ((mem) = (val)) -#else -#define __kernel_ldbu(mem) \ - ({ unsigned char __kir; \ - __asm__(".arch ev56; \ - ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \ - __kir; }) -#define __kernel_ldwu(mem) \ - ({ unsigned short __kir; \ - __asm__(".arch ev56; \ - ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \ - __kir; }) -#define __kernel_stb(val,mem) \ - __asm__(".arch ev56; \ - stb %1,%0" : "=m"(mem) : "r"(val)) -#define __kernel_stw(val,mem) \ - __asm__(".arch ev56; \ - stw %1,%0" : "=m"(mem) : "r"(val)) -#endif - -#ifdef __KERNEL__ -/* Some idiots over in thought inline should imply - always_inline. This breaks stuff. We'll include this file whenever - we run into such problems. */ - -#include -#undef inline -#undef __inline__ -#undef __inline -#undef __always_inline -#define __always_inline inline __attribute__((always_inline)) - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_COMPILER_H */ diff --git a/include/asm-alpha/console.h b/include/asm-alpha/console.h deleted file mode 100644 index a3ce4e62249..00000000000 --- a/include/asm-alpha/console.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef __AXP_CONSOLE_H -#define __AXP_CONSOLE_H - -/* - * Console callback routine numbers - */ -#define CCB_GETC 0x01 -#define CCB_PUTS 0x02 -#define CCB_RESET_TERM 0x03 -#define CCB_SET_TERM_INT 0x04 -#define CCB_SET_TERM_CTL 0x05 -#define CCB_PROCESS_KEYCODE 0x06 -#define CCB_OPEN_CONSOLE 0x07 -#define CCB_CLOSE_CONSOLE 0x08 - -#define CCB_OPEN 0x10 -#define CCB_CLOSE 0x11 -#define CCB_IOCTL 0x12 -#define CCB_READ 0x13 -#define CCB_WRITE 0x14 - -#define CCB_SET_ENV 0x20 -#define CCB_RESET_ENV 0x21 -#define CCB_GET_ENV 0x22 -#define CCB_SAVE_ENV 0x23 - -#define CCB_PSWITCH 0x30 -#define CCB_BIOS_EMUL 0x32 - -/* - * Environment variable numbers - */ -#define ENV_AUTO_ACTION 0x01 -#define ENV_BOOT_DEV 0x02 -#define ENV_BOOTDEF_DEV 0x03 -#define ENV_BOOTED_DEV 0x04 -#define ENV_BOOT_FILE 0x05 -#define ENV_BOOTED_FILE 0x06 -#define ENV_BOOT_OSFLAGS 0x07 -#define ENV_BOOTED_OSFLAGS 0x08 -#define ENV_BOOT_RESET 0x09 -#define ENV_DUMP_DEV 0x0A -#define ENV_ENABLE_AUDIT 0x0B -#define ENV_LICENSE 0x0C -#define ENV_CHAR_SET 0x0D -#define ENV_LANGUAGE 0x0E -#define ENV_TTY_DEV 0x0F - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ -extern long callback_puts(long unit, const char *s, long length); -extern long callback_getc(long unit); -extern long callback_open_console(void); -extern long callback_close_console(void); -extern long callback_open(const char *device, long length); -extern long callback_close(long unit); -extern long callback_read(long channel, long count, const char *buf, long lbn); -extern long callback_getenv(long id, const char *buf, unsigned long buf_size); -extern long callback_setenv(long id, const char *buf, unsigned long buf_size); -extern long callback_save_env(void); - -extern int srm_fixup(unsigned long new_callback_addr, - unsigned long new_hwrpb_addr); -extern long srm_puts(const char *, long); -extern long srm_printk(const char *, ...) 
- __attribute__ ((format (printf, 1, 2))); - -struct crb_struct; -struct hwrpb_struct; -extern int callback_init_done; -extern void * callback_init(void *); -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ - -#endif /* __AXP_CONSOLE_H */ diff --git a/include/asm-alpha/core_apecs.h b/include/asm-alpha/core_apecs.h deleted file mode 100644 index 6785ff7e02b..00000000000 --- a/include/asm-alpha/core_apecs.h +++ /dev/null @@ -1,517 +0,0 @@ -#ifndef __ALPHA_APECS__H__ -#define __ALPHA_APECS__H__ - -#include -#include - -/* - * APECS is the internal name for the 2107x chipset which provides - * memory controller and PCI access for the 21064 chip based systems. - * - * This file is based on: - * - * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets - * Data Sheet - * - * EC-N0648-72 - * - * - * david.rusling@reo.mts.dec.com Initial Version. - * - */ - -/* - An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address - that get passed through the PCI<->ISA bridge chip. So we've gotta use - both windows to max out the physical memory we can DMA to. Sigh... - - If we try a window at 0 for 1GB as a work-around, we run into conflicts - with ISA/PCI bus memory which can't be relocated, like VGA aperture and - BIOS ROMs. So we must put the windows high enough to avoid these areas. - - We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1, - and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1. - Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually - be used for that range (via virt_to_bus()). - - Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb, - to keep virt_to_bus() from returning an address in the first window, for - a data area that goes beyond the 64Mb first DMA window. Sigh... - The fudge factor MUST match with MAX_DMA_ADDRESS, but - we can't just use that here, because of header file looping... :-( - - Window 1 will be used for all DMA from the ISA bus; yes, that does - limit what memory an ISA floppy or sound card or Ethernet can touch, but - it's also a known limitation on other platforms as well. We use the - same technique that is used on INTEL platforms with similar limitation: - set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init(). - We trust that any ISA bus device drivers will *always* ask for DMAable - memory explicitly via kmalloc()/get_free_pages() flags arguments. - - Note that most PCI bus devices' drivers do *not* explicitly ask for - DMAable memory; they count on being able to DMA to any memory they - get from kmalloc()/get_free_pages(). They will also use window 1 for - any physical memory accesses below 64Mb; the rest will be handled by - window 2, maxing out at 1Gb of memory. I trust this is enough... :-) - - We hope that the area before the first window is large enough so that - there will be no overlap at the top end (64Mb). We *must* locate the - PCI cards' memory just below window 1, so that there's still the - possibility of being able to access it via SPARSE space. This is - important for cards such as the Matrox Millennium, whose Xserver - wants to access memory-mapped registers in byte and short lengths. - - Note that the XL is treated differently from the AVANTI, even though - for most other things they are identical. It didn't seem reasonable to - make the AVANTI support pay for the limitations of the XL. It is true, - however, that an XL kernel will run on an AVANTI without problems. 
- - %%% All of this should be obviated by the ability to route - everything through the iommu. -*/ - -/* - * 21071-DA Control and Status registers. - * These are used for PCI memory access. - */ -#define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL) -#define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL) -#define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL) -#define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL) -#define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL) -#define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL) - -#define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL) -#define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL) - -#define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL) -#define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL) - -#define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL) -#define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL) - -#define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL) -#define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL) -#define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL) - -#define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL) - -#define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL) -#define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL) -#define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL) -#define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL) -#define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL) -#define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL) -#define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL) -#define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL) - -#define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL) -#define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL) -#define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL) -#define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL) -#define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL) -#define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL) -#define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL) -#define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL) - -#define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL) - - -/* - * 21071-CA Control and Status registers. - * These are used to program memory timing, - * configure memory and initialise the B-Cache. 
- */ -#define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL) -#define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL) -#define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL) -#define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL) -#define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL) -#define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL) -#define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL) -#define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL) -#define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL) -#define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL) -#define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL) -#define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL) -#define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL) - -/* Bank x Base Address Register */ -#define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL) -#define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL) -#define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL) -#define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL) -#define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL) -#define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL) -#define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL) -#define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL) -#define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL) - -/* Bank x Configuration Register */ -#define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL) -#define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL) -#define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL) -#define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL) -#define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL) -#define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL) -#define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL) -#define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL) -#define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL) - -/* Bank x Timing Register A */ -#define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL) -#define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL) -#define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL) -#define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL) -#define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL) -#define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL) -#define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL) -#define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL) -#define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL) - -/* Bank x Timing Register B */ -#define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL) -#define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL) -#define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL) -#define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL) -#define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL) -#define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL) -#define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL) -#define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL) -#define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL) - - -/* - * Memory spaces: - */ -#define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL) -#define APECS_CONF (IDENT_ADDR + 0x1e0000000UL) -#define APECS_IO (IDENT_ADDR + 0x1c0000000UL) -#define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL) -#define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL) - - -/* - * Bit definitions for I/O Controller status register 0: - */ -#define APECS_IOC_STAT0_CMD 0xf -#define APECS_IOC_STAT0_ERR (1<<4) -#define APECS_IOC_STAT0_LOST (1<<5) -#define APECS_IOC_STAT0_THIT (1<<6) -#define APECS_IOC_STAT0_TREF (1<<7) -#define APECS_IOC_STAT0_CODE_SHIFT 8 -#define APECS_IOC_STAT0_CODE_MASK 0x7 -#define APECS_IOC_STAT0_P_NBR_SHIFT 13 -#define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff - -#define APECS_HAE_ADDRESS APECS_IOC_HAXR1 - - -/* - * Data 
structure for handling APECS machine checks: - */ - -struct el_apecs_mikasa_sysdata_mcheck -{ - unsigned long coma_gcr; - unsigned long coma_edsr; - unsigned long coma_ter; - unsigned long coma_elar; - unsigned long coma_ehar; - unsigned long coma_ldlr; - unsigned long coma_ldhr; - unsigned long coma_base0; - unsigned long coma_base1; - unsigned long coma_base2; - unsigned long coma_base3; - unsigned long coma_cnfg0; - unsigned long coma_cnfg1; - unsigned long coma_cnfg2; - unsigned long coma_cnfg3; - unsigned long epic_dcsr; - unsigned long epic_pear; - unsigned long epic_sear; - unsigned long epic_tbr1; - unsigned long epic_tbr2; - unsigned long epic_pbr1; - unsigned long epic_pbr2; - unsigned long epic_pmr1; - unsigned long epic_pmr2; - unsigned long epic_harx1; - unsigned long epic_harx2; - unsigned long epic_pmlt; - unsigned long epic_tag0; - unsigned long epic_tag1; - unsigned long epic_tag2; - unsigned long epic_tag3; - unsigned long epic_tag4; - unsigned long epic_tag5; - unsigned long epic_tag6; - unsigned long epic_tag7; - unsigned long epic_data0; - unsigned long epic_data1; - unsigned long epic_data2; - unsigned long epic_data3; - unsigned long epic_data4; - unsigned long epic_data5; - unsigned long epic_data6; - unsigned long epic_data7; - - unsigned long pceb_vid; - unsigned long pceb_did; - unsigned long pceb_revision; - unsigned long pceb_command; - unsigned long pceb_status; - unsigned long pceb_latency; - unsigned long pceb_control; - unsigned long pceb_arbcon; - unsigned long pceb_arbpri; - - unsigned long esc_id; - unsigned long esc_revision; - unsigned long esc_int0; - unsigned long esc_int1; - unsigned long esc_elcr0; - unsigned long esc_elcr1; - unsigned long esc_last_eisa; - unsigned long esc_nmi_stat; - - unsigned long pci_ir; - unsigned long pci_imr; - unsigned long svr_mgr; -}; - -/* This for the normal APECS machines. */ -struct el_apecs_sysdata_mcheck -{ - unsigned long coma_gcr; - unsigned long coma_edsr; - unsigned long coma_ter; - unsigned long coma_elar; - unsigned long coma_ehar; - unsigned long coma_ldlr; - unsigned long coma_ldhr; - unsigned long coma_base0; - unsigned long coma_base1; - unsigned long coma_base2; - unsigned long coma_cnfg0; - unsigned long coma_cnfg1; - unsigned long coma_cnfg2; - unsigned long epic_dcsr; - unsigned long epic_pear; - unsigned long epic_sear; - unsigned long epic_tbr1; - unsigned long epic_tbr2; - unsigned long epic_pbr1; - unsigned long epic_pbr2; - unsigned long epic_pmr1; - unsigned long epic_pmr2; - unsigned long epic_harx1; - unsigned long epic_harx2; - unsigned long epic_pmlt; - unsigned long epic_tag0; - unsigned long epic_tag1; - unsigned long epic_tag2; - unsigned long epic_tag3; - unsigned long epic_tag4; - unsigned long epic_tag5; - unsigned long epic_tag6; - unsigned long epic_tag7; - unsigned long epic_data0; - unsigned long epic_data1; - unsigned long epic_data2; - unsigned long epic_data3; - unsigned long epic_data4; - unsigned long epic_data5; - unsigned long epic_data6; - unsigned long epic_data7; -}; - -struct el_apecs_procdata -{ - unsigned long paltemp[32]; /* PAL TEMP REGS. */ - /* EV4-specific fields */ - unsigned long exc_addr; /* Address of excepting instruction. */ - unsigned long exc_sum; /* Summary of arithmetic traps. */ - unsigned long exc_mask; /* Exception mask (from exc_sum). */ - unsigned long iccsr; /* IBox hardware enables. */ - unsigned long pal_base; /* Base address for PALcode. */ - unsigned long hier; /* Hardware Interrupt Enable. 
*/ - unsigned long hirr; /* Hardware Interrupt Request. */ - unsigned long csr; /* D-stream fault info. */ - unsigned long dc_stat; /* D-cache status (ECC/Parity Err). */ - unsigned long dc_addr; /* EV3 Phys Addr for ECC/DPERR. */ - unsigned long abox_ctl; /* ABox Control Register. */ - unsigned long biu_stat; /* BIU Status. */ - unsigned long biu_addr; /* BUI Address. */ - unsigned long biu_ctl; /* BIU Control. */ - unsigned long fill_syndrome;/* For correcting ECC errors. */ - unsigned long fill_addr; /* Cache block which was being read */ - unsigned long va; /* Effective VA of fault or miss. */ - unsigned long bc_tag; /* Backup Cache Tag Probe Results.*/ -}; - - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * Unlike Jensen, the APECS machines have no concept of local - * I/O---everything goes over the PCI bus. - * - * There is plenty room for optimization here. In particular, - * the Alpha's insb/insw/extb/extw should be useful in moving - * data to/from the right byte-lanes. - */ - -#define vip volatile int __force * -#define vuip volatile unsigned int __force * -#define vulp volatile unsigned long __force * - -#define APECS_SET_HAE \ - do { \ - if (addr >= (1UL << 24)) { \ - unsigned long msb = addr & 0xf8000000; \ - addr -= msb; \ - set_hae(msb); \ - } \ - } while (0) - -__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long result, base_and_type; - - if (addr >= APECS_DENSE_MEM) { - addr -= APECS_DENSE_MEM; - APECS_SET_HAE; - base_and_type = APECS_SPARSE_MEM + 0x00; - } else { - addr -= APECS_IO; - base_and_type = APECS_IO + 0x00; - } - - result = *(vip) ((addr << 5) + base_and_type); - return __kernel_extbl(result, addr & 3); -} - -__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long w, base_and_type; - - if (addr >= APECS_DENSE_MEM) { - addr -= APECS_DENSE_MEM; - APECS_SET_HAE; - base_and_type = APECS_SPARSE_MEM + 0x00; - } else { - addr -= APECS_IO; - base_and_type = APECS_IO + 0x00; - } - - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + base_and_type) = w; -} - -__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long result, base_and_type; - - if (addr >= APECS_DENSE_MEM) { - addr -= APECS_DENSE_MEM; - APECS_SET_HAE; - base_and_type = APECS_SPARSE_MEM + 0x08; - } else { - addr -= APECS_IO; - base_and_type = APECS_IO + 0x08; - } - - result = *(vip) ((addr << 5) + base_and_type); - return __kernel_extwl(result, addr & 3); -} - -__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long w, base_and_type; - - if (addr >= APECS_DENSE_MEM) { - addr -= APECS_DENSE_MEM; - APECS_SET_HAE; - base_and_type = APECS_SPARSE_MEM + 0x08; - } else { - addr -= APECS_IO; - base_and_type = APECS_IO + 0x08; - } - - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + base_and_type) = w; -} - -__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - if (addr < APECS_DENSE_MEM) - addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; - return *(vuip)addr; -} - -__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - if (addr < APECS_DENSE_MEM) - addr = ((addr - APECS_IO) << 5) + 
APECS_IO + 0x18; - *(vuip)addr = b; -} - -__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + APECS_IO); -} - -__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + APECS_DENSE_MEM); -} - -__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr) -{ - return addr >= IDENT_ADDR + 0x180000000UL; -} - -__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr) -{ - return (unsigned long)addr >= APECS_DENSE_MEM; -} - -#undef APECS_SET_HAE - -#undef vip -#undef vuip -#undef vulp - -#undef __IO_PREFIX -#define __IO_PREFIX apecs -#define apecs_trivial_io_bw 0 -#define apecs_trivial_io_lq 0 -#define apecs_trivial_rw_bw 2 -#define apecs_trivial_rw_lq 1 -#define apecs_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_APECS__H__ */ diff --git a/include/asm-alpha/core_cia.h b/include/asm-alpha/core_cia.h deleted file mode 100644 index 9e0516c0ca2..00000000000 --- a/include/asm-alpha/core_cia.h +++ /dev/null @@ -1,500 +0,0 @@ -#ifndef __ALPHA_CIA__H__ -#define __ALPHA_CIA__H__ - -/* Define to experiment with fitting everything into one 512MB HAE window. */ -#define CIA_ONE_HAE_WINDOW 1 - -#include -#include - -/* - * CIA is the internal name for the 21171 chipset which provides - * memory controller and PCI access for the 21164 chip based systems. - * Also supported here is the 21172 (CIA-2) and 21174 (PYXIS). - * - * The lineage is a bit confused, since the 21174 was reportedly started - * from the 21171 Pass 1 mask, and so is missing bug fixes that appear - * in 21171 Pass 2 and 21172, but it also contains additional features. - * - * This file is based on: - * - * DECchip 21171 Core Logic Chipset - * Technical Reference Manual - * - * EC-QE18B-TE - * - * david.rusling@reo.mts.dec.com Initial Version. - * - */ - -/* - * CIA ADDRESS BIT DEFINITIONS - * - * 3333 3333 3322 2222 2222 1111 1111 11 - * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210 - * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- - * 1 000 - * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- - * | |\| - * | Byte Enable --+ | - * | Transfer Length --+ - * +-- IO space, not cached - * - * Byte Transfer - * Enable Length Transfer Byte Address - * adr<6:5> adr<4:3> Length Enable Adder - * --------------------------------------------- - * 00 00 Byte 1110 0x000 - * 01 00 Byte 1101 0x020 - * 10 00 Byte 1011 0x040 - * 11 00 Byte 0111 0x060 - * - * 00 01 Word 1100 0x008 - * 01 01 Word 1001 0x028 <= Not supported in this code. - * 10 01 Word 0011 0x048 - * - * 00 10 Tribyte 1000 0x010 - * 01 10 Tribyte 0001 0x030 - * - * 10 11 Longword 0000 0x058 - * - * Note that byte enables are asserted low. 
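 * (Editorial worked example, not part of the original header: under the
 *  encoding above, a one-byte read of PCI memory offset 0x104 through
 *  sparse region 1 is issued as a 32-bit load from roughly
 *
 *      CIA_SPARSE_MEM + ((0x104 & CIA_MEM_R1_MASK) << 5) + 0x000
 *
 *  i.e. the quadword address shifted into place plus the byte-length
 *  adder, and the wanted byte is then extracted from lane (0x104 & 3)
 *  with __kernel_extbl(), exactly as cia_ioread8() does further down
 *  in this file.)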
- * - */ - -#define CIA_MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */ -#define CIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */ -#define CIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */ - -/* - * 21171-CA Control and Status Registers - */ -#define CIA_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL) -# define CIA_REV_MASK 0xff -#define CIA_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL) -#define CIA_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL) -# define CIA_CTRL_PCI_EN (1 << 0) -# define CIA_CTRL_PCI_LOCK_EN (1 << 1) -# define CIA_CTRL_PCI_LOOP_EN (1 << 2) -# define CIA_CTRL_FST_BB_EN (1 << 3) -# define CIA_CTRL_PCI_MST_EN (1 << 4) -# define CIA_CTRL_PCI_MEM_EN (1 << 5) -# define CIA_CTRL_PCI_REQ64_EN (1 << 6) -# define CIA_CTRL_PCI_ACK64_EN (1 << 7) -# define CIA_CTRL_ADDR_PE_EN (1 << 8) -# define CIA_CTRL_PERR_EN (1 << 9) -# define CIA_CTRL_FILL_ERR_EN (1 << 10) -# define CIA_CTRL_MCHK_ERR_EN (1 << 11) -# define CIA_CTRL_ECC_CHK_EN (1 << 12) -# define CIA_CTRL_ASSERT_IDLE_BC (1 << 13) -# define CIA_CTRL_COM_IDLE_BC (1 << 14) -# define CIA_CTRL_CSR_IOA_BYPASS (1 << 15) -# define CIA_CTRL_IO_FLUSHREQ_EN (1 << 16) -# define CIA_CTRL_CPU_FLUSHREQ_EN (1 << 17) -# define CIA_CTRL_ARB_CPU_EN (1 << 18) -# define CIA_CTRL_EN_ARB_LINK (1 << 19) -# define CIA_CTRL_RD_TYPE_SHIFT 20 -# define CIA_CTRL_RL_TYPE_SHIFT 24 -# define CIA_CTRL_RM_TYPE_SHIFT 28 -# define CIA_CTRL_EN_DMA_RD_PERF (1 << 31) -#define CIA_IOC_CIA_CNFG (IDENT_ADDR + 0x8740000140UL) -# define CIA_CNFG_IOA_BWEN (1 << 0) -# define CIA_CNFG_PCI_MWEN (1 << 4) -# define CIA_CNFG_PCI_DWEN (1 << 5) -# define CIA_CNFG_PCI_WLEN (1 << 8) -#define CIA_IOC_FLASH_CTRL (IDENT_ADDR + 0x8740000200UL) -#define CIA_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL) -#define CIA_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL) -#define CIA_IOC_CFG (IDENT_ADDR + 0x8740000480UL) -#define CIA_IOC_CACK_EN (IDENT_ADDR + 0x8740000600UL) -# define CIA_CACK_EN_LOCK_EN (1 << 0) -# define CIA_CACK_EN_MB_EN (1 << 1) -# define CIA_CACK_EN_SET_DIRTY_EN (1 << 2) -# define CIA_CACK_EN_BC_VICTIM_EN (1 << 3) - - -/* - * 21171-CA Diagnostic Registers - */ -#define CIA_IOC_CIA_DIAG (IDENT_ADDR + 0x8740002000UL) -#define CIA_IOC_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL) - -/* - * 21171-CA Performance Monitor registers - */ -#define CIA_IOC_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL) -#define CIA_IOC_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL) - -/* - * 21171-CA Error registers - */ -#define CIA_IOC_CPU_ERR0 (IDENT_ADDR + 0x8740008000UL) -#define CIA_IOC_CPU_ERR1 (IDENT_ADDR + 0x8740008040UL) -#define CIA_IOC_CIA_ERR (IDENT_ADDR + 0x8740008200UL) -# define CIA_ERR_COR_ERR (1 << 0) -# define CIA_ERR_UN_COR_ERR (1 << 1) -# define CIA_ERR_CPU_PE (1 << 2) -# define CIA_ERR_MEM_NEM (1 << 3) -# define CIA_ERR_PCI_SERR (1 << 4) -# define CIA_ERR_PERR (1 << 5) -# define CIA_ERR_PCI_ADDR_PE (1 << 6) -# define CIA_ERR_RCVD_MAS_ABT (1 << 7) -# define CIA_ERR_RCVD_TAR_ABT (1 << 8) -# define CIA_ERR_PA_PTE_INV (1 << 9) -# define CIA_ERR_FROM_WRT_ERR (1 << 10) -# define CIA_ERR_IOA_TIMEOUT (1 << 11) -# define CIA_ERR_LOST_CORR_ERR (1 << 16) -# define CIA_ERR_LOST_UN_CORR_ERR (1 << 17) -# define CIA_ERR_LOST_CPU_PE (1 << 18) -# define CIA_ERR_LOST_MEM_NEM (1 << 19) -# define CIA_ERR_LOST_PERR (1 << 21) -# define CIA_ERR_LOST_PCI_ADDR_PE (1 << 22) -# define CIA_ERR_LOST_RCVD_MAS_ABT (1 << 23) -# define CIA_ERR_LOST_RCVD_TAR_ABT (1 << 24) -# define CIA_ERR_LOST_PA_PTE_INV (1 << 25) -# define CIA_ERR_LOST_FROM_WRT_ERR (1 << 26) -# define CIA_ERR_LOST_IOA_TIMEOUT (1 
<< 27) -# define CIA_ERR_VALID (1 << 31) -#define CIA_IOC_CIA_STAT (IDENT_ADDR + 0x8740008240UL) -#define CIA_IOC_ERR_MASK (IDENT_ADDR + 0x8740008280UL) -#define CIA_IOC_CIA_SYN (IDENT_ADDR + 0x8740008300UL) -#define CIA_IOC_MEM_ERR0 (IDENT_ADDR + 0x8740008400UL) -#define CIA_IOC_MEM_ERR1 (IDENT_ADDR + 0x8740008440UL) -#define CIA_IOC_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL) -#define CIA_IOC_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL) -#define CIA_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL) - -/* - * 21171-CA System configuration registers - */ -#define CIA_IOC_MCR (IDENT_ADDR + 0x8750000000UL) -#define CIA_IOC_MBA0 (IDENT_ADDR + 0x8750000600UL) -#define CIA_IOC_MBA2 (IDENT_ADDR + 0x8750000680UL) -#define CIA_IOC_MBA4 (IDENT_ADDR + 0x8750000700UL) -#define CIA_IOC_MBA6 (IDENT_ADDR + 0x8750000780UL) -#define CIA_IOC_MBA8 (IDENT_ADDR + 0x8750000800UL) -#define CIA_IOC_MBAA (IDENT_ADDR + 0x8750000880UL) -#define CIA_IOC_MBAC (IDENT_ADDR + 0x8750000900UL) -#define CIA_IOC_MBAE (IDENT_ADDR + 0x8750000980UL) -#define CIA_IOC_TMG0 (IDENT_ADDR + 0x8750000B00UL) -#define CIA_IOC_TMG1 (IDENT_ADDR + 0x8750000B40UL) -#define CIA_IOC_TMG2 (IDENT_ADDR + 0x8750000B80UL) - -/* - * 2117A-CA PCI Address and Scatter-Gather Registers. - */ -#define CIA_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL) - -#define CIA_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL) -#define CIA_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL) -#define CIA_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL) - -#define CIA_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL) -#define CIA_IOC_PCI_W1_MASK (IDENT_ADDR + 0x8760000540UL) -#define CIA_IOC_PCI_T1_BASE (IDENT_ADDR + 0x8760000580UL) - -#define CIA_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL) -#define CIA_IOC_PCI_W2_MASK (IDENT_ADDR + 0x8760000640UL) -#define CIA_IOC_PCI_T2_BASE (IDENT_ADDR + 0x8760000680UL) - -#define CIA_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL) -#define CIA_IOC_PCI_W3_MASK (IDENT_ADDR + 0x8760000740UL) -#define CIA_IOC_PCI_T3_BASE (IDENT_ADDR + 0x8760000780UL) - -#define CIA_IOC_PCI_Wn_BASE(N) (IDENT_ADDR + 0x8760000400UL + (N)*0x100) -#define CIA_IOC_PCI_Wn_MASK(N) (IDENT_ADDR + 0x8760000440UL + (N)*0x100) -#define CIA_IOC_PCI_Tn_BASE(N) (IDENT_ADDR + 0x8760000480UL + (N)*0x100) - -#define CIA_IOC_PCI_W_DAC (IDENT_ADDR + 0x87600007C0UL) - -/* - * 2117A-CA Address Translation Registers. - */ - -/* 8 tag registers, the first 4 of which are lockable. */ -#define CIA_IOC_TB_TAGn(n) \ - (IDENT_ADDR + 0x8760000800UL + (n)*0x40) - -/* 4 page registers per tag register. 
*/ -#define CIA_IOC_TBn_PAGEm(n,m) \ - (IDENT_ADDR + 0x8760001000UL + (n)*0x100 + (m)*0x40) - -/* - * Memory spaces: - */ -#define CIA_IACK_SC (IDENT_ADDR + 0x8720000000UL) -#define CIA_CONF (IDENT_ADDR + 0x8700000000UL) -#define CIA_IO (IDENT_ADDR + 0x8580000000UL) -#define CIA_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL) -#define CIA_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL) -#define CIA_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL) -#define CIA_DENSE_MEM (IDENT_ADDR + 0x8600000000UL) -#define CIA_BW_MEM (IDENT_ADDR + 0x8800000000UL) -#define CIA_BW_IO (IDENT_ADDR + 0x8900000000UL) -#define CIA_BW_CFG_0 (IDENT_ADDR + 0x8a00000000UL) -#define CIA_BW_CFG_1 (IDENT_ADDR + 0x8b00000000UL) - -/* - * ALCOR's GRU ASIC registers - */ -#define GRU_INT_REQ (IDENT_ADDR + 0x8780000000UL) -#define GRU_INT_MASK (IDENT_ADDR + 0x8780000040UL) -#define GRU_INT_EDGE (IDENT_ADDR + 0x8780000080UL) -#define GRU_INT_HILO (IDENT_ADDR + 0x87800000C0UL) -#define GRU_INT_CLEAR (IDENT_ADDR + 0x8780000100UL) - -#define GRU_CACHE_CNFG (IDENT_ADDR + 0x8780000200UL) -#define GRU_SCR (IDENT_ADDR + 0x8780000300UL) -#define GRU_LED (IDENT_ADDR + 0x8780000800UL) -#define GRU_RESET (IDENT_ADDR + 0x8780000900UL) - -#define ALCOR_GRU_INT_REQ_BITS 0x800fffffUL -#define XLT_GRU_INT_REQ_BITS 0x80003fffUL -#define GRU_INT_REQ_BITS (alpha_mv.sys.cia.gru_int_req_bits+0) - -/* - * PYXIS interrupt control registers - */ -#define PYXIS_INT_REQ (IDENT_ADDR + 0x87A0000000UL) -#define PYXIS_INT_MASK (IDENT_ADDR + 0x87A0000040UL) -#define PYXIS_INT_HILO (IDENT_ADDR + 0x87A00000C0UL) -#define PYXIS_INT_ROUTE (IDENT_ADDR + 0x87A0000140UL) -#define PYXIS_GPO (IDENT_ADDR + 0x87A0000180UL) -#define PYXIS_INT_CNFG (IDENT_ADDR + 0x87A00001C0UL) -#define PYXIS_RT_COUNT (IDENT_ADDR + 0x87A0000200UL) -#define PYXIS_INT_TIME (IDENT_ADDR + 0x87A0000240UL) -#define PYXIS_IIC_CTRL (IDENT_ADDR + 0x87A00002C0UL) -#define PYXIS_RESET (IDENT_ADDR + 0x8780000900UL) - -/* Offset between ram physical addresses and pci64 DAC bus addresses. */ -#define PYXIS_DAC_OFFSET (1UL << 40) - -/* - * Data structure for handling CIA machine checks. - */ - -/* System-specific info. */ -struct el_CIA_sysdata_mcheck { - unsigned long cpu_err0; - unsigned long cpu_err1; - unsigned long cia_err; - unsigned long cia_stat; - unsigned long err_mask; - unsigned long cia_syn; - unsigned long mem_err0; - unsigned long mem_err1; - unsigned long pci_err0; - unsigned long pci_err1; - unsigned long pci_err2; -}; - - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -/* Do not touch, this should *NOT* be static inline */ -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * CIA (the 2117x PCI/memory support chipset for the EV5 (21164) - * series of processors uses a sparse address mapping scheme to - * get at PCI memory and I/O. - */ - -/* - * Memory functions. 64-bit and 32-bit accesses are done through - * dense memory space, everything else through sparse space. - * - * For reading and writing 8 and 16 bit quantities we need to - * go through one of the three sparse address mapping regions - * and use the HAE_MEM CSR to provide some bits of the address. - * The following few routines use only sparse address region 1 - * which gives 1Gbyte of accessible space which relates exactly - * to the amount of PCI memory mapping *into* system address space. 
- * See p 6-17 of the specification but it looks something like this: - * - * 21164 Address: - * - * 3 2 1 - * 9876543210987654321098765432109876543210 - * 1ZZZZ0.PCI.QW.Address............BBLL - * - * ZZ = SBZ - * BB = Byte offset - * LL = Transfer length - * - * PCI Address: - * - * 3 2 1 - * 10987654321098765432109876543210 - * HHH....PCI.QW.Address........ 00 - * - * HHH = 31:29 HAE_MEM CSR - * - */ - -#define vip volatile int __force * -#define vuip volatile unsigned int __force * -#define vulp volatile unsigned long __force * - -__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long result, base_and_type; - - if (addr >= CIA_DENSE_MEM) - base_and_type = CIA_SPARSE_MEM + 0x00; - else - base_and_type = CIA_IO + 0x00; - - /* We can use CIA_MEM_R1_MASK for io ports too, since it is large - enough to cover all io ports, and smaller than CIA_IO. */ - addr &= CIA_MEM_R1_MASK; - result = *(vip) ((addr << 5) + base_and_type); - return __kernel_extbl(result, addr & 3); -} - -__EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long w, base_and_type; - - if (addr >= CIA_DENSE_MEM) - base_and_type = CIA_SPARSE_MEM + 0x00; - else - base_and_type = CIA_IO + 0x00; - - addr &= CIA_MEM_R1_MASK; - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + base_and_type) = w; -} - -__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long result, base_and_type; - - if (addr >= CIA_DENSE_MEM) - base_and_type = CIA_SPARSE_MEM + 0x08; - else - base_and_type = CIA_IO + 0x08; - - addr &= CIA_MEM_R1_MASK; - result = *(vip) ((addr << 5) + base_and_type); - return __kernel_extwl(result, addr & 3); -} - -__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long w, base_and_type; - - if (addr >= CIA_DENSE_MEM) - base_and_type = CIA_SPARSE_MEM + 0x08; - else - base_and_type = CIA_IO + 0x08; - - addr &= CIA_MEM_R1_MASK; - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + base_and_type) = w; -} - -__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - if (addr < CIA_DENSE_MEM) - addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; - return *(vuip)addr; -} - -__EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - if (addr < CIA_DENSE_MEM) - addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; - *(vuip)addr = b; -} - -__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + CIA_IO); -} - -__EXTERN_INLINE void __iomem *cia_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + CIA_DENSE_MEM); -} - -__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr) -{ - return addr >= IDENT_ADDR + 0x8000000000UL; -} - -__EXTERN_INLINE int cia_is_mmio(const volatile void __iomem *addr) -{ - return (unsigned long)addr >= CIA_DENSE_MEM; -} - -__EXTERN_INLINE void __iomem *cia_bwx_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + CIA_BW_IO); -} - -__EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + CIA_BW_MEM); -} - -__EXTERN_INLINE int cia_bwx_is_ioaddr(unsigned long addr) -{ - return addr >= IDENT_ADDR + 0x8000000000UL; -} - -__EXTERN_INLINE int cia_bwx_is_mmio(const volatile void __iomem 
*addr) -{ - return (unsigned long)addr < CIA_BW_IO; -} - -#undef vip -#undef vuip -#undef vulp - -#undef __IO_PREFIX -#define __IO_PREFIX cia -#define cia_trivial_rw_bw 2 -#define cia_trivial_rw_lq 1 -#define cia_trivial_io_bw 0 -#define cia_trivial_io_lq 0 -#define cia_trivial_iounmap 1 -#include - -#undef __IO_PREFIX -#define __IO_PREFIX cia_bwx -#define cia_bwx_trivial_rw_bw 1 -#define cia_bwx_trivial_rw_lq 1 -#define cia_bwx_trivial_io_bw 1 -#define cia_bwx_trivial_io_lq 1 -#define cia_bwx_trivial_iounmap 1 -#include - -#undef __IO_PREFIX -#ifdef CONFIG_ALPHA_PYXIS -#define __IO_PREFIX cia_bwx -#else -#define __IO_PREFIX cia -#endif - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_CIA__H__ */ diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h deleted file mode 100644 index 24b2db54150..00000000000 --- a/include/asm-alpha/core_irongate.h +++ /dev/null @@ -1,232 +0,0 @@ -#ifndef __ALPHA_IRONGATE__H__ -#define __ALPHA_IRONGATE__H__ - -#include -#include - -/* - * IRONGATE is the internal name for the AMD-751 K7 core logic chipset - * which provides memory controller and PCI access for NAUTILUS-based - * EV6 (21264) systems. - * - * This file is based on: - * - * IronGate management library, (c) 1999 Alpha Processor, Inc. - * Copyright (C) 1999 Alpha Processor, Inc., - * (David Daniel, Stig Telfer, Soohoon Lee) - */ - -/* - * The 21264 supports, and internally recognizes, a 44-bit physical - * address space that is divided equally between memory address space - * and I/O address space. Memory address space resides in the lower - * half of the physical address space (PA[43]=0) and I/O address space - * resides in the upper half of the physical address space (PA[43]=1). - */ - -/* - * Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access - * through the routines given is 32-bit. - * - * The first 0x40 bytes are standard as per the PCI spec. - */ - -typedef volatile __u32 igcsr32; - -typedef struct { - igcsr32 dev_vendor; /* 0x00 - device ID, vendor ID */ - igcsr32 stat_cmd; /* 0x04 - status, command */ - igcsr32 class; /* 0x08 - class code, rev ID */ - igcsr32 latency; /* 0x0C - header type, PCI latency */ - igcsr32 bar0; /* 0x10 - BAR0 - AGP */ - igcsr32 bar1; /* 0x14 - BAR1 - GART */ - igcsr32 bar2; /* 0x18 - Power Management reg block */ - - igcsr32 rsrvd0[6]; /* 0x1C-0x33 reserved */ - - igcsr32 capptr; /* 0x34 - Capabilities pointer */ - - igcsr32 rsrvd1[2]; /* 0x38-0x3F reserved */ - - igcsr32 bacsr10; /* 0x40 - base address chip selects */ - igcsr32 bacsr32; /* 0x44 - base address chip selects */ - igcsr32 bacsr54_eccms761; /* 0x48 - 751: base addr. 
chip selects - 761: ECC, mode/status */ - - igcsr32 rsrvd2[1]; /* 0x4C-0x4F reserved */ - - igcsr32 drammap; /* 0x50 - address mapping control */ - igcsr32 dramtm; /* 0x54 - timing, driver strength */ - igcsr32 dramms; /* 0x58 - DRAM mode/status */ - - igcsr32 rsrvd3[1]; /* 0x5C-0x5F reserved */ - - igcsr32 biu0; /* 0x60 - bus interface unit */ - igcsr32 biusip; /* 0x64 - Serial initialisation pkt */ - - igcsr32 rsrvd4[2]; /* 0x68-0x6F reserved */ - - igcsr32 mro; /* 0x70 - memory request optimiser */ - - igcsr32 rsrvd5[3]; /* 0x74-0x7F reserved */ - - igcsr32 whami; /* 0x80 - who am I */ - igcsr32 pciarb; /* 0x84 - PCI arbitration control */ - igcsr32 pcicfg; /* 0x88 - PCI config status */ - - igcsr32 rsrvd6[4]; /* 0x8C-0x9B reserved */ - - igcsr32 pci_mem; /* 0x9C - PCI top of memory, - 761 only */ - - /* AGP (bus 1) control registers */ - igcsr32 agpcap; /* 0xA0 - AGP Capability Identifier */ - igcsr32 agpstat; /* 0xA4 - AGP status register */ - igcsr32 agpcmd; /* 0xA8 - AGP control register */ - igcsr32 agpva; /* 0xAC - AGP Virtual Address Space */ - igcsr32 agpmode; /* 0xB0 - AGP/GART mode control */ -} Irongate0; - - -typedef struct { - - igcsr32 dev_vendor; /* 0x00 - Device and Vendor IDs */ - igcsr32 stat_cmd; /* 0x04 - Status and Command regs */ - igcsr32 class; /* 0x08 - subclass, baseclass etc */ - igcsr32 htype; /* 0x0C - header type (at 0x0E) */ - igcsr32 rsrvd0[2]; /* 0x10-0x17 reserved */ - igcsr32 busnos; /* 0x18 - Primary, secondary bus nos */ - igcsr32 io_baselim_regs; /* 0x1C - IO base, IO lim, AGP status */ - igcsr32 mem_baselim; /* 0x20 - memory base, memory lim */ - igcsr32 pfmem_baselim; /* 0x24 - prefetchable base, lim */ - igcsr32 rsrvd1[2]; /* 0x28-0x2F reserved */ - igcsr32 io_baselim; /* 0x30 - IO base, IO limit */ - igcsr32 rsrvd2[2]; /* 0x34-0x3B - reserved */ - igcsr32 interrupt; /* 0x3C - interrupt, PCI bridge ctrl */ - -} Irongate1; - -extern igcsr32 *IronECC; - -/* - * Memory spaces: - */ - -/* Irongate is consistent with a subset of the Tsunami memory map */ -#ifdef USE_48_BIT_KSEG -#define IRONGATE_BIAS 0x80000000000UL -#else -#define IRONGATE_BIAS 0x10000000000UL -#endif - - -#define IRONGATE_MEM (IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL) -#define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL) -#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL) -#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL) - -/* - * PCI Configuration space accesses are formed like so: - * - * 0x1FE << 24 | : 2 2 2 2 1 1 1 1 : 1 1 1 1 1 1 0 0 : 0 0 0 0 0 0 0 0 : - * : 3 2 1 0 9 8 7 6 : 5 4 3 2 1 0 9 8 : 7 6 5 4 3 2 1 0 : - * ---bus numer--- -device-- -fun- ---register---- - */ - -#define IGCSR(dev,fun,reg) ( IRONGATE_CONF | \ - ((dev)<<11) | \ - ((fun)<<8) | \ - (reg) ) - -#define IRONGATE0 ((Irongate0 *) IGCSR(0, 0, 0)) -#define IRONGATE1 ((Irongate1 *) IGCSR(1, 0, 0)) - -/* - * Data structure for handling IRONGATE machine checks: - * This is the standard OSF logout frame - */ - -#define SCB_Q_SYSERR 0x620 /* OSF definitions */ -#define SCB_Q_PROCERR 0x630 -#define SCB_Q_SYSMCHK 0x660 -#define SCB_Q_PROCMCHK 0x670 - -struct el_IRONGATE_sysdata_mcheck { - __u32 FrameSize; /* Bytes, including this field */ - __u32 FrameFlags; /* <31> = Retry, <30> = Second Error */ - __u32 CpuOffset; /* Offset to CPU-specific into */ - __u32 SystemOffset; /* Offset to system-specific info */ - __u32 MCHK_Code; - __u32 MCHK_Frame_Rev; - __u64 I_STAT; - __u64 DC_STAT; - __u64 C_ADDR; - __u64 DC1_SYNDROME; - __u64 DC0_SYNDROME; - __u64 C_STAT; - __u64 
C_STS; - __u64 RESERVED0; - __u64 EXC_ADDR; - __u64 IER_CM; - __u64 ISUM; - __u64 MM_STAT; - __u64 PAL_BASE; - __u64 I_CTL; - __u64 PCTX; -}; - - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and - * K7 can only use linear accesses to get at PCI memory and I/O spaces. - */ - -/* - * Memory functions. All accesses are done through linear space. - */ - -__EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + IRONGATE_IO); -} - -extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size); -extern void irongate_iounmap(volatile void __iomem *addr); - -__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr) -{ - return addr >= IRONGATE_MEM; -} - -__EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr; - return addr < IRONGATE_IO || addr >= IRONGATE_CONF; -} - -#undef __IO_PREFIX -#define __IO_PREFIX irongate -#define irongate_trivial_rw_bw 1 -#define irongate_trivial_rw_lq 1 -#define irongate_trivial_io_bw 1 -#define irongate_trivial_io_lq 1 -#define irongate_trivial_iounmap 0 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_IRONGATE__H__ */ diff --git a/include/asm-alpha/core_lca.h b/include/asm-alpha/core_lca.h deleted file mode 100644 index f7cb4b46095..00000000000 --- a/include/asm-alpha/core_lca.h +++ /dev/null @@ -1,361 +0,0 @@ -#ifndef __ALPHA_LCA__H__ -#define __ALPHA_LCA__H__ - -#include -#include - -/* - * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068, - * for example). - * - * This file is based on: - * - * DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors - * Hardware Reference Manual; Digital Equipment Corp.; May 1994; - * Maynard, MA; Order Number: EC-N2681-71. - */ - -/* - * NOTE: The LCA uses a Host Address Extension (HAE) register to access - * PCI addresses that are beyond the first 27 bits of address - * space. Updating the HAE requires an external cycle (and - * a memory barrier), which tends to be slow. Instead of updating - * it on each sparse memory access, we keep the current HAE value - * cached in variable cache_hae. Only if the cached HAE differs - * from the desired HAE value do we actually updated HAE register. - * The HAE register is preserved by the interrupt handler entry/exit - * code, so this scheme works even in the presence of interrupts. - * - * Dense memory space doesn't require the HAE, but is restricted to - * aligned 32 and 64 bit accesses. Special Cycle and Interrupt - * Acknowledge cycles may also require the use of the HAE. The LCA - * limits I/O address space to the bottom 24 bits of address space, - * but this easily covers the 16 bit ISA I/O address space. - */ - -/* - * NOTE 2! The memory operations do not set any memory barriers, as - * it's not needed for cases like a frame buffer that is essentially - * memory-like. You need to do them by hand if the operations depend - * on ordering. - * - * Similarly, the port I/O operations do a "mb" only after a write - * operation: if an mb is needed before (as in the case of doing - * memory mapped I/O first, and then a port I/O operation to the same - * device), it needs to be done by hand. 
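 * (Editorial illustration, not part of the original header: under this
 *  policy a driver that first writes a memory-mapped register and then
 *  does a port write to the same device must order the two by hand,
 *  e.g.
 *
 *      writel(val, mmio_base + REG_CTRL);
 *      mb();                  <-- the barrier placed by hand
 *      outb(cmd, io_port);
 *
 *  where mmio_base, REG_CTRL, cmd and io_port are made-up names.)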
- * - * After the above has bitten me 100 times, I'll give up and just do - * the mb all the time, but right now I'm hoping this will work out. - * Avoiding mb's may potentially be a noticeable speed improvement, - * but I can't honestly say I've tested it. - * - * Handling interrupts that need to do mb's to synchronize to - * non-interrupts is another fun race area. Don't do it (because if - * you do, I'll have to do *everything* with interrupts disabled, - * ugh). - */ - -/* - * Memory Controller registers: - */ -#define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL) -#define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL) -#define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL) -#define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL) -#define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL) -#define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL) -#define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL) -#define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL) -#define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL) -#define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL) -#define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL) -#define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL) -#define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL) -#define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL) -#define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL) -#define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL) -#define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL) -#define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL) -#define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL) - -/* - * I/O Controller registers: - */ -#define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL) -#define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL) -#define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL) -#define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL) -#define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL) -#define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL) -#define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL) -#define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL) -#define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL) -#define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL) -#define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL) -#define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL) -#define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL) -#define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL) -#define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL) -#define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL) -#define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL) -#define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL) -#define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL) -#define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL) -#define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL) -#define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL) - -/* - * Memory spaces: - */ -#define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL) -#define LCA_CONF (IDENT_ADDR + 0x1e0000000UL) -#define LCA_IO (IDENT_ADDR + 0x1c0000000UL) -#define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL) -#define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL) - -/* - * Bit definitions for I/O Controller status register 0: - */ -#define LCA_IOC_STAT0_CMD 0xf -#define LCA_IOC_STAT0_ERR (1<<4) -#define LCA_IOC_STAT0_LOST (1<<5) -#define LCA_IOC_STAT0_THIT (1<<6) -#define LCA_IOC_STAT0_TREF (1<<7) -#define LCA_IOC_STAT0_CODE_SHIFT 8 -#define LCA_IOC_STAT0_CODE_MASK 0x7 -#define LCA_IOC_STAT0_P_NBR_SHIFT 13 -#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff - -#define LCA_HAE_ADDRESS LCA_IOC_HAE - -/* LCA PMR Power Management register defines */ -#define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL) -#define LCA_PMR_PDIV 0x7 /* Primary clock 
divisor */ -#define LCA_PMR_ODIV 0x38 /* Override clock divisor */ -#define LCA_PMR_INTO 0x40 /* Interrupt override */ -#define LCA_PMR_DMAO 0x80 /* DMA override */ -#define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even bits */ -#define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - even bits */ -#define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8L - -/* LCA PMR Macros */ - -#define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR) -#define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d)) - -#define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV) -#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV) -#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c))) - -/* LCA PMR Divisor values */ -#define LCA_PMR_DIV_1 0x0 -#define LCA_PMR_DIV_1_5 0x1 -#define LCA_PMR_DIV_2 0x2 -#define LCA_PMR_DIV_4 0x3 -#define LCA_PMR_DIV_8 0x4 -#define LCA_PMR_DIV_16 0x5 -#define LCA_PMR_DIV_MIN DIV_1 -#define LCA_PMR_DIV_MAX DIV_16 - - -/* - * Data structure for handling LCA machine checks. Correctable errors - * result in a short logout frame, uncorrectable ones in a long one. - */ -struct el_lca_mcheck_short { - struct el_common h; /* common logout header */ - unsigned long esr; /* error-status register */ - unsigned long ear; /* error-address register */ - unsigned long dc_stat; /* dcache status register */ - unsigned long ioc_stat0; /* I/O controller status register 0 */ - unsigned long ioc_stat1; /* I/O controller status register 1 */ -}; - -struct el_lca_mcheck_long { - struct el_common h; /* common logout header */ - unsigned long pt[31]; /* PAL temps */ - unsigned long exc_addr; /* exception address */ - unsigned long pad1[3]; - unsigned long pal_base; /* PALcode base address */ - unsigned long hier; /* hw interrupt enable */ - unsigned long hirr; /* hw interrupt request */ - unsigned long mm_csr; /* MMU control & status */ - unsigned long dc_stat; /* data cache status */ - unsigned long dc_addr; /* data cache addr register */ - unsigned long abox_ctl; /* address box control register */ - unsigned long esr; /* error status register */ - unsigned long ear; /* error address register */ - unsigned long car; /* cache control register */ - unsigned long ioc_stat0; /* I/O controller status register 0 */ - unsigned long ioc_stat1; /* I/O controller status register 1 */ - unsigned long va; /* virtual address register */ -}; - -union el_lca { - struct el_common * c; - struct el_lca_mcheck_long * l; - struct el_lca_mcheck_short * s; -}; - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * Unlike Jensen, the Noname machines have no concept of local - * I/O---everything goes over the PCI bus. - * - * There is plenty room for optimization here. In particular, - * the Alpha's insb/insw/extb/extw should be useful in moving - * data to/from the right byte-lanes. 
- */ - -#define vip volatile int __force * -#define vuip volatile unsigned int __force * -#define vulp volatile unsigned long __force * - -#define LCA_SET_HAE \ - do { \ - if (addr >= (1UL << 24)) { \ - unsigned long msb = addr & 0xf8000000; \ - addr -= msb; \ - set_hae(msb); \ - } \ - } while (0) - - -__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long result, base_and_type; - - if (addr >= LCA_DENSE_MEM) { - addr -= LCA_DENSE_MEM; - LCA_SET_HAE; - base_and_type = LCA_SPARSE_MEM + 0x00; - } else { - addr -= LCA_IO; - base_and_type = LCA_IO + 0x00; - } - - result = *(vip) ((addr << 5) + base_and_type); - return __kernel_extbl(result, addr & 3); -} - -__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long w, base_and_type; - - if (addr >= LCA_DENSE_MEM) { - addr -= LCA_DENSE_MEM; - LCA_SET_HAE; - base_and_type = LCA_SPARSE_MEM + 0x00; - } else { - addr -= LCA_IO; - base_and_type = LCA_IO + 0x00; - } - - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + base_and_type) = w; -} - -__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long result, base_and_type; - - if (addr >= LCA_DENSE_MEM) { - addr -= LCA_DENSE_MEM; - LCA_SET_HAE; - base_and_type = LCA_SPARSE_MEM + 0x08; - } else { - addr -= LCA_IO; - base_and_type = LCA_IO + 0x08; - } - - result = *(vip) ((addr << 5) + base_and_type); - return __kernel_extwl(result, addr & 3); -} - -__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long w, base_and_type; - - if (addr >= LCA_DENSE_MEM) { - addr -= LCA_DENSE_MEM; - LCA_SET_HAE; - base_and_type = LCA_SPARSE_MEM + 0x08; - } else { - addr -= LCA_IO; - base_and_type = LCA_IO + 0x08; - } - - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + base_and_type) = w; -} - -__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - if (addr < LCA_DENSE_MEM) - addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; - return *(vuip)addr; -} - -__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - if (addr < LCA_DENSE_MEM) - addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; - *(vuip)addr = b; -} - -__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + LCA_IO); -} - -__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + LCA_DENSE_MEM); -} - -__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr) -{ - return addr >= IDENT_ADDR + 0x120000000UL; -} - -__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr) -{ - return (unsigned long)addr >= LCA_DENSE_MEM; -} - -#undef vip -#undef vuip -#undef vulp - -#undef __IO_PREFIX -#define __IO_PREFIX lca -#define lca_trivial_rw_bw 2 -#define lca_trivial_rw_lq 1 -#define lca_trivial_io_bw 0 -#define lca_trivial_io_lq 0 -#define lca_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_LCA__H__ */ diff --git a/include/asm-alpha/core_marvel.h b/include/asm-alpha/core_marvel.h deleted file mode 100644 index 30d55fe7aaf..00000000000 --- a/include/asm-alpha/core_marvel.h +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Marvel systems use the IO7 I/O chip 
provides PCI/PCIX/AGP access - * - * This file is based on: - * - * Marvel / EV7 System Programmer's Manual - * Revision 1.00 - * 14 May 2001 - */ - -#ifndef __ALPHA_MARVEL__H__ -#define __ALPHA_MARVEL__H__ - -#include -#include -#include - -#include - -#define MARVEL_MAX_PIDS 32 /* as long as we rely on 43-bit superpage */ -#define MARVEL_IRQ_VEC_PE_SHIFT (10) -#define MARVEL_IRQ_VEC_IRQ_MASK ((1 << MARVEL_IRQ_VEC_PE_SHIFT) - 1) -#define MARVEL_NR_IRQS \ - (16 + (MARVEL_MAX_PIDS * (1 << MARVEL_IRQ_VEC_PE_SHIFT))) - -/* - * EV7 RBOX Registers - */ -typedef struct { - volatile unsigned long csr __attribute__((aligned(16))); -} ev7_csr; - -typedef struct { - ev7_csr RBOX_CFG; /* 0x0000 */ - ev7_csr RBOX_NSVC; - ev7_csr RBOX_EWVC; - ev7_csr RBOX_WHAMI; - ev7_csr RBOX_TCTL; /* 0x0040 */ - ev7_csr RBOX_INT; - ev7_csr RBOX_IMASK; - ev7_csr RBOX_IREQ; - ev7_csr RBOX_INTQ; /* 0x0080 */ - ev7_csr RBOX_INTA; - ev7_csr RBOX_IT; - ev7_csr RBOX_SCRATCH1; - ev7_csr RBOX_SCRATCH2; /* 0x00c0 */ - ev7_csr RBOX_L_ERR; -} ev7_csrs; - -/* - * EV7 CSR addressing macros - */ -#define EV7_MASK40(addr) ((addr) & ((1UL << 41) - 1)) -#define EV7_KERN_ADDR(addr) ((void *)(IDENT_ADDR | EV7_MASK40(addr))) - -#define EV7_PE_MASK 0x1ffUL /* 9 bits ( 256 + mem/io ) */ -#define EV7_IPE(pe) ((~((long)(pe)) & EV7_PE_MASK) << 35) - -#define EV7_CSR_PHYS(pe, off) (EV7_IPE(pe) | (0x7FFCUL << 20) | (off)) -#define EV7_CSRS_PHYS(pe) (EV7_CSR_PHYS(pe, 0UL)) - -#define EV7_CSR_KERN(pe, off) (EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off))) -#define EV7_CSRS_KERN(pe) (EV7_KERN_ADDR(EV7_CSRS_PHYS(pe))) - -#define EV7_CSR_OFFSET(name) ((unsigned long)&((ev7_csrs *)NULL)->name.csr) - -/* - * IO7 registers - */ -typedef struct { - volatile unsigned long csr __attribute__((aligned(64))); -} io7_csr; - -typedef struct { - /* I/O Port Control Registers */ - io7_csr POx_CTRL; /* 0x0000 */ - io7_csr POx_CACHE_CTL; - io7_csr POx_TIMER; - io7_csr POx_IO_ADR_EXT; - io7_csr POx_MEM_ADR_EXT; /* 0x0100 */ - io7_csr POx_XCAL_CTRL; - io7_csr rsvd1[2]; /* ?? 
spec doesn't show 0x180 */ - io7_csr POx_DM_SOURCE; /* 0x0200 */ - io7_csr POx_DM_DEST; - io7_csr POx_DM_SIZE; - io7_csr POx_DM_CTRL; - io7_csr rsvd2[4]; /* 0x0300 */ - - /* AGP Control Registers -- port 3 only */ - io7_csr AGP_CAP_ID; /* 0x0400 */ - io7_csr AGP_STAT; - io7_csr AGP_CMD; - io7_csr rsvd3; - - /* I/O Port Monitor Registers */ - io7_csr POx_MONCTL; /* 0x0500 */ - io7_csr POx_CTRA; - io7_csr POx_CTRB; - io7_csr POx_CTR56; - io7_csr POx_SCRATCH; /* 0x0600 */ - io7_csr POx_XTRA_A; - io7_csr POx_XTRA_TS; - io7_csr POx_XTRA_Z; - io7_csr rsvd4; /* 0x0700 */ - io7_csr POx_THRESHA; - io7_csr POx_THRESHB; - io7_csr rsvd5[33]; - - /* System Address Space Window Control Registers */ - - io7_csr POx_WBASE[4]; /* 0x1000 */ - io7_csr POx_WMASK[4]; - io7_csr POx_TBASE[4]; - io7_csr POx_SG_TBIA; - io7_csr POx_MSI_WBASE; - io7_csr rsvd6[50]; - - /* I/O Port Error Registers */ - io7_csr POx_ERR_SUM; - io7_csr POx_FIRST_ERR; - io7_csr POx_MSK_HEI; - io7_csr POx_TLB_ERR; - io7_csr POx_SPL_COMPLT; - io7_csr POx_TRANS_SUM; - io7_csr POx_FRC_PCI_ERR; - io7_csr POx_MULT_ERR; - io7_csr rsvd7[8]; - - /* I/O Port End of Interrupt Registers */ - io7_csr EOI_DAT; - io7_csr rsvd8[7]; - io7_csr POx_IACK_SPECIAL; - io7_csr rsvd9[103]; -} io7_ioport_csrs; - -typedef struct { - io7_csr IO_ASIC_REV; /* 0x30.0000 */ - io7_csr IO_SYS_REV; - io7_csr SER_CHAIN3; - io7_csr PO7_RST1; - io7_csr PO7_RST2; /* 0x30.0100 */ - io7_csr POx_RST[4]; - io7_csr IO7_DWNH; - io7_csr IO7_MAF; - io7_csr IO7_MAF_TO; - io7_csr IO7_ACC_CLUMP; /* 0x30.0300 */ - io7_csr IO7_PMASK; - io7_csr IO7_IOMASK; - io7_csr IO7_UPH; - io7_csr IO7_UPH_TO; /* 0x30.0400 */ - io7_csr RBX_IREQ_OFF; - io7_csr RBX_INTA_OFF; - io7_csr INT_RTY; - io7_csr PO7_MONCTL; /* 0x30.0500 */ - io7_csr PO7_CTRA; - io7_csr PO7_CTRB; - io7_csr PO7_CTR56; - io7_csr PO7_SCRATCH; /* 0x30.0600 */ - io7_csr PO7_XTRA_A; - io7_csr PO7_XTRA_TS; - io7_csr PO7_XTRA_Z; - io7_csr PO7_PMASK; /* 0x30.0700 */ - io7_csr PO7_THRESHA; - io7_csr PO7_THRESHB; - io7_csr rsvd1[97]; - io7_csr PO7_ERROR_SUM; /* 0x30.2000 */ - io7_csr PO7_BHOLE_MASK; - io7_csr PO7_HEI_MSK; - io7_csr PO7_CRD_MSK; - io7_csr PO7_UNCRR_SYM; /* 0x30.2100 */ - io7_csr PO7_CRRCT_SYM; - io7_csr PO7_ERR_PKT[2]; - io7_csr PO7_UGBGE_SYM; /* 0x30.2200 */ - io7_csr rsbv2[887]; - io7_csr PO7_LSI_CTL[128]; /* 0x31.0000 */ - io7_csr rsvd3[123]; - io7_csr HLT_CTL; /* 0x31.3ec0 */ - io7_csr HPI_CTL; /* 0x31.3f00 */ - io7_csr CRD_CTL; - io7_csr STV_CTL; - io7_csr HEI_CTL; - io7_csr PO7_MSI_CTL[16]; /* 0x31.4000 */ - io7_csr rsvd4[240]; - - /* - * Interrupt Diagnostic / Test - */ - struct { - io7_csr INT_PND; - io7_csr INT_CLR; - io7_csr INT_EOI; - io7_csr rsvd[29]; - } INT_DIAG[4]; - io7_csr rsvd5[125]; /* 0x31.a000 */ - io7_csr MISC_PND; /* 0x31.b800 */ - io7_csr rsvd6[31]; - io7_csr MSI_PND[16]; /* 0x31.c000 */ - io7_csr rsvd7[16]; - io7_csr MSI_CLR[16]; /* 0x31.c800 */ -} io7_port7_csrs; - -/* - * IO7 DMA Window Base register (POx_WBASEx) - */ -#define wbase_m_ena 0x1 -#define wbase_m_sg 0x2 -#define wbase_m_dac 0x4 -#define wbase_m_addr 0xFFF00000 -union IO7_POx_WBASE { - struct { - unsigned ena : 1; /* <0> */ - unsigned sg : 1; /* <1> */ - unsigned dac : 1; /* <2> -- window 3 only */ - unsigned rsvd1 : 17; - unsigned addr : 12; /* <31:20> */ - unsigned rsvd2 : 32; - } bits; - unsigned as_long[2]; - unsigned as_quad; -}; - -/* - * IO7 IID (Interrupt IDentifier) format - * - * For level-sensative interrupts, int_num is encoded as: - * - * bus/port slot/device INTx - * <7:5> <4:2> <1:0> - */ -union IO7_IID { - struct { - 
unsigned int_num : 9; /* <8:0> */ - unsigned tpu_mask : 4; /* <12:9> rsvd */ - unsigned msi : 1; /* 13 */ - unsigned ipe : 10; /* <23:14> */ - unsigned long rsvd : 40; - } bits; - unsigned int as_long[2]; - unsigned long as_quad; -}; - -/* - * IO7 addressing macros - */ -#define IO7_KERN_ADDR(addr) (EV7_KERN_ADDR(addr)) - -#define IO7_PORT_MASK 0x07UL /* 3 bits of port */ - -#define IO7_IPE(pe) (EV7_IPE(pe)) -#define IO7_IPORT(port) ((~((long)(port)) & IO7_PORT_MASK) << 32) - -#define IO7_HOSE(pe, port) (IO7_IPE(pe) | IO7_IPORT(port)) - -#define IO7_MEM_PHYS(pe, port) (IO7_HOSE(pe, port) | 0x00000000UL) -#define IO7_CONF_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFE000000UL) -#define IO7_IO_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFF000000UL) -#define IO7_CSR_PHYS(pe, port, off) \ - (IO7_HOSE(pe, port) | 0xFF800000UL | (off)) -#define IO7_CSRS_PHYS(pe, port) (IO7_CSR_PHYS(pe, port, 0UL)) -#define IO7_PORT7_CSRS_PHYS(pe) (IO7_CSR_PHYS(pe, 7, 0x300000UL)) - -#define IO7_MEM_KERN(pe, port) (IO7_KERN_ADDR(IO7_MEM_PHYS(pe, port))) -#define IO7_CONF_KERN(pe, port) (IO7_KERN_ADDR(IO7_CONF_PHYS(pe, port))) -#define IO7_IO_KERN(pe, port) (IO7_KERN_ADDR(IO7_IO_PHYS(pe, port))) -#define IO7_CSR_KERN(pe, port, off) (IO7_KERN_ADDR(IO7_CSR_PHYS(pe,port,off))) -#define IO7_CSRS_KERN(pe, port) (IO7_KERN_ADDR(IO7_CSRS_PHYS(pe, port))) -#define IO7_PORT7_CSRS_KERN(pe) (IO7_KERN_ADDR(IO7_PORT7_CSRS_PHYS(pe))) - -#define IO7_PLL_RNGA(pll) (((pll) >> 3) & 0x7) -#define IO7_PLL_RNGB(pll) (((pll) >> 6) & 0x7) - -#define IO7_MEM_SPACE (2UL * 1024 * 1024 * 1024) /* 2GB MEM */ -#define IO7_IO_SPACE (8UL * 1024 * 1024) /* 8MB I/O */ - - -/* - * Offset between ram physical addresses and pci64 DAC addresses - */ -#define IO7_DAC_OFFSET (1UL << 49) - -/* - * This is needed to satisify the IO() macro used in initializing the machvec - */ -#define MARVEL_IACK_SC \ - ((unsigned long) \ - (&(((io7_ioport_csrs *)IO7_CSRS_KERN(0, 0))->POx_IACK_SPECIAL))) - -#ifdef __KERNEL__ - -/* - * IO7 structs - */ -#define IO7_NUM_PORTS 4 -#define IO7_AGP_PORT 3 - -struct io7_port { - struct io7 *io7; - struct pci_controller *hose; - - int enabled; - unsigned int port; - io7_ioport_csrs *csrs; - - unsigned long saved_wbase[4]; - unsigned long saved_wmask[4]; - unsigned long saved_tbase[4]; -}; - -struct io7 { - struct io7 *next; - - unsigned int pe; - io7_port7_csrs *csrs; - struct io7_port ports[IO7_NUM_PORTS]; - - spinlock_t irq_lock; -}; - -#ifndef __EXTERN_INLINE -# define __EXTERN_INLINE extern inline -# define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions. All access through linear space. - */ - -/* - * Memory functions. All accesses through linear space. 
- */ - -#define vucp volatile unsigned char __force * -#define vusp volatile unsigned short __force * - -extern unsigned int marvel_ioread8(void __iomem *); -extern void marvel_iowrite8(u8 b, void __iomem *); - -__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr) -{ - return __kernel_ldwu(*(vusp)addr); -} - -__EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr) -{ - __kernel_stw(b, *(vusp)addr); -} - -extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size); -extern void marvel_iounmap(volatile void __iomem *addr); -extern void __iomem *marvel_ioportmap (unsigned long addr); - -__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr) -{ - return (addr >> 40) & 1; -} - -extern int marvel_is_mmio(const volatile void __iomem *); - -#undef vucp -#undef vusp - -#undef __IO_PREFIX -#define __IO_PREFIX marvel -#define marvel_trivial_rw_bw 1 -#define marvel_trivial_rw_lq 1 -#define marvel_trivial_io_bw 0 -#define marvel_trivial_io_lq 1 -#define marvel_trivial_iounmap 0 -#include - -#ifdef __IO_EXTERN_INLINE -# undef __EXTERN_INLINE -# undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_MARVEL__H__ */ diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h deleted file mode 100644 index acf55b48347..00000000000 --- a/include/asm-alpha/core_mcpcia.h +++ /dev/null @@ -1,381 +0,0 @@ -#ifndef __ALPHA_MCPCIA__H__ -#define __ALPHA_MCPCIA__H__ - -/* Define to experiment with fitting everything into one 128MB HAE window. - One window per bus, that is. */ -#define MCPCIA_ONE_HAE_WINDOW 1 - -#include -#include -#include - -/* - * MCPCIA is the internal name for a core logic chipset which provides - * PCI access for the RAWHIDE family of systems. - * - * This file is based on: - * - * RAWHIDE System Programmer's Manual - * 16-May-96 - * Rev. 1.4 - * - */ - -/*------------------------------------------------------------------------** -** ** -** I/O procedures ** -** ** -** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers ** -** inportbxt: 8 bits only ** -** inport: alias of inportw ** -** outport: alias of outportw ** -** ** -** inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers ** -** inmembxt: 8 bits only ** -** inmem: alias of inmemw ** -** outmem: alias of outmemw ** -** ** -**------------------------------------------------------------------------*/ - - -/* MCPCIA ADDRESS BIT DEFINITIONS - * - * 3333 3333 3322 2222 2222 1111 1111 11 - * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210 - * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- - * 1 000 - * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- - * | |\| - * | Byte Enable --+ | - * | Transfer Length --+ - * +-- IO space, not cached - * - * Byte Transfer - * Enable Length Transfer Byte Address - * adr<6:5> adr<4:3> Length Enable Adder - * --------------------------------------------- - * 00 00 Byte 1110 0x000 - * 01 00 Byte 1101 0x020 - * 10 00 Byte 1011 0x040 - * 11 00 Byte 0111 0x060 - * - * 00 01 Word 1100 0x008 - * 01 01 Word 1001 0x028 <= Not supported in this code. - * 10 01 Word 0011 0x048 - * - * 00 10 Tribyte 1000 0x010 - * 01 10 Tribyte 0001 0x030 - * - * 10 11 Longword 0000 0x058 - * - * Note that byte enables are asserted low. - * - */ - -#define MCPCIA_MAX_HOSES 4 - -#define MCPCIA_MID(m) ((unsigned long)(m) << 33) - -/* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. - Durango adds PCI2 and PCI3 at MID 6 and 7 respectively. 
*/ -#define MCPCIA_HOSE2MID(h) ((h) + 4) - -#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */ - -/* - * Memory spaces: - */ -#define MCPCIA_SPARSE(m) (IDENT_ADDR + 0xf000000000UL + MCPCIA_MID(m)) -#define MCPCIA_DENSE(m) (IDENT_ADDR + 0xf100000000UL + MCPCIA_MID(m)) -#define MCPCIA_IO(m) (IDENT_ADDR + 0xf180000000UL + MCPCIA_MID(m)) -#define MCPCIA_CONF(m) (IDENT_ADDR + 0xf1c0000000UL + MCPCIA_MID(m)) -#define MCPCIA_CSR(m) (IDENT_ADDR + 0xf1e0000000UL + MCPCIA_MID(m)) -#define MCPCIA_IO_IACK(m) (IDENT_ADDR + 0xf1f0000000UL + MCPCIA_MID(m)) -#define MCPCIA_DENSE_IO(m) (IDENT_ADDR + 0xe1fc000000UL + MCPCIA_MID(m)) -#define MCPCIA_DENSE_CONF(m) (IDENT_ADDR + 0xe1fe000000UL + MCPCIA_MID(m)) - -/* - * General Registers - */ -#define MCPCIA_REV(m) (MCPCIA_CSR(m) + 0x000) -#define MCPCIA_WHOAMI(m) (MCPCIA_CSR(m) + 0x040) -#define MCPCIA_PCI_LAT(m) (MCPCIA_CSR(m) + 0x080) -#define MCPCIA_CAP_CTRL(m) (MCPCIA_CSR(m) + 0x100) -#define MCPCIA_HAE_MEM(m) (MCPCIA_CSR(m) + 0x400) -#define MCPCIA_HAE_IO(m) (MCPCIA_CSR(m) + 0x440) -#define _MCPCIA_IACK_SC(m) (MCPCIA_CSR(m) + 0x480) -#define MCPCIA_HAE_DENSE(m) (MCPCIA_CSR(m) + 0x4C0) - -/* - * Interrupt Control registers - */ -#define MCPCIA_INT_CTL(m) (MCPCIA_CSR(m) + 0x500) -#define MCPCIA_INT_REQ(m) (MCPCIA_CSR(m) + 0x540) -#define MCPCIA_INT_TARG(m) (MCPCIA_CSR(m) + 0x580) -#define MCPCIA_INT_ADR(m) (MCPCIA_CSR(m) + 0x5C0) -#define MCPCIA_INT_ADR_EXT(m) (MCPCIA_CSR(m) + 0x600) -#define MCPCIA_INT_MASK0(m) (MCPCIA_CSR(m) + 0x640) -#define MCPCIA_INT_MASK1(m) (MCPCIA_CSR(m) + 0x680) -#define MCPCIA_INT_ACK0(m) (MCPCIA_CSR(m) + 0x10003f00) -#define MCPCIA_INT_ACK1(m) (MCPCIA_CSR(m) + 0x10003f40) - -/* - * Performance Monitor registers - */ -#define MCPCIA_PERF_MON(m) (MCPCIA_CSR(m) + 0x300) -#define MCPCIA_PERF_CONT(m) (MCPCIA_CSR(m) + 0x340) - -/* - * Diagnostic Registers - */ -#define MCPCIA_CAP_DIAG(m) (MCPCIA_CSR(m) + 0x700) -#define MCPCIA_TOP_OF_MEM(m) (MCPCIA_CSR(m) + 0x7C0) - -/* - * Error registers - */ -#define MCPCIA_MC_ERR0(m) (MCPCIA_CSR(m) + 0x800) -#define MCPCIA_MC_ERR1(m) (MCPCIA_CSR(m) + 0x840) -#define MCPCIA_CAP_ERR(m) (MCPCIA_CSR(m) + 0x880) -#define MCPCIA_PCI_ERR1(m) (MCPCIA_CSR(m) + 0x1040) -#define MCPCIA_MDPA_STAT(m) (MCPCIA_CSR(m) + 0x4000) -#define MCPCIA_MDPA_SYN(m) (MCPCIA_CSR(m) + 0x4040) -#define MCPCIA_MDPA_DIAG(m) (MCPCIA_CSR(m) + 0x4080) -#define MCPCIA_MDPB_STAT(m) (MCPCIA_CSR(m) + 0x8000) -#define MCPCIA_MDPB_SYN(m) (MCPCIA_CSR(m) + 0x8040) -#define MCPCIA_MDPB_DIAG(m) (MCPCIA_CSR(m) + 0x8080) - -/* - * PCI Address Translation Registers. - */ -#define MCPCIA_SG_TBIA(m) (MCPCIA_CSR(m) + 0x1300) -#define MCPCIA_HBASE(m) (MCPCIA_CSR(m) + 0x1340) - -#define MCPCIA_W0_BASE(m) (MCPCIA_CSR(m) + 0x1400) -#define MCPCIA_W0_MASK(m) (MCPCIA_CSR(m) + 0x1440) -#define MCPCIA_T0_BASE(m) (MCPCIA_CSR(m) + 0x1480) - -#define MCPCIA_W1_BASE(m) (MCPCIA_CSR(m) + 0x1500) -#define MCPCIA_W1_MASK(m) (MCPCIA_CSR(m) + 0x1540) -#define MCPCIA_T1_BASE(m) (MCPCIA_CSR(m) + 0x1580) - -#define MCPCIA_W2_BASE(m) (MCPCIA_CSR(m) + 0x1600) -#define MCPCIA_W2_MASK(m) (MCPCIA_CSR(m) + 0x1640) -#define MCPCIA_T2_BASE(m) (MCPCIA_CSR(m) + 0x1680) - -#define MCPCIA_W3_BASE(m) (MCPCIA_CSR(m) + 0x1700) -#define MCPCIA_W3_MASK(m) (MCPCIA_CSR(m) + 0x1740) -#define MCPCIA_T3_BASE(m) (MCPCIA_CSR(m) + 0x1780) - -/* Hack! Only words for bus 0. 
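To see how the per-MID register addresses above compose, note that MCPCIA_MID() simply shifts the module ID to bit 33 and every CSR sits at a fixed offset from MCPCIA_CSR(). The sketch below walks hoses 0-3 through MCPCIA_HOSE2MID(); IDENT_ADDR is not defined in this hunk and is taken as 0 here, so only the chip-relative values are meaningful, and a 64-bit long is assumed as on Alpha.

#include <stdio.h>

#define IDENT_ADDR		0x0UL	/* placeholder for this sketch */
#define MCPCIA_MID(m)		((unsigned long)(m) << 33)
#define MCPCIA_HOSE2MID(h)	((h) + 4)
#define MCPCIA_CSR(m)		(IDENT_ADDR + 0xf1e0000000UL + MCPCIA_MID(m))
#define MCPCIA_INT_REQ(m)	(MCPCIA_CSR(m) + 0x540)
#define MCPCIA_W0_BASE(m)	(MCPCIA_CSR(m) + 0x1400)

int main(void)
{
	int hose;

	for (hose = 0; hose < 4; hose++) {
		int mid = MCPCIA_HOSE2MID(hose);

		printf("hose %d (MID %d): INT_REQ %#lx  W0_BASE %#lx\n",
		       hose, mid, MCPCIA_INT_REQ(mid), MCPCIA_W0_BASE(mid));
	}
	return 0;
}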
*/ - -#ifndef MCPCIA_ONE_HAE_WINDOW -#define MCPCIA_HAE_ADDRESS MCPCIA_HAE_MEM(4) -#endif -#define MCPCIA_IACK_SC _MCPCIA_IACK_SC(4) - -/* - * The canonical non-remaped I/O and MEM addresses have these values - * subtracted out. This is arranged so that folks manipulating ISA - * devices can use their familiar numbers and have them map to bus 0. - */ - -#define MCPCIA_IO_BIAS MCPCIA_IO(4) -#define MCPCIA_MEM_BIAS MCPCIA_DENSE(4) - -/* Offset between ram physical addresses and pci64 DAC bus addresses. */ -#define MCPCIA_DAC_OFFSET (1UL << 40) - -/* - * Data structure for handling MCPCIA machine checks: - */ -struct el_MCPCIA_uncorrected_frame_mcheck { - struct el_common header; - struct el_common_EV5_uncorrectable_mcheck procdata; -}; - - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164) - * and EV56 (21164a) processors, can use either a sparse address mapping - * scheme, or the so-called byte-word PCI address space, to get at PCI memory - * and I/O. - * - * Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE. - */ - -/* - * Memory functions. 64-bit and 32-bit accesses are done through - * dense memory space, everything else through sparse space. - * - * For reading and writing 8 and 16 bit quantities we need to - * go through one of the three sparse address mapping regions - * and use the HAE_MEM CSR to provide some bits of the address. - * The following few routines use only sparse address region 1 - * which gives 1Gbyte of accessible space which relates exactly - * to the amount of PCI memory mapping *into* system address space. - * See p 6-17 of the specification but it looks something like this: - * - * 21164 Address: - * - * 3 2 1 - * 9876543210987654321098765432109876543210 - * 1ZZZZ0.PCI.QW.Address............BBLL - * - * ZZ = SBZ - * BB = Byte offset - * LL = Transfer length - * - * PCI Address: - * - * 3 2 1 - * 10987654321098765432109876543210 - * HHH....PCI.QW.Address........ 
00 - * - * HHH = 31:29 HAE_MEM CSR - * - */ - -#define vip volatile int __force * -#define vuip volatile unsigned int __force * - -#ifdef MCPCIA_ONE_HAE_WINDOW -#define MCPCIA_FROB_MMIO \ - if (__mcpcia_is_mmio(hose)) { \ - set_hae(hose & 0xffffffff); \ - hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ - } -#else -#define MCPCIA_FROB_MMIO \ - if (__mcpcia_is_mmio(hose)) { \ - hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ - } -#endif - -extern inline int __mcpcia_is_mmio(unsigned long addr) -{ - return (addr & 0x80000000UL) == 0; -} - -__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; - unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; - unsigned long result; - - MCPCIA_FROB_MMIO; - - result = *(vip) ((addr << 5) + hose + 0x00); - return __kernel_extbl(result, addr & 3); -} - -__EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; - unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; - unsigned long w; - - MCPCIA_FROB_MMIO; - - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + hose + 0x00) = w; -} - -__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; - unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; - unsigned long result; - - MCPCIA_FROB_MMIO; - - result = *(vip) ((addr << 5) + hose + 0x08); - return __kernel_extwl(result, addr & 3); -} - -__EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; - unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; - unsigned long w; - - MCPCIA_FROB_MMIO; - - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + hose + 0x08) = w; -} - -__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr; - - if (!__mcpcia_is_mmio(addr)) - addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; - - return *(vuip)addr; -} - -__EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr; - - if (!__mcpcia_is_mmio(addr)) - addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; - - *(vuip)addr = b; -} - - -__EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + MCPCIA_IO_BIAS); -} - -__EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + MCPCIA_MEM_BIAS); -} - -__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr) -{ - return addr >= MCPCIA_SPARSE(0); -} - -__EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - return __mcpcia_is_mmio(addr); -} - -#undef MCPCIA_FROB_MMIO - -#undef vip -#undef vuip - -#undef __IO_PREFIX -#define __IO_PREFIX mcpcia -#define mcpcia_trivial_rw_bw 2 -#define mcpcia_trivial_rw_lq 1 -#define mcpcia_trivial_io_bw 0 -#define mcpcia_trivial_io_lq 0 -#define mcpcia_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_MCPCIA__H__ */ diff --git a/include/asm-alpha/core_polaris.h b/include/asm-alpha/core_polaris.h deleted file mode 100644 index 2f966b64659..00000000000 --- a/include/asm-alpha/core_polaris.h +++ /dev/null @@ -1,110 +0,0 @@ -#ifndef __ALPHA_POLARIS__H__ -#define 
__ALPHA_POLARIS__H__ - -#include -#include - -/* - * POLARIS is the internal name for a core logic chipset which provides - * memory controller and PCI access for the 21164PC chip based systems. - * - * This file is based on: - * - * Polaris System Controller - * Device Functional Specification - * 22-Jan-98 - * Rev. 4.2 - * - */ - -/* Polaris memory regions */ -#define POLARIS_SPARSE_MEM_BASE (IDENT_ADDR + 0xf800000000UL) -#define POLARIS_DENSE_MEM_BASE (IDENT_ADDR + 0xf900000000UL) -#define POLARIS_SPARSE_IO_BASE (IDENT_ADDR + 0xf980000000UL) -#define POLARIS_SPARSE_CONFIG_BASE (IDENT_ADDR + 0xf9c0000000UL) -#define POLARIS_IACK_BASE (IDENT_ADDR + 0xf9f8000000UL) -#define POLARIS_DENSE_IO_BASE (IDENT_ADDR + 0xf9fc000000UL) -#define POLARIS_DENSE_CONFIG_BASE (IDENT_ADDR + 0xf9fe000000UL) - -#define POLARIS_IACK_SC POLARIS_IACK_BASE - -/* The Polaris command/status registers live in PCI Config space for - * bus 0/device 0. As such, they may be bytes, words, or doublewords. - */ -#define POLARIS_W_VENID (POLARIS_DENSE_CONFIG_BASE) -#define POLARIS_W_DEVID (POLARIS_DENSE_CONFIG_BASE+2) -#define POLARIS_W_CMD (POLARIS_DENSE_CONFIG_BASE+4) -#define POLARIS_W_STATUS (POLARIS_DENSE_CONFIG_BASE+6) - -/* - * Data structure for handling POLARIS machine checks: - */ -struct el_POLARIS_sysdata_mcheck { - u_long psc_status; - u_long psc_pcictl0; - u_long psc_pcictl1; - u_long psc_pcictl2; -}; - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * POLARIS, the PCI/memory support chipset for the PCA56 (21164PC) - * processors, can use either a sparse address mapping scheme, or the - * so-called byte-word PCI address space, to get at PCI memory and I/O. - * - * However, we will support only the BWX form. - */ - -/* - * Memory functions. Polaris allows all accesses (byte/word - * as well as long/quad) to be done through dense space. - * - * We will only support DENSE access via BWX insns. - */ - -__EXTERN_INLINE void __iomem *polaris_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + POLARIS_DENSE_IO_BASE); -} - -__EXTERN_INLINE void __iomem *polaris_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + POLARIS_DENSE_MEM_BASE); -} - -__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr) -{ - return addr >= POLARIS_SPARSE_MEM_BASE; -} - -__EXTERN_INLINE int polaris_is_mmio(const volatile void __iomem *addr) -{ - return (unsigned long)addr < POLARIS_SPARSE_IO_BASE; -} - -#undef __IO_PREFIX -#define __IO_PREFIX polaris -#define polaris_trivial_rw_bw 1 -#define polaris_trivial_rw_lq 1 -#define polaris_trivial_io_bw 1 -#define polaris_trivial_io_lq 1 -#define polaris_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_POLARIS__H__ */ diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h deleted file mode 100644 index 46bfff58f67..00000000000 --- a/include/asm-alpha/core_t2.h +++ /dev/null @@ -1,633 +0,0 @@ -#ifndef __ALPHA_T2__H__ -#define __ALPHA_T2__H__ - -#include -#include -#include -#include - -/* - * T2 is the internal name for the core logic chipset which provides - * memory controller and PCI access for the SABLE-based systems. - * - * This file is based on: - * - * SABLE I/O Specification - * Revision/Update Information: 1.3 - * - * jestabro@amt.tay1.dec.com Initial Version. 
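The Polaris helpers above reduce address classification to two comparisons against fixed region bases. A host-side sketch, with IDENT_ADDR treated as 0 (it is not defined in this hunk) and a 64-bit long assumed, showing how addresses produced by polaris_ioremap() and polaris_ioportmap() would be classified:

#include <stdio.h>

#define IDENT_ADDR			0x0UL	/* placeholder for this sketch */
#define POLARIS_SPARSE_MEM_BASE		(IDENT_ADDR + 0xf800000000UL)
#define POLARIS_DENSE_MEM_BASE		(IDENT_ADDR + 0xf900000000UL)
#define POLARIS_SPARSE_IO_BASE		(IDENT_ADDR + 0xf980000000UL)
#define POLARIS_DENSE_IO_BASE		(IDENT_ADDR + 0xf9fc000000UL)

static int polaris_is_ioaddr(unsigned long addr)
{
	return addr >= POLARIS_SPARSE_MEM_BASE;
}

static int polaris_is_mmio(unsigned long addr)
{
	return addr < POLARIS_SPARSE_IO_BASE;
}

int main(void)
{
	/* what polaris_ioremap() / polaris_ioportmap() above would hand back */
	unsigned long mem  = 0x1000 + POLARIS_DENSE_MEM_BASE;
	unsigned long port = 0x3f8  + POLARIS_DENSE_IO_BASE;

	printf("mem  %#lx: ioaddr=%d mmio=%d\n", mem,
	       polaris_is_ioaddr(mem), polaris_is_mmio(mem));
	printf("port %#lx: ioaddr=%d mmio=%d\n", port,
	       polaris_is_ioaddr(port), polaris_is_mmio(port));
	return 0;
}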
- * - */ - -#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */ - -/* GAMMA-SABLE is a SABLE with EV5-based CPUs */ -/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */ -#define _GAMMA_BIAS 0x8000000000UL - -#if defined(CONFIG_ALPHA_GENERIC) -#define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias -#elif defined(CONFIG_ALPHA_GAMMA) -#define GAMMA_BIAS _GAMMA_BIAS -#else -#define GAMMA_BIAS 0 -#endif - -/* - * Memory spaces: - */ -#define T2_CONF (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL) -#define T2_IO (IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL) -#define T2_SPARSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x200000000UL) -#define T2_DENSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL) - -#define T2_IOCSR (IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL) -#define T2_CERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL) -#define T2_CERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL) -#define T2_CERR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL) -#define T2_PERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL) -#define T2_PERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL) -#define T2_PSCR (IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL) -#define T2_HAE_1 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL) -#define T2_HAE_2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL) -#define T2_HBASE (IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL) -#define T2_WBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL) -#define T2_WMASK1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL) -#define T2_TBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL) -#define T2_WBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL) -#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL) -#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL) -#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL) -#define T2_IVR (IDENT_ADDR + GAMMA_BIAS + 0x38e000220UL) -#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL) -#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL) - -/* The CSRs below are T3/T4 only */ -#define T2_WBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000280UL) -#define T2_WMASK3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002a0UL) -#define T2_TBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002c0UL) - -#define T2_TDR0 (IDENT_ADDR + GAMMA_BIAS + 0x38e000300UL) -#define T2_TDR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000320UL) -#define T2_TDR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000340UL) -#define T2_TDR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000360UL) -#define T2_TDR4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000380UL) -#define T2_TDR5 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003a0UL) -#define T2_TDR6 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003c0UL) -#define T2_TDR7 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003e0UL) - -#define T2_WBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000400UL) -#define T2_WMASK4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000420UL) -#define T2_TBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000440UL) - -#define T2_AIR (IDENT_ADDR + GAMMA_BIAS + 0x38e000460UL) -#define T2_VAR (IDENT_ADDR + GAMMA_BIAS + 0x38e000480UL) -#define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL) -#define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL) - -#define T2_HAE_ADDRESS T2_HAE_1 - -/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to - 3.8fff.ffff - * - * +--------------+ 3 8000 0000 - * | CPU 0 CSRs | - * +--------------+ 3 8100 0000 - * | CPU 1 CSRs | - * +--------------+ 3 8200 0000 - * | CPU 2 CSRs | - * +--------------+ 3 8300 0000 - * | CPU 3 CSRs | - * +--------------+ 3 8400 0000 - * | CPU Reserved | - * +--------------+ 3 8700 0000 - * | Mem Reserved | - * +--------------+ 3 8800 0000 - * | Mem 0 CSRs | - * +--------------+ 3 8900 0000 - * 
| Mem 1 CSRs | - * +--------------+ 3 8a00 0000 - * | Mem 2 CSRs | - * +--------------+ 3 8b00 0000 - * | Mem 3 CSRs | - * +--------------+ 3 8c00 0000 - * | Mem Reserved | - * +--------------+ 3 8e00 0000 - * | PCI Bridge | - * +--------------+ 3 8f00 0000 - * | Expansion IO | - * +--------------+ 3 9000 0000 - * - * - */ -#define T2_CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L) -#define T2_CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L) -#define T2_CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L) -#define T2_CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L) - -#define T2_CPUn_BASE(n) (T2_CPU0_BASE + (((n)&3) * 0x001000000L)) - -#define T2_MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L) -#define T2_MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L) -#define T2_MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L) -#define T2_MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L) - - -/* - * Sable CPU Module CSRS - * - * These are CSRs for hardware other than the CPU chip on the CPU module. - * The CPU module has Backup Cache control logic, Cbus control logic, and - * interrupt control logic on it. There is a duplicate tag store to speed - * up maintaining cache coherency. - */ - -struct sable_cpu_csr { - unsigned long bcc; long fill_00[3]; /* Backup Cache Control */ - unsigned long bcce; long fill_01[3]; /* Backup Cache Correctable Error */ - unsigned long bccea; long fill_02[3]; /* B-Cache Corr Err Address Latch */ - unsigned long bcue; long fill_03[3]; /* B-Cache Uncorrectable Error */ - unsigned long bcuea; long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */ - unsigned long dter; long fill_05[3]; /* Duplicate Tag Error */ - unsigned long cbctl; long fill_06[3]; /* CBus Control */ - unsigned long cbe; long fill_07[3]; /* CBus Error */ - unsigned long cbeal; long fill_08[3]; /* CBus Error Addr Latch low */ - unsigned long cbeah; long fill_09[3]; /* CBus Error Addr Latch high */ - unsigned long pmbx; long fill_10[3]; /* Processor Mailbox */ - unsigned long ipir; long fill_11[3]; /* Inter-Processor Int Request */ - unsigned long sic; long fill_12[3]; /* System Interrupt Clear */ - unsigned long adlk; long fill_13[3]; /* Address Lock (LDxL/STxC) */ - unsigned long madrl; long fill_14[3]; /* CBus Miss Address */ - unsigned long rev; long fill_15[3]; /* CMIC Revision */ -}; - -/* - * Data structure for handling T2 machine checks: - */ -struct el_t2_frame_header { - unsigned int elcf_fid; /* Frame ID (from above) */ - unsigned int elcf_size; /* Size of frame in bytes */ -}; - -struct el_t2_procdata_mcheck { - unsigned long elfmc_paltemp[32]; /* PAL TEMP REGS. */ - /* EV4-specific fields */ - unsigned long elfmc_exc_addr; /* Addr of excepting insn. */ - unsigned long elfmc_exc_sum; /* Summary of arith traps. */ - unsigned long elfmc_exc_mask; /* Exception mask (from exc_sum). */ - unsigned long elfmc_iccsr; /* IBox hardware enables. */ - unsigned long elfmc_pal_base; /* Base address for PALcode. */ - unsigned long elfmc_hier; /* Hardware Interrupt Enable. */ - unsigned long elfmc_hirr; /* Hardware Interrupt Request. */ - unsigned long elfmc_mm_csr; /* D-stream fault info. */ - unsigned long elfmc_dc_stat; /* D-cache status (ECC/Parity Err). */ - unsigned long elfmc_dc_addr; /* EV3 Phys Addr for ECC/DPERR. */ - unsigned long elfmc_abox_ctl; /* ABox Control Register. */ - unsigned long elfmc_biu_stat; /* BIU Status. */ - unsigned long elfmc_biu_addr; /* BUI Address. */ - unsigned long elfmc_biu_ctl; /* BIU Control. */ - unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. 
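Two layout facts above are easy to check mechanically: each CPU module's CSR block sits at a 0x01000000 stride from T2_CPU0_BASE, and within struct sable_cpu_csr every register occupies one quadword plus three pad quadwords, i.e. 32 bytes on an LP64 target such as Alpha. A sketch with IDENT_ADDR and GAMMA_BIAS taken as 0:

#include <stdio.h>
#include <stddef.h>

#define T2_CPU0_BASE		0x380000000UL		/* biases omitted */
#define T2_CPUn_BASE(n)		(T2_CPU0_BASE + (((n) & 3) * 0x001000000UL))

/* one CSR slot as laid out in struct sable_cpu_csr: value + 3 pad quads */
struct csr_slot {
	unsigned long csr;
	long fill[3];
};

int main(void)
{
	int n;

	for (n = 0; n < 4; n++)
		printf("CPU%d CSR block at %#lx\n", n, T2_CPUn_BASE(n));

	printf("CSR stride: %zu bytes\n", sizeof(struct csr_slot));
	/* e.g. the CBus Error register is CSR 7 in the block */
	printf("CBus Error (CSR 7) offset: %zu\n", 7 * sizeof(struct csr_slot));
	return 0;
}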
*/ - unsigned long elfmc_fill_addr;/* Cache block which was being read. */ - unsigned long elfmc_va; /* Effective VA of fault or miss. */ - unsigned long elfmc_bc_tag; /* Backup Cache Tag Probe Results. */ -}; - -/* - * Sable processor specific Machine Check Data segment. - */ - -struct el_t2_logout_header { - unsigned int elfl_size; /* size in bytes of logout area. */ - unsigned int elfl_sbz1:31; /* Should be zero. */ - unsigned int elfl_retry:1; /* Retry flag. */ - unsigned int elfl_procoffset; /* Processor-specific offset. */ - unsigned int elfl_sysoffset; /* Offset of system-specific. */ - unsigned int elfl_error_type; /* PAL error type code. */ - unsigned int elfl_frame_rev; /* PAL Frame revision. */ -}; -struct el_t2_sysdata_mcheck { - unsigned long elcmc_bcc; /* CSR 0 */ - unsigned long elcmc_bcce; /* CSR 1 */ - unsigned long elcmc_bccea; /* CSR 2 */ - unsigned long elcmc_bcue; /* CSR 3 */ - unsigned long elcmc_bcuea; /* CSR 4 */ - unsigned long elcmc_dter; /* CSR 5 */ - unsigned long elcmc_cbctl; /* CSR 6 */ - unsigned long elcmc_cbe; /* CSR 7 */ - unsigned long elcmc_cbeal; /* CSR 8 */ - unsigned long elcmc_cbeah; /* CSR 9 */ - unsigned long elcmc_pmbx; /* CSR 10 */ - unsigned long elcmc_ipir; /* CSR 11 */ - unsigned long elcmc_sic; /* CSR 12 */ - unsigned long elcmc_adlk; /* CSR 13 */ - unsigned long elcmc_madrl; /* CSR 14 */ - unsigned long elcmc_crrev4; /* CSR 15 */ -}; - -/* - * Sable memory error frame - sable pfms section 3.42 - */ -struct el_t2_data_memory { - struct el_t2_frame_header elcm_hdr; /* ID$MEM-FERR = 0x08 */ - unsigned int elcm_module; /* Module id. */ - unsigned int elcm_res04; /* Reserved. */ - unsigned long elcm_merr; /* CSR0: Error Reg 1. */ - unsigned long elcm_mcmd1; /* CSR1: Command Trap 1. */ - unsigned long elcm_mcmd2; /* CSR2: Command Trap 2. */ - unsigned long elcm_mconf; /* CSR3: Configuration. */ - unsigned long elcm_medc1; /* CSR4: EDC Status 1. */ - unsigned long elcm_medc2; /* CSR5: EDC Status 2. */ - unsigned long elcm_medcc; /* CSR6: EDC Control. */ - unsigned long elcm_msctl; /* CSR7: Stream Buffer Control. */ - unsigned long elcm_mref; /* CSR8: Refresh Control. */ - unsigned long elcm_filter; /* CSR9: CRD Filter Control. 
*/ -}; - - -/* - * Sable other CPU error frame - sable pfms section 3.43 - */ -struct el_t2_data_other_cpu { - short elco_cpuid; /* CPU ID */ - short elco_res02[3]; - unsigned long elco_bcc; /* CSR 0 */ - unsigned long elco_bcce; /* CSR 1 */ - unsigned long elco_bccea; /* CSR 2 */ - unsigned long elco_bcue; /* CSR 3 */ - unsigned long elco_bcuea; /* CSR 4 */ - unsigned long elco_dter; /* CSR 5 */ - unsigned long elco_cbctl; /* CSR 6 */ - unsigned long elco_cbe; /* CSR 7 */ - unsigned long elco_cbeal; /* CSR 8 */ - unsigned long elco_cbeah; /* CSR 9 */ - unsigned long elco_pmbx; /* CSR 10 */ - unsigned long elco_ipir; /* CSR 11 */ - unsigned long elco_sic; /* CSR 12 */ - unsigned long elco_adlk; /* CSR 13 */ - unsigned long elco_madrl; /* CSR 14 */ - unsigned long elco_crrev4; /* CSR 15 */ -}; - -/* - * Sable other CPU error frame - sable pfms section 3.44 - */ -struct el_t2_data_t2{ - struct el_t2_frame_header elct_hdr; /* ID$T2-FRAME */ - unsigned long elct_iocsr; /* IO Control and Status Register */ - unsigned long elct_cerr1; /* Cbus Error Register 1 */ - unsigned long elct_cerr2; /* Cbus Error Register 2 */ - unsigned long elct_cerr3; /* Cbus Error Register 3 */ - unsigned long elct_perr1; /* PCI Error Register 1 */ - unsigned long elct_perr2; /* PCI Error Register 2 */ - unsigned long elct_hae0_1; /* High Address Extension Register 1 */ - unsigned long elct_hae0_2; /* High Address Extension Register 2 */ - unsigned long elct_hbase; /* High Base Register */ - unsigned long elct_wbase1; /* Window Base Register 1 */ - unsigned long elct_wmask1; /* Window Mask Register 1 */ - unsigned long elct_tbase1; /* Translated Base Register 1 */ - unsigned long elct_wbase2; /* Window Base Register 2 */ - unsigned long elct_wmask2; /* Window Mask Register 2 */ - unsigned long elct_tbase2; /* Translated Base Register 2 */ - unsigned long elct_tdr0; /* TLB Data Register 0 */ - unsigned long elct_tdr1; /* TLB Data Register 1 */ - unsigned long elct_tdr2; /* TLB Data Register 2 */ - unsigned long elct_tdr3; /* TLB Data Register 3 */ - unsigned long elct_tdr4; /* TLB Data Register 4 */ - unsigned long elct_tdr5; /* TLB Data Register 5 */ - unsigned long elct_tdr6; /* TLB Data Register 6 */ - unsigned long elct_tdr7; /* TLB Data Register 7 */ -}; - -/* - * Sable error log data structure - sable pfms section 3.40 - */ -struct el_t2_data_corrected { - unsigned long elcpb_biu_stat; - unsigned long elcpb_biu_addr; - unsigned long elcpb_biu_ctl; - unsigned long elcpb_fill_syndrome; - unsigned long elcpb_fill_addr; - unsigned long elcpb_bc_tag; -}; - -/* - * Sable error log data structure - * Note there are 4 memory slots on sable (see t2.h) - */ -struct el_t2_frame_mcheck { - struct el_t2_frame_header elfmc_header; /* ID$P-FRAME_MCHECK */ - struct el_t2_logout_header elfmc_hdr; - struct el_t2_procdata_mcheck elfmc_procdata; - struct el_t2_sysdata_mcheck elfmc_sysdata; - struct el_t2_data_t2 elfmc_t2data; - struct el_t2_data_memory elfmc_memdata[4]; - struct el_t2_frame_header elfmc_footer; /* empty */ -}; - - -/* - * Sable error log data structures on memory errors - */ -struct el_t2_frame_corrected { - struct el_t2_frame_header elfcc_header; /* ID$P-BC-COR */ - struct el_t2_logout_header elfcc_hdr; - struct el_t2_data_corrected elfcc_procdata; -/* struct el_t2_data_t2 elfcc_t2data; */ -/* struct el_t2_data_memory elfcc_memdata[4]; */ - struct el_t2_frame_header elfcc_footer; /* empty */ -}; - - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE 
-#endif - -/* - * I/O functions: - * - * T2 (the core logic PCI/memory support chipset for the SABLE - * series of processors uses a sparse address mapping scheme to - * get at PCI memory and I/O. - */ - -#define vip volatile int * -#define vuip volatile unsigned int * - -extern inline u8 t2_inb(unsigned long addr) -{ - long result = *(vip) ((addr << 5) + T2_IO + 0x00); - return __kernel_extbl(result, addr & 3); -} - -extern inline void t2_outb(u8 b, unsigned long addr) -{ - unsigned long w; - - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + T2_IO + 0x00) = w; - mb(); -} - -extern inline u16 t2_inw(unsigned long addr) -{ - long result = *(vip) ((addr << 5) + T2_IO + 0x08); - return __kernel_extwl(result, addr & 3); -} - -extern inline void t2_outw(u16 b, unsigned long addr) -{ - unsigned long w; - - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + T2_IO + 0x08) = w; - mb(); -} - -extern inline u32 t2_inl(unsigned long addr) -{ - return *(vuip) ((addr << 5) + T2_IO + 0x18); -} - -extern inline void t2_outl(u32 b, unsigned long addr) -{ - *(vuip) ((addr << 5) + T2_IO + 0x18) = b; - mb(); -} - - -/* - * Memory functions. - * - * For reading and writing 8 and 16 bit quantities we need to - * go through one of the three sparse address mapping regions - * and use the HAE_MEM CSR to provide some bits of the address. - * The following few routines use only sparse address region 1 - * which gives 1Gbyte of accessible space which relates exactly - * to the amount of PCI memory mapping *into* system address space. - * See p 6-17 of the specification but it looks something like this: - * - * 21164 Address: - * - * 3 2 1 - * 9876543210987654321098765432109876543210 - * 1ZZZZ0.PCI.QW.Address............BBLL - * - * ZZ = SBZ - * BB = Byte offset - * LL = Transfer length - * - * PCI Address: - * - * 3 2 1 - * 10987654321098765432109876543210 - * HHH....PCI.QW.Address........ 00 - * - * HHH = 31:29 HAE_MEM CSR - * - */ - -#define t2_set_hae { \ - msb = addr >> 27; \ - addr &= T2_MEM_R1_MASK; \ - set_hae(msb); \ -} - -extern spinlock_t t2_hae_lock; - -/* - * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since - * they may be called directly, rather than through the - * ioreadNN/iowriteNN routines. - */ - -__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long result, msb; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); - spin_unlock_irqrestore(&t2_hae_lock, flags); - return __kernel_extbl(result, addr & 3); -} - -__EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long result, msb; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); - spin_unlock_irqrestore(&t2_hae_lock, flags); - return __kernel_extwl(result, addr & 3); -} - -/* - * On SABLE with T2, we must use SPARSE memory even for 32-bit access, - * because we cannot access all of DENSE without changing its HAE. 
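The sparse-memory readers above always follow the same recipe: take the HAE window number from the top bits of the byte address, keep the low 27 bits (T2_MEM_R1_MASK), shift left by five to make room for the length and byte-lane encoding, and finally extract the addressed byte from the returned longword. A host-side sketch of just that address arithmetic, with the chipset base reduced to its raw offset and set_hae() modeled as a plain variable write:

#include <stdio.h>

#define T2_MEM_R1_MASK	0x07ffffffULL		/* sparse region 1: 27 bits */
#define T2_SPARSE_MEM	0x200000000ULL		/* bias omitted */

static unsigned long long cur_hae;

static void set_hae(unsigned long long msb)
{
	cur_hae = msb;				/* models the HAE_MEM write */
}

/* where a byte read of 'byteaddr' is issued in sparse space */
static unsigned long long t2_sparse_byte_addr(unsigned long long byteaddr)
{
	unsigned long long msb = byteaddr >> 27;
	unsigned long long off = byteaddr & T2_MEM_R1_MASK;

	set_hae(msb);
	return (off << 5) + T2_SPARSE_MEM + 0x00;
}

int main(void)
{
	unsigned long long pci = 0x12345678;
	unsigned long long sparse = t2_sparse_byte_addr(pci);

	printf("PCI byte %#llx -> HAE %#llx, sparse address %#llx, byte lane %llu\n",
	       pci, cur_hae, sparse, pci & 3);
	return 0;
}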
- */ -__EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long result, msb; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); - spin_unlock_irqrestore(&t2_hae_lock, flags); - return result & 0xffffffffUL; -} - -__EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long r0, r1, work, msb; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - work = (addr << 5) + T2_SPARSE_MEM + 0x18; - r0 = *(vuip)(work); - r1 = *(vuip)(work + (4 << 5)); - spin_unlock_irqrestore(&t2_hae_lock, flags); - return r1 << 32 | r0; -} - -__EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long msb, w; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - w = __kernel_insbl(b, addr & 3); - *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; - spin_unlock_irqrestore(&t2_hae_lock, flags); -} - -__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long msb, w; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - w = __kernel_inswl(b, addr & 3); - *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; - spin_unlock_irqrestore(&t2_hae_lock, flags); -} - -/* - * On SABLE with T2, we must use SPARSE memory even for 32-bit access, - * because we cannot access all of DENSE without changing its HAE. - */ -__EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long msb; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; - spin_unlock_irqrestore(&t2_hae_lock, flags); -} - -__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; - unsigned long msb, work; - unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); - - t2_set_hae; - - work = (addr << 5) + T2_SPARSE_MEM + 0x18; - *(vuip)work = b; - *(vuip)(work + (4 << 5)) = b >> 32; - spin_unlock_irqrestore(&t2_hae_lock, flags); -} - -__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + T2_IO); -} - -__EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + T2_DENSE_MEM); -} - -__EXTERN_INLINE int t2_is_ioaddr(unsigned long addr) -{ - return (long)addr >= 0; -} - -__EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr) -{ - return (unsigned long)addr >= T2_DENSE_MEM; -} - -/* New-style ioread interface. The mmio routines are so ugly for T2 that - it doesn't make sense to merge the pio and mmio routines. 
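t2_readq() above has no 64-bit sparse cycle available, so it issues two longword reads 4 bytes apart in PCI space (a 4 << 5 step in sparse space) and splices them together. The splice itself, simulated against an ordinary buffer rather than the chipset window:

#include <stdio.h>
#include <stdint.h>

/* combine two adjacent 32-bit reads into one quadword, low half first */
static uint64_t readq_from_two_longs(const uint32_t *p)
{
	uint64_t r0 = p[0];		/* low longword  */
	uint64_t r1 = p[1];		/* high longword */

	return (r1 << 32) | r0;
}

int main(void)
{
	uint32_t halves[2] = { 0xdeadbeefu, 0x01234567u };

	printf("combined quadword: %#018llx\n",
	       (unsigned long long)readq_from_two_longs(halves));
	return 0;
}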
*/ - -#define IOPORT(OS, NS) \ -__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \ -{ \ - if (t2_is_mmio(xaddr)) \ - return t2_read##OS(xaddr); \ - else \ - return t2_in##OS((unsigned long)xaddr - T2_IO); \ -} \ -__EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \ -{ \ - if (t2_is_mmio(xaddr)) \ - t2_write##OS(b, xaddr); \ - else \ - t2_out##OS(b, (unsigned long)xaddr - T2_IO); \ -} - -IOPORT(b, 8) -IOPORT(w, 16) -IOPORT(l, 32) - -#undef IOPORT - -#undef vip -#undef vuip - -#undef __IO_PREFIX -#define __IO_PREFIX t2 -#define t2_trivial_rw_bw 0 -#define t2_trivial_rw_lq 0 -#define t2_trivial_io_bw 0 -#define t2_trivial_io_lq 0 -#define t2_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_T2__H__ */ diff --git a/include/asm-alpha/core_titan.h b/include/asm-alpha/core_titan.h deleted file mode 100644 index a17f6f33b68..00000000000 --- a/include/asm-alpha/core_titan.h +++ /dev/null @@ -1,410 +0,0 @@ -#ifndef __ALPHA_TITAN__H__ -#define __ALPHA_TITAN__H__ - -#include -#include -#include - -/* - * TITAN is the internal names for a core logic chipset which provides - * memory controller and PCI/AGP access for 21264 based systems. - * - * This file is based on: - * - * Titan Chipset Engineering Specification - * Revision 0.12 - * 13 July 1999 - * - */ - -/* XXX: Do we need to conditionalize on this? */ -#ifdef USE_48_BIT_KSEG -#define TI_BIAS 0x80000000000UL -#else -#define TI_BIAS 0x10000000000UL -#endif - -/* - * CChip, DChip, and PChip registers - */ - -typedef struct { - volatile unsigned long csr __attribute__((aligned(64))); -} titan_64; - -typedef struct { - titan_64 csc; - titan_64 mtr; - titan_64 misc; - titan_64 mpd; - titan_64 aar0; - titan_64 aar1; - titan_64 aar2; - titan_64 aar3; - titan_64 dim0; - titan_64 dim1; - titan_64 dir0; - titan_64 dir1; - titan_64 drir; - titan_64 prben; - titan_64 iic0; - titan_64 iic1; - titan_64 mpr0; - titan_64 mpr1; - titan_64 mpr2; - titan_64 mpr3; - titan_64 rsvd[2]; - titan_64 ttr; - titan_64 tdr; - titan_64 dim2; - titan_64 dim3; - titan_64 dir2; - titan_64 dir3; - titan_64 iic2; - titan_64 iic3; - titan_64 pwr; - titan_64 reserved[17]; - titan_64 cmonctla; - titan_64 cmonctlb; - titan_64 cmoncnt01; - titan_64 cmoncnt23; - titan_64 cpen; -} titan_cchip; - -typedef struct { - titan_64 dsc; - titan_64 str; - titan_64 drev; - titan_64 dsc2; -} titan_dchip; - -typedef struct { - titan_64 wsba[4]; - titan_64 wsm[4]; - titan_64 tba[4]; - titan_64 pctl; - titan_64 plat; - titan_64 reserved0[2]; - union { - struct { - titan_64 serror; - titan_64 serren; - titan_64 serrset; - titan_64 reserved0; - titan_64 gperror; - titan_64 gperren; - titan_64 gperrset; - titan_64 reserved1; - titan_64 gtlbiv; - titan_64 gtlbia; - titan_64 reserved2[2]; - titan_64 sctl; - titan_64 reserved3[3]; - } g; - struct { - titan_64 agperror; - titan_64 agperren; - titan_64 agperrset; - titan_64 agplastwr; - titan_64 aperror; - titan_64 aperren; - titan_64 aperrset; - titan_64 reserved0; - titan_64 atlbiv; - titan_64 atlbia; - titan_64 reserved1[6]; - } a; - } port_specific; - titan_64 sprst; - titan_64 reserved1[31]; -} titan_pachip_port; - -typedef struct { - titan_pachip_port g_port; - titan_pachip_port a_port; -} titan_pachip; - -#define TITAN_cchip ((titan_cchip *)(IDENT_ADDR+TI_BIAS+0x1A0000000UL)) -#define TITAN_dchip ((titan_dchip *)(IDENT_ADDR+TI_BIAS+0x1B0000800UL)) -#define TITAN_pachip0 ((titan_pachip 
*)(IDENT_ADDR+TI_BIAS+0x180000000UL)) -#define TITAN_pachip1 ((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x380000000UL)) -extern unsigned TITAN_agp; -extern int TITAN_bootcpu; - -/* - * TITAN PA-chip Window Space Base Address register. - * (WSBA[0-2]) - */ -#define wsba_m_ena 0x1 -#define wsba_m_sg 0x2 -#define wsba_m_addr 0xFFF00000 -#define wmask_k_sz1gb 0x3FF00000 -union TPAchipWSBA { - struct { - unsigned wsba_v_ena : 1; - unsigned wsba_v_sg : 1; - unsigned wsba_v_rsvd1 : 18; - unsigned wsba_v_addr : 12; - unsigned wsba_v_rsvd2 : 32; - } wsba_r_bits; - int wsba_q_whole [2]; -}; - -/* - * TITAN PA-chip Control Register - * This definition covers both the G-Port GPCTL and the A-PORT APCTL. - * Bits <51:0> are the same in both cases. APCTL<63:52> are only - * applicable to AGP. - */ -#define pctl_m_fbtb 0x00000001 -#define pctl_m_thdis 0x00000002 -#define pctl_m_chaindis 0x00000004 -#define pctl_m_tgtlat 0x00000018 -#define pctl_m_hole 0x00000020 -#define pctl_m_mwin 0x00000040 -#define pctl_m_arbena 0x00000080 -#define pctl_m_prigrp 0x0000FF00 -#define pctl_m_ppri 0x00010000 -#define pctl_m_pcispd66 0x00020000 -#define pctl_m_cngstlt 0x003C0000 -#define pctl_m_ptpdesten 0x3FC00000 -#define pctl_m_dpcen 0x40000000 -#define pctl_m_apcen 0x0000000080000000UL -#define pctl_m_dcrtv 0x0000000300000000UL -#define pctl_m_en_stepping 0x0000000400000000UL -#define apctl_m_rsvd1 0x000FFFF800000000UL -#define apctl_m_agp_rate 0x0030000000000000UL -#define apctl_m_agp_sba_en 0x0040000000000000UL -#define apctl_m_agp_en 0x0080000000000000UL -#define apctl_m_rsvd2 0x0100000000000000UL -#define apctl_m_agp_present 0x0200000000000000UL -#define apctl_agp_hp_rd 0x1C00000000000000UL -#define apctl_agp_lp_rd 0xE000000000000000UL -#define gpctl_m_rsvd 0xFFFFFFF800000000UL -union TPAchipPCTL { - struct { - unsigned pctl_v_fbtb : 1; /* A/G [0] */ - unsigned pctl_v_thdis : 1; /* A/G [1] */ - unsigned pctl_v_chaindis : 1; /* A/G [2] */ - unsigned pctl_v_tgtlat : 2; /* A/G [4:3] */ - unsigned pctl_v_hole : 1; /* A/G [5] */ - unsigned pctl_v_mwin : 1; /* A/G [6] */ - unsigned pctl_v_arbena : 1; /* A/G [7] */ - unsigned pctl_v_prigrp : 8; /* A/G [15:8] */ - unsigned pctl_v_ppri : 1; /* A/G [16] */ - unsigned pctl_v_pcispd66 : 1; /* A/G [17] */ - unsigned pctl_v_cngstlt : 4; /* A/G [21:18] */ - unsigned pctl_v_ptpdesten : 8; /* A/G [29:22] */ - unsigned pctl_v_dpcen : 1; /* A/G [30] */ - unsigned pctl_v_apcen : 1; /* A/G [31] */ - unsigned pctl_v_dcrtv : 2; /* A/G [33:32] */ - unsigned pctl_v_en_stepping :1; /* A/G [34] */ - unsigned apctl_v_rsvd1 : 17; /* A [51:35] */ - unsigned apctl_v_agp_rate : 2; /* A [53:52] */ - unsigned apctl_v_agp_sba_en : 1; /* A [54] */ - unsigned apctl_v_agp_en : 1; /* A [55] */ - unsigned apctl_v_rsvd2 : 1; /* A [56] */ - unsigned apctl_v_agp_present : 1; /* A [57] */ - unsigned apctl_v_agp_hp_rd : 3; /* A [60:58] */ - unsigned apctl_v_agp_lp_rd : 3; /* A [63:61] */ - } pctl_r_bits; - unsigned int pctl_l_whole [2]; - unsigned long pctl_q_whole; -}; - -/* - * SERROR / SERREN / SERRSET - */ -union TPAchipSERR { - struct { - unsigned serr_v_lost_uecc : 1; /* [0] */ - unsigned serr_v_uecc : 1; /* [1] */ - unsigned serr_v_cre : 1; /* [2] */ - unsigned serr_v_nxio : 1; /* [3] */ - unsigned serr_v_lost_cre : 1; /* [4] */ - unsigned serr_v_rsvd0 : 10; /* [14:5] */ - unsigned serr_v_addr : 32; /* [46:15] */ - unsigned serr_v_rsvd1 : 5; /* [51:47] */ - unsigned serr_v_source : 2; /* [53:52] */ - unsigned serr_v_cmd : 2; /* [55:54] */ - unsigned serr_v_syn : 8; /* [63:56] */ - } serr_r_bits; - unsigned 
int serr_l_whole[2]; - unsigned long serr_q_whole; -}; - -/* - * GPERROR / APERROR / GPERREN / APERREN / GPERRSET / APERRSET - */ -union TPAchipPERR { - struct { - unsigned long perr_v_lost : 1; /* [0] */ - unsigned long perr_v_serr : 1; /* [1] */ - unsigned long perr_v_perr : 1; /* [2] */ - unsigned long perr_v_dcrto : 1; /* [3] */ - unsigned long perr_v_sge : 1; /* [4] */ - unsigned long perr_v_ape : 1; /* [5] */ - unsigned long perr_v_ta : 1; /* [6] */ - unsigned long perr_v_dpe : 1; /* [7] */ - unsigned long perr_v_nds : 1; /* [8] */ - unsigned long perr_v_iptpr : 1; /* [9] */ - unsigned long perr_v_iptpw : 1; /* [10] */ - unsigned long perr_v_rsvd0 : 3; /* [13:11] */ - unsigned long perr_v_addr : 33; /* [46:14] */ - unsigned long perr_v_dac : 1; /* [47] */ - unsigned long perr_v_mwin : 1; /* [48] */ - unsigned long perr_v_rsvd1 : 3; /* [51:49] */ - unsigned long perr_v_cmd : 4; /* [55:52] */ - unsigned long perr_v_rsvd2 : 8; /* [63:56] */ - } perr_r_bits; - unsigned int perr_l_whole[2]; - unsigned long perr_q_whole; -}; - -/* - * AGPERROR / AGPERREN / AGPERRSET - */ -union TPAchipAGPERR { - struct { - unsigned agperr_v_lost : 1; /* [0] */ - unsigned agperr_v_lpqfull : 1; /* [1] */ - unsigned apgerr_v_hpqfull : 1; /* [2] */ - unsigned agperr_v_rescmd : 1; /* [3] */ - unsigned agperr_v_ipte : 1; /* [4] */ - unsigned agperr_v_ptp : 1; /* [5] */ - unsigned agperr_v_nowindow : 1; /* [6] */ - unsigned agperr_v_rsvd0 : 8; /* [14:7] */ - unsigned agperr_v_addr : 32; /* [46:15] */ - unsigned agperr_v_rsvd1 : 1; /* [47] */ - unsigned agperr_v_dac : 1; /* [48] */ - unsigned agperr_v_mwin : 1; /* [49] */ - unsigned agperr_v_cmd : 3; /* [52:50] */ - unsigned agperr_v_length : 6; /* [58:53] */ - unsigned agperr_v_fence : 1; /* [59] */ - unsigned agperr_v_rsvd2 : 4; /* [63:60] */ - } agperr_r_bits; - unsigned int agperr_l_whole[2]; - unsigned long agperr_q_whole; -}; -/* - * Memory spaces: - * Hose numbers are assigned as follows: - * 0 - pachip 0 / G Port - * 1 - pachip 1 / G Port - * 2 - pachip 0 / A Port - * 3 - pachip 1 / A Port - */ -#define TITAN_HOSE_SHIFT (33) -#define TITAN_HOSE(h) (((unsigned long)(h)) << TITAN_HOSE_SHIFT) -#define TITAN_BASE (IDENT_ADDR + TI_BIAS) -#define TITAN_MEM(h) (TITAN_BASE+TITAN_HOSE(h)+0x000000000UL) -#define _TITAN_IACK_SC(h) (TITAN_BASE+TITAN_HOSE(h)+0x1F8000000UL) -#define TITAN_IO(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FC000000UL) -#define TITAN_CONF(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FE000000UL) - -#define TITAN_HOSE_MASK TITAN_HOSE(3) -#define TITAN_IACK_SC _TITAN_IACK_SC(0) /* hack! */ - -/* - * The canonical non-remaped I/O and MEM addresses have these values - * subtracted out. This is arranged so that folks manipulating ISA - * devices can use their familiar numbers and have them map to bus 0. - */ - -#define TITAN_IO_BIAS TITAN_IO(0) -#define TITAN_MEM_BIAS TITAN_MEM(0) - -/* The IO address space is larger than 0xffff */ -#define TITAN_IO_SPACE (TITAN_CONF(0) - TITAN_IO(0)) - -/* TIG Space */ -#define TITAN_TIG_SPACE (TITAN_BASE + 0x100000000UL) - -/* Offset between ram physical addresses and pci64 DAC bus addresses. */ -/* ??? Just a guess. Ought to confirm it hasn't been moved. 
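The hose macros above place each of the four hoses (the G then the A port of each pachip) at a 2^33 stride, with the I/O and config apertures at fixed offsets near the top of the hose. A sketch with TI_BIAS and IDENT_ADDR omitted, so only the hose-relative layout is shown:

#include <stdio.h>

#define TITAN_HOSE_SHIFT	33
#define TITAN_HOSE(h)		((unsigned long long)(h) << TITAN_HOSE_SHIFT)
#define TITAN_MEM(h)		(TITAN_HOSE(h) + 0x000000000ULL)
#define TITAN_IO(h)		(TITAN_HOSE(h) + 0x1FC000000ULL)
#define TITAN_CONF(h)		(TITAN_HOSE(h) + 0x1FE000000ULL)

int main(void)
{
	static const char *port[] = {
		"pachip0/G", "pachip1/G", "pachip0/A", "pachip1/A"
	};
	int h;

	for (h = 0; h < 4; h++)
		printf("hose %d (%s): mem %#012llx io %#012llx conf %#012llx\n",
		       h, port[h], TITAN_MEM(h), TITAN_IO(h), TITAN_CONF(h));
	return 0;
}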
*/ -#define TITAN_DAC_OFFSET (1UL << 40) - -/* - * Data structure for handling TITAN machine checks: - */ -#define SCB_Q_SYSERR 0x620 -#define SCB_Q_PROCERR 0x630 -#define SCB_Q_SYSMCHK 0x660 -#define SCB_Q_PROCMCHK 0x670 -#define SCB_Q_SYSEVENT 0x680 /* environmental / system management */ -struct el_TITAN_sysdata_mcheck { - u64 summary; /* 0x00 */ - u64 c_dirx; /* 0x08 */ - u64 c_misc; /* 0x10 */ - u64 p0_serror; /* 0x18 */ - u64 p0_gperror; /* 0x20 */ - u64 p0_aperror; /* 0x28 */ - u64 p0_agperror;/* 0x30 */ - u64 p1_serror; /* 0x38 */ - u64 p1_gperror; /* 0x40 */ - u64 p1_aperror; /* 0x48 */ - u64 p1_agperror;/* 0x50 */ -}; - -/* - * System area for a privateer 680 environmental/system management mcheck - */ -struct el_PRIVATEER_envdata_mcheck { - u64 summary; /* 0x00 */ - u64 c_dirx; /* 0x08 */ - u64 smir; /* 0x10 */ - u64 cpuir; /* 0x18 */ - u64 psir; /* 0x20 */ - u64 fault; /* 0x28 */ - u64 sys_doors; /* 0x30 */ - u64 temp_warn; /* 0x38 */ - u64 fan_ctrl; /* 0x40 */ - u64 code; /* 0x48 */ - u64 reserved; /* 0x50 */ -}; - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * TITAN, a 21??? PCI/memory support chipset for the EV6 (21264) - * can only use linear accesses to get at PCI/AGP memory and I/O spaces. - */ - -/* - * Memory functions. all accesses are done through linear space. - */ -extern void __iomem *titan_ioportmap(unsigned long addr); -extern void __iomem *titan_ioremap(unsigned long addr, unsigned long size); -extern void titan_iounmap(volatile void __iomem *addr); - -__EXTERN_INLINE int titan_is_ioaddr(unsigned long addr) -{ - return addr >= TITAN_BASE; -} - -extern int titan_is_mmio(const volatile void __iomem *addr); - -#undef __IO_PREFIX -#define __IO_PREFIX titan -#define titan_trivial_rw_bw 1 -#define titan_trivial_rw_lq 1 -#define titan_trivial_io_bw 1 -#define titan_trivial_io_lq 1 -#define titan_trivial_iounmap 0 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_TITAN__H__ */ diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h deleted file mode 100644 index 58d4fe48742..00000000000 --- a/include/asm-alpha/core_tsunami.h +++ /dev/null @@ -1,335 +0,0 @@ -#ifndef __ALPHA_TSUNAMI__H__ -#define __ALPHA_TSUNAMI__H__ - -#include -#include -#include - -/* - * TSUNAMI/TYPHOON are the internal names for the core logic chipset which - * provides memory controller and PCI access for the 21264 based systems. - * - * This file is based on: - * - * Tsunami System Programmers Manual - * Preliminary, Chapters 2-5 - * - */ - -/* XXX: Do we need to conditionalize on this? */ -#ifdef USE_48_BIT_KSEG -#define TS_BIAS 0x80000000000UL -#else -#define TS_BIAS 0x10000000000UL -#endif - -/* - * CChip, DChip, and PChip registers - */ - -typedef struct { - volatile unsigned long csr __attribute__((aligned(64))); -} tsunami_64; - -typedef struct { - tsunami_64 csc; - tsunami_64 mtr; - tsunami_64 misc; - tsunami_64 mpd; - tsunami_64 aar0; - tsunami_64 aar1; - tsunami_64 aar2; - tsunami_64 aar3; - tsunami_64 dim0; - tsunami_64 dim1; - tsunami_64 dir0; - tsunami_64 dir1; - tsunami_64 drir; - tsunami_64 prben; - tsunami_64 iic; /* a.k.a. iic0 */ - tsunami_64 wdr; /* a.k.a. 
iic1 */ - tsunami_64 mpr0; - tsunami_64 mpr1; - tsunami_64 mpr2; - tsunami_64 mpr3; - tsunami_64 mctl; - tsunami_64 __pad1; - tsunami_64 ttr; - tsunami_64 tdr; - tsunami_64 dim2; - tsunami_64 dim3; - tsunami_64 dir2; - tsunami_64 dir3; - tsunami_64 iic2; - tsunami_64 iic3; -} tsunami_cchip; - -typedef struct { - tsunami_64 dsc; - tsunami_64 str; - tsunami_64 drev; -} tsunami_dchip; - -typedef struct { - tsunami_64 wsba[4]; - tsunami_64 wsm[4]; - tsunami_64 tba[4]; - tsunami_64 pctl; - tsunami_64 plat; - tsunami_64 reserved; - tsunami_64 perror; - tsunami_64 perrmask; - tsunami_64 perrset; - tsunami_64 tlbiv; - tsunami_64 tlbia; - tsunami_64 pmonctl; - tsunami_64 pmoncnt; -} tsunami_pchip; - -#define TSUNAMI_cchip ((tsunami_cchip *)(IDENT_ADDR+TS_BIAS+0x1A0000000UL)) -#define TSUNAMI_dchip ((tsunami_dchip *)(IDENT_ADDR+TS_BIAS+0x1B0000800UL)) -#define TSUNAMI_pchip0 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x180000000UL)) -#define TSUNAMI_pchip1 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x380000000UL)) -extern int TSUNAMI_bootcpu; - -/* - * TSUNAMI Pchip Error register. - */ - -#define perror_m_lost 0x1 -#define perror_m_serr 0x2 -#define perror_m_perr 0x4 -#define perror_m_dcrto 0x8 -#define perror_m_sge 0x10 -#define perror_m_ape 0x20 -#define perror_m_ta 0x40 -#define perror_m_rdpe 0x80 -#define perror_m_nds 0x100 -#define perror_m_rto 0x200 -#define perror_m_uecc 0x400 -#define perror_m_cre 0x800 -#define perror_m_addrl 0xFFFFFFFF0000UL -#define perror_m_addrh 0x7000000000000UL -#define perror_m_cmd 0xF0000000000000UL -#define perror_m_syn 0xFF00000000000000UL -union TPchipPERROR { - struct { - unsigned int perror_v_lost : 1; - unsigned perror_v_serr : 1; - unsigned perror_v_perr : 1; - unsigned perror_v_dcrto : 1; - unsigned perror_v_sge : 1; - unsigned perror_v_ape : 1; - unsigned perror_v_ta : 1; - unsigned perror_v_rdpe : 1; - unsigned perror_v_nds : 1; - unsigned perror_v_rto : 1; - unsigned perror_v_uecc : 1; - unsigned perror_v_cre : 1; - unsigned perror_v_rsvd1 : 4; - unsigned perror_v_addrl : 32; - unsigned perror_v_addrh : 3; - unsigned perror_v_rsvd2 : 1; - unsigned perror_v_cmd : 4; - unsigned perror_v_syn : 8; - } perror_r_bits; - int perror_q_whole [2]; -}; - -/* - * TSUNAMI Pchip Window Space Base Address register. 
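The perror_m_* masks above make it straightforward to pull a raw Pchip error value apart without going through the bitfield union. A small decoder using only the published masks (the sample value is made up purely for illustration, and no claim is made about how the chipset scales the address fields):

#include <stdio.h>
#include <stdint.h>

#define perror_m_lost	0x1ULL
#define perror_m_uecc	0x400ULL
#define perror_m_addrl	0xFFFFFFFF0000ULL
#define perror_m_addrh	0x7000000000000ULL
#define perror_m_cmd	0xF0000000000000ULL
#define perror_m_syn	0xFF00000000000000ULL

static void decode_perror(uint64_t v)
{
	printf("PERROR %#018llx:\n", (unsigned long long)v);
	printf("  addrl %#llx  addrh %#llx  cmd %#llx  syn %#llx\n",
	       (unsigned long long)((v & perror_m_addrl) >> 16),
	       (unsigned long long)((v & perror_m_addrh) >> 48),
	       (unsigned long long)((v & perror_m_cmd)   >> 52),
	       (unsigned long long)((v & perror_m_syn)   >> 56));
	if (v & perror_m_uecc)
		printf("  uncorrectable ECC error bit set\n");
	if (v & perror_m_lost)
		printf("  lost error bit set\n");
}

int main(void)
{
	uint64_t v = (0xABULL << 56) | (0x3ULL << 52) | (0x1ULL << 48) |
		     (0x12345670ULL << 16) | perror_m_uecc | perror_m_lost;

	decode_perror(v);
	return 0;
}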
- */ -#define wsba_m_ena 0x1 -#define wsba_m_sg 0x2 -#define wsba_m_ptp 0x4 -#define wsba_m_addr 0xFFF00000 -#define wmask_k_sz1gb 0x3FF00000 -union TPchipWSBA { - struct { - unsigned wsba_v_ena : 1; - unsigned wsba_v_sg : 1; - unsigned wsba_v_ptp : 1; - unsigned wsba_v_rsvd1 : 17; - unsigned wsba_v_addr : 12; - unsigned wsba_v_rsvd2 : 32; - } wsba_r_bits; - int wsba_q_whole [2]; -}; - -/* - * TSUNAMI Pchip Control Register - */ -#define pctl_m_fdsc 0x1 -#define pctl_m_fbtb 0x2 -#define pctl_m_thdis 0x4 -#define pctl_m_chaindis 0x8 -#define pctl_m_tgtlat 0x10 -#define pctl_m_hole 0x20 -#define pctl_m_mwin 0x40 -#define pctl_m_arbena 0x80 -#define pctl_m_prigrp 0x7F00 -#define pctl_m_ppri 0x8000 -#define pctl_m_rsvd1 0x30000 -#define pctl_m_eccen 0x40000 -#define pctl_m_padm 0x80000 -#define pctl_m_cdqmax 0xF00000 -#define pctl_m_rev 0xFF000000 -#define pctl_m_crqmax 0xF00000000UL -#define pctl_m_ptpmax 0xF000000000UL -#define pctl_m_pclkx 0x30000000000UL -#define pctl_m_fdsdis 0x40000000000UL -#define pctl_m_fdwdis 0x80000000000UL -#define pctl_m_ptevrfy 0x100000000000UL -#define pctl_m_rpp 0x200000000000UL -#define pctl_m_pid 0xC00000000000UL -#define pctl_m_rsvd2 0xFFFF000000000000UL - -union TPchipPCTL { - struct { - unsigned pctl_v_fdsc : 1; - unsigned pctl_v_fbtb : 1; - unsigned pctl_v_thdis : 1; - unsigned pctl_v_chaindis : 1; - unsigned pctl_v_tgtlat : 1; - unsigned pctl_v_hole : 1; - unsigned pctl_v_mwin : 1; - unsigned pctl_v_arbena : 1; - unsigned pctl_v_prigrp : 7; - unsigned pctl_v_ppri : 1; - unsigned pctl_v_rsvd1 : 2; - unsigned pctl_v_eccen : 1; - unsigned pctl_v_padm : 1; - unsigned pctl_v_cdqmax : 4; - unsigned pctl_v_rev : 8; - unsigned pctl_v_crqmax : 4; - unsigned pctl_v_ptpmax : 4; - unsigned pctl_v_pclkx : 2; - unsigned pctl_v_fdsdis : 1; - unsigned pctl_v_fdwdis : 1; - unsigned pctl_v_ptevrfy : 1; - unsigned pctl_v_rpp : 1; - unsigned pctl_v_pid : 2; - unsigned pctl_v_rsvd2 : 16; - } pctl_r_bits; - int pctl_q_whole [2]; -}; - -/* - * TSUNAMI Pchip Error Mask Register. - */ -#define perrmask_m_lost 0x1 -#define perrmask_m_serr 0x2 -#define perrmask_m_perr 0x4 -#define perrmask_m_dcrto 0x8 -#define perrmask_m_sge 0x10 -#define perrmask_m_ape 0x20 -#define perrmask_m_ta 0x40 -#define perrmask_m_rdpe 0x80 -#define perrmask_m_nds 0x100 -#define perrmask_m_rto 0x200 -#define perrmask_m_uecc 0x400 -#define perrmask_m_cre 0x800 -#define perrmask_m_rsvd 0xFFFFFFFFFFFFF000UL -union TPchipPERRMASK { - struct { - unsigned int perrmask_v_lost : 1; - unsigned perrmask_v_serr : 1; - unsigned perrmask_v_perr : 1; - unsigned perrmask_v_dcrto : 1; - unsigned perrmask_v_sge : 1; - unsigned perrmask_v_ape : 1; - unsigned perrmask_v_ta : 1; - unsigned perrmask_v_rdpe : 1; - unsigned perrmask_v_nds : 1; - unsigned perrmask_v_rto : 1; - unsigned perrmask_v_uecc : 1; - unsigned perrmask_v_cre : 1; - unsigned perrmask_v_rsvd1 : 20; - unsigned perrmask_v_rsvd2 : 32; - } perrmask_r_bits; - int perrmask_q_whole [2]; -}; - -/* - * Memory spaces: - */ -#define TSUNAMI_HOSE(h) (((unsigned long)(h)) << 33) -#define TSUNAMI_BASE (IDENT_ADDR + TS_BIAS) - -#define TSUNAMI_MEM(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x000000000UL) -#define _TSUNAMI_IACK_SC(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1F8000000UL) -#define TSUNAMI_IO(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FC000000UL) -#define TSUNAMI_CONF(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FE000000UL) - -#define TSUNAMI_IACK_SC _TSUNAMI_IACK_SC(0) /* hack! */ - - -/* - * The canonical non-remaped I/O and MEM addresses have these values - * subtracted out. 
This is arranged so that folks manipulating ISA - * devices can use their familiar numbers and have them map to bus 0. - */ - -#define TSUNAMI_IO_BIAS TSUNAMI_IO(0) -#define TSUNAMI_MEM_BIAS TSUNAMI_MEM(0) - -/* The IO address space is larger than 0xffff */ -#define TSUNAMI_IO_SPACE (TSUNAMI_CONF(0) - TSUNAMI_IO(0)) - -/* Offset between ram physical addresses and pci64 DAC bus addresses. */ -#define TSUNAMI_DAC_OFFSET (1UL << 40) - -/* - * Data structure for handling TSUNAMI machine checks: - */ -struct el_TSUNAMI_sysdata_mcheck { -}; - - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * I/O functions: - * - * TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264) - * can only use linear accesses to get at PCI memory and I/O spaces. - */ - -/* - * Memory functions. all accesses are done through linear space. - */ -extern void __iomem *tsunami_ioportmap(unsigned long addr); -extern void __iomem *tsunami_ioremap(unsigned long addr, unsigned long size); -__EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr) -{ - return addr >= TSUNAMI_BASE; -} - -__EXTERN_INLINE int tsunami_is_mmio(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - return (addr & 0x100000000UL) == 0; -} - -#undef __IO_PREFIX -#define __IO_PREFIX tsunami -#define tsunami_trivial_rw_bw 1 -#define tsunami_trivial_rw_lq 1 -#define tsunami_trivial_io_bw 1 -#define tsunami_trivial_io_lq 1 -#define tsunami_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_TSUNAMI__H__ */ diff --git a/include/asm-alpha/core_wildfire.h b/include/asm-alpha/core_wildfire.h deleted file mode 100644 index cd562f544ba..00000000000 --- a/include/asm-alpha/core_wildfire.h +++ /dev/null @@ -1,318 +0,0 @@ -#ifndef __ALPHA_WILDFIRE__H__ -#define __ALPHA_WILDFIRE__H__ - -#include -#include - -#define WILDFIRE_MAX_QBB 8 /* more than 8 requires other mods */ -#define WILDFIRE_PCA_PER_QBB 4 -#define WILDFIRE_IRQ_PER_PCA 64 - -#define WILDFIRE_NR_IRQS \ - (WILDFIRE_MAX_QBB * WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) - -extern unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB]; -extern unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB]; -#define QBB_MAP_EMPTY 0xff - -extern unsigned long wildfire_hard_qbb_mask; -extern unsigned long wildfire_soft_qbb_mask; -extern unsigned long wildfire_gp_mask; -extern unsigned long wildfire_hs_mask; -extern unsigned long wildfire_iop_mask; -extern unsigned long wildfire_ior_mask; -extern unsigned long wildfire_pca_mask; -extern unsigned long wildfire_cpu_mask; -extern unsigned long wildfire_mem_mask; - -#define WILDFIRE_QBB_EXISTS(qbbno) (wildfire_soft_qbb_mask & (1 << (qbbno))) - -#define WILDFIRE_MEM_EXISTS(qbbno) (wildfire_mem_mask & (0xf << ((qbbno) << 2))) - -#define WILDFIRE_PCA_EXISTS(qbbno, pcano) \ - (wildfire_pca_mask & (1 << (((qbbno) << 2) + (pcano)))) - -typedef struct { - volatile unsigned long csr __attribute__((aligned(64))); -} wildfire_64; - -typedef struct { - volatile unsigned long csr __attribute__((aligned(256))); -} wildfire_256; - -typedef struct { - volatile unsigned long csr __attribute__((aligned(2048))); -} wildfire_2k; - -typedef struct { - wildfire_64 qsd_whami; - wildfire_64 qsd_rev; - wildfire_64 qsd_port_present; - wildfire_64 qsd_port_active; - wildfire_64 qsd_fault_ena; - wildfire_64 qsd_cpu_int_ena; - wildfire_64 qsd_mem_config; - wildfire_64 
qsd_err_sum; - wildfire_64 ce_sum[4]; - wildfire_64 dev_init[4]; - wildfire_64 it_int[4]; - wildfire_64 ip_int[4]; - wildfire_64 uce_sum[4]; - wildfire_64 se_sum__non_dev_int[4]; - wildfire_64 scratch[4]; - wildfire_64 qsd_timer; - wildfire_64 qsd_diag; -} wildfire_qsd; - -typedef struct { - wildfire_256 qsd_whami; - wildfire_256 __pad1; - wildfire_256 ce_sum; - wildfire_256 dev_init; - wildfire_256 it_int; - wildfire_256 ip_int; - wildfire_256 uce_sum; - wildfire_256 se_sum; -} wildfire_fast_qsd; - -typedef struct { - wildfire_2k qsa_qbb_id; - wildfire_2k __pad1; - wildfire_2k qsa_port_ena; - wildfire_2k qsa_scratch; - wildfire_2k qsa_config[5]; - wildfire_2k qsa_ref_int; - wildfire_2k qsa_qbb_pop[2]; - wildfire_2k qsa_dtag_fc; - wildfire_2k __pad2[3]; - wildfire_2k qsa_diag; - wildfire_2k qsa_diag_lock[4]; - wildfire_2k __pad3[11]; - wildfire_2k qsa_cpu_err_sum; - wildfire_2k qsa_misc_err_sum; - wildfire_2k qsa_tmo_err_sum; - wildfire_2k qsa_err_ena; - wildfire_2k qsa_tmo_config; - wildfire_2k qsa_ill_cmd_err_sum; - wildfire_2k __pad4[26]; - wildfire_2k qsa_busy_mask; - wildfire_2k qsa_arr_valid; - wildfire_2k __pad5[2]; - wildfire_2k qsa_port_map[4]; - wildfire_2k qsa_arr_addr[8]; - wildfire_2k qsa_arr_mask[8]; -} wildfire_qsa; - -typedef struct { - wildfire_64 ioa_config; - wildfire_64 iod_config; - wildfire_64 iop_switch_credits; - wildfire_64 __pad1; - wildfire_64 iop_hose_credits; - wildfire_64 __pad2[11]; - struct { - wildfire_64 __pad3; - wildfire_64 init; - } iop_hose[4]; - wildfire_64 ioa_hose_0_ctrl; - wildfire_64 iod_hose_0_ctrl; - wildfire_64 ioa_hose_1_ctrl; - wildfire_64 iod_hose_1_ctrl; - wildfire_64 ioa_hose_2_ctrl; - wildfire_64 iod_hose_2_ctrl; - wildfire_64 ioa_hose_3_ctrl; - wildfire_64 iod_hose_3_ctrl; - struct { - wildfire_64 target; - wildfire_64 __pad4; - } iop_dev_int[4]; - - wildfire_64 iop_err_int_target; - wildfire_64 __pad5[7]; - wildfire_64 iop_qbb_err_sum; - wildfire_64 __pad6; - wildfire_64 iop_qbb_se_sum; - wildfire_64 __pad7; - wildfire_64 ioa_err_sum; - wildfire_64 iod_err_sum; - wildfire_64 __pad8[4]; - wildfire_64 ioa_diag_force_err; - wildfire_64 iod_diag_force_err; - wildfire_64 __pad9[4]; - wildfire_64 iop_diag_send_err_int; - wildfire_64 __pad10[15]; - wildfire_64 ioa_scratch; - wildfire_64 iod_scratch; -} wildfire_iop; - -typedef struct { - wildfire_2k gpa_qbb_map[4]; - wildfire_2k gpa_mem_pop_map; - wildfire_2k gpa_scratch; - wildfire_2k gpa_diag; - wildfire_2k gpa_config_0; - wildfire_2k __pad1; - wildfire_2k gpa_init_id; - wildfire_2k gpa_config_2; - /* not complete */ -} wildfire_gp; - -typedef struct { - wildfire_64 pca_what_am_i; - wildfire_64 pca_err_sum; - wildfire_64 pca_diag_force_err; - wildfire_64 pca_diag_send_err_int; - wildfire_64 pca_hose_credits; - wildfire_64 pca_scratch; - wildfire_64 pca_micro_addr; - wildfire_64 pca_micro_data; - wildfire_64 pca_pend_int; - wildfire_64 pca_sent_int; - wildfire_64 __pad1; - wildfire_64 pca_stdio_edge_level; - wildfire_64 __pad2[52]; - struct { - wildfire_64 target; - wildfire_64 enable; - } pca_int[4]; - wildfire_64 __pad3[56]; - wildfire_64 pca_alt_sent_int[32]; -} wildfire_pca; - -typedef struct { - wildfire_64 ne_what_am_i; - /* not complete */ -} wildfire_ne; - -typedef struct { - wildfire_64 fe_what_am_i; - /* not complete */ -} wildfire_fe; - -typedef struct { - wildfire_64 pci_io_addr_ext; - wildfire_64 pci_ctrl; - wildfire_64 pci_err_sum; - wildfire_64 pci_err_addr; - wildfire_64 pci_stall_cnt; - wildfire_64 pci_iack_special; - wildfire_64 __pad1[2]; - wildfire_64 pci_pend_int; - 
wildfire_64 pci_sent_int; - wildfire_64 __pad2[54]; - struct { - wildfire_64 wbase; - wildfire_64 wmask; - wildfire_64 tbase; - } pci_window[4]; - wildfire_64 pci_flush_tlb; - wildfire_64 pci_perf_mon; -} wildfire_pci; - -#define WILDFIRE_ENTITY_SHIFT 18 - -#define WILDFIRE_GP_ENTITY (0x10UL << WILDFIRE_ENTITY_SHIFT) -#define WILDFIRE_IOP_ENTITY (0x08UL << WILDFIRE_ENTITY_SHIFT) -#define WILDFIRE_QSA_ENTITY (0x04UL << WILDFIRE_ENTITY_SHIFT) -#define WILDFIRE_QSD_ENTITY_SLOW (0x05UL << WILDFIRE_ENTITY_SHIFT) -#define WILDFIRE_QSD_ENTITY_FAST (0x01UL << WILDFIRE_ENTITY_SHIFT) - -#define WILDFIRE_PCA_ENTITY(pca) ((0xc|(pca))<>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23))) - -#define WILDFIRE_IO_BIAS WILDFIRE_IO(0,0) -#define WILDFIRE_MEM_BIAS WILDFIRE_MEM(0,0) /* ??? */ - -/* The IO address space is larger than 0xffff */ -#define WILDFIRE_IO_SPACE (8UL*1024*1024) - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * Memory functions. all accesses are done through linear space. - */ - -__EXTERN_INLINE void __iomem *wildfire_ioportmap(unsigned long addr) -{ - return (void __iomem *)(addr + WILDFIRE_IO_BIAS); -} - -__EXTERN_INLINE void __iomem *wildfire_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + WILDFIRE_MEM_BIAS); -} - -__EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr) -{ - return addr >= WILDFIRE_BASE; -} - -__EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr; - return (addr & 0x100000000UL) == 0; -} - -#undef __IO_PREFIX -#define __IO_PREFIX wildfire -#define wildfire_trivial_rw_bw 1 -#define wildfire_trivial_rw_lq 1 -#define wildfire_trivial_io_bw 1 -#define wildfire_trivial_io_lq 1 -#define wildfire_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_WILDFIRE__H__ */ diff --git a/include/asm-alpha/cputime.h b/include/asm-alpha/cputime.h deleted file mode 100644 index 19577fd9323..00000000000 --- a/include/asm-alpha/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ALPHA_CPUTIME_H -#define __ALPHA_CPUTIME_H - -#include - -#endif /* __ALPHA_CPUTIME_H */ diff --git a/include/asm-alpha/current.h b/include/asm-alpha/current.h deleted file mode 100644 index 094d285a1b3..00000000000 --- a/include/asm-alpha/current.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _ALPHA_CURRENT_H -#define _ALPHA_CURRENT_H - -#include - -#define get_current() (current_thread_info()->task) -#define current get_current() - -#endif /* _ALPHA_CURRENT_H */ diff --git a/include/asm-alpha/delay.h b/include/asm-alpha/delay.h deleted file mode 100644 index 2aa3f410f7e..00000000000 --- a/include/asm-alpha/delay.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ALPHA_DELAY_H -#define __ALPHA_DELAY_H - -extern void __delay(int loops); -extern void udelay(unsigned long usecs); - -extern void ndelay(unsigned long nsecs); -#define ndelay ndelay - -#endif /* defined(__ALPHA_DELAY_H) */ diff --git a/include/asm-alpha/device.h b/include/asm-alpha/device.h deleted file mode 100644 index d8f9872b0e2..00000000000 --- a/include/asm-alpha/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include - diff --git a/include/asm-alpha/div64.h b/include/asm-alpha/div64.h deleted file mode 100644 index 6cd978cefb2..00000000000 --- a/include/asm-alpha/div64.h +++ 
/dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h deleted file mode 100644 index a5801ae02e4..00000000000 --- a/include/asm-alpha/dma-mapping.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef _ALPHA_DMA_MAPPING_H -#define _ALPHA_DMA_MAPPING_H - - -#ifdef CONFIG_PCI - -#include - -#define dma_map_single(dev, va, size, dir) \ - pci_map_single(alpha_gendev_to_pci(dev), va, size, dir) -#define dma_unmap_single(dev, addr, size, dir) \ - pci_unmap_single(alpha_gendev_to_pci(dev), addr, size, dir) -#define dma_alloc_coherent(dev, size, addr, gfp) \ - __pci_alloc_consistent(alpha_gendev_to_pci(dev), size, addr, gfp) -#define dma_free_coherent(dev, size, va, addr) \ - pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr) -#define dma_map_page(dev, page, off, size, dir) \ - pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir) -#define dma_unmap_page(dev, addr, size, dir) \ - pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir) -#define dma_map_sg(dev, sg, nents, dir) \ - pci_map_sg(alpha_gendev_to_pci(dev), sg, nents, dir) -#define dma_unmap_sg(dev, sg, nents, dir) \ - pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) -#define dma_supported(dev, mask) \ - pci_dma_supported(alpha_gendev_to_pci(dev), mask) -#define dma_mapping_error(dev, addr) \ - pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr) - -#else /* no PCI - no IOMMU. */ - -struct scatterlist; -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp); -int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction); - -#define dma_free_coherent(dev, size, va, addr) \ - free_pages((unsigned long)va, get_order(size)) -#define dma_supported(dev, mask) (mask < 0x00ffffffUL ? 0 : 1) -#define dma_map_single(dev, va, size, dir) virt_to_phys(va) -#define dma_map_page(dev, page, off, size, dir) (page_to_pa(page) + off) - -#define dma_unmap_single(dev, addr, size, dir) ((void)0) -#define dma_unmap_page(dev, addr, size, dir) ((void)0) -#define dma_unmap_sg(dev, sg, nents, dir) ((void)0) - -#define dma_mapping_error(dev, addr) (0) - -#endif /* !CONFIG_PCI */ - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) - -int dma_set_mask(struct device *dev, u64 mask); - -#define dma_sync_single_for_cpu(dev, addr, size, dir) ((void)0) -#define dma_sync_single_for_device(dev, addr, size, dir) ((void)0) -#define dma_sync_single_range(dev, addr, off, size, dir) ((void)0) -#define dma_sync_sg_for_cpu(dev, sg, nents, dir) ((void)0) -#define dma_sync_sg_for_device(dev, sg, nents, dir) ((void)0) -#define dma_cache_sync(dev, va, size, dir) ((void)0) -#define dma_sync_single_range_for_cpu(dev, addr, offset, size, dir) ((void)0) -#define dma_sync_single_range_for_device(dev, addr, offset, size, dir) ((void)0) - -#define dma_get_cache_alignment() L1_CACHE_BYTES - -#endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/include/asm-alpha/dma.h b/include/asm-alpha/dma.h deleted file mode 100644 index 87cfdbdf08f..00000000000 --- a/include/asm-alpha/dma.h +++ /dev/null @@ -1,376 +0,0 @@ -/* - * include/asm-alpha/dma.h - * - * This is essentially the same as the i386 DMA stuff, as the AlphaPCs - * use ISA-compatible dma. The only extension is support for high-page - * registers that allow to set the top 8 bits of a 32-bit DMA address. 
- * This register should be written last when setting up a DMA address - * as this will also enable DMA across 64 KB boundaries. - */ - -/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ - * linux/include/asm/dma.h: Defines for using and allocating dma channels. - * Written by Hennus Bergman, 1992. - * High DMA channel support & info by Hannu Savolainen - * and John Boyd, Nov. 1992. - */ - -#ifndef _ASM_DMA_H -#define _ASM_DMA_H - -#include -#include - -#define dma_outb outb -#define dma_inb inb - -/* - * NOTES about DMA transfers: - * - * controller 1: channels 0-3, byte operations, ports 00-1F - * controller 2: channels 4-7, word operations, ports C0-DF - * - * - ALL registers are 8 bits only, regardless of transfer size - * - channel 4 is not used - cascades 1 into 2. - * - channels 0-3 are byte - addresses/counts are for physical bytes - * - channels 5-7 are word - addresses/counts are for physical words - * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries - * - transfer count loaded to registers is 1 less than actual count - * - controller 2 offsets are all even (2x offsets for controller 1) - * - page registers for 5-7 don't use data bit 0, represent 128K pages - * - page registers for 0-3 use bit 0, represent 64K pages - * - * DMA transfers are limited to the lower 16MB of _physical_ memory. - * Note that addresses loaded into registers must be _physical_ addresses, - * not logical addresses (which may differ if paging is active). - * - * Address mapping for channels 0-3: - * - * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * P7 ... P0 A7 ... A0 A7 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Address mapping for channels 5-7: - * - * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) - * | ... | \ \ ... \ \ \ ... \ \ - * | ... | \ \ ... \ \ \ ... \ (not used) - * | ... | \ \ ... \ \ \ ... \ - * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses - * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at - * the hardware level, so odd-byte transfers aren't possible). - * - * Transfer count (_not # bytes_) is limited to 64K, represented as actual - * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, - * and up to 128K bytes may be transferred on channels 5-7 in one operation. - * - */ - -#define MAX_DMA_CHANNELS 8 - -/* - ISA DMA limitations on Alpha platforms, - - These may be due to SIO (PCI<->ISA bridge) chipset limitation, or - just a wiring limit. -*/ - -/* The maximum address for ISA DMA transfer on Alpha XL, due to an - hardware SIO limitation, is 64MB. -*/ -#define ALPHA_XL_MAX_ISA_DMA_ADDRESS 0x04000000UL - -/* The maximum address for ISA DMA transfer on RUFFIAN, - due to an hardware SIO limitation, is 16MB. -*/ -#define ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS 0x01000000UL - -/* The maximum address for ISA DMA transfer on SABLE, and some ALCORs, - due to an hardware SIO chip limitation, is 2GB. -*/ -#define ALPHA_SABLE_MAX_ISA_DMA_ADDRESS 0x80000000UL -#define ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS 0x80000000UL - -/* - Maximum address for all the others is the complete 32-bit bus - address space. 
-*/ -#define ALPHA_MAX_ISA_DMA_ADDRESS 0x100000000UL - -#ifdef CONFIG_ALPHA_GENERIC -# define MAX_ISA_DMA_ADDRESS (alpha_mv.max_isa_dma_address) -#else -# if defined(CONFIG_ALPHA_XL) -# define MAX_ISA_DMA_ADDRESS ALPHA_XL_MAX_ISA_DMA_ADDRESS -# elif defined(CONFIG_ALPHA_RUFFIAN) -# define MAX_ISA_DMA_ADDRESS ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS -# elif defined(CONFIG_ALPHA_SABLE) -# define MAX_ISA_DMA_ADDRESS ALPHA_SABLE_MAX_ISA_DMA_ADDRESS -# elif defined(CONFIG_ALPHA_ALCOR) -# define MAX_ISA_DMA_ADDRESS ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS -# else -# define MAX_ISA_DMA_ADDRESS ALPHA_MAX_ISA_DMA_ADDRESS -# endif -#endif - -/* If we have the iommu, we don't have any address limitations on DMA. - Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone - like i386. */ -#define MAX_DMA_ADDRESS (alpha_mv.mv_pci_tbi ? \ - ~0UL : IDENT_ADDR + 0x01000000) - -/* 8237 DMA controllers */ -#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ -#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ - -/* DMA controller registers */ -#define DMA1_CMD_REG 0x08 /* command register (w) */ -#define DMA1_STAT_REG 0x08 /* status register (r) */ -#define DMA1_REQ_REG 0x09 /* request register (w) */ -#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ -#define DMA1_MODE_REG 0x0B /* mode register (w) */ -#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ -#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ -#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ -#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ -#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ -#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) - -#define DMA2_CMD_REG 0xD0 /* command register (w) */ -#define DMA2_STAT_REG 0xD0 /* status register (r) */ -#define DMA2_REQ_REG 0xD2 /* request register (w) */ -#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ -#define DMA2_MODE_REG 0xD6 /* mode register (w) */ -#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ -#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ -#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ -#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ -#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ -#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) - -#define DMA_ADDR_0 0x00 /* DMA address registers */ -#define DMA_ADDR_1 0x02 -#define DMA_ADDR_2 0x04 -#define DMA_ADDR_3 0x06 -#define DMA_ADDR_4 0xC0 -#define DMA_ADDR_5 0xC4 -#define DMA_ADDR_6 0xC8 -#define DMA_ADDR_7 0xCC - -#define DMA_CNT_0 0x01 /* DMA count registers */ -#define DMA_CNT_1 0x03 -#define DMA_CNT_2 0x05 -#define DMA_CNT_3 0x07 -#define DMA_CNT_4 0xC2 -#define DMA_CNT_5 0xC6 -#define DMA_CNT_6 0xCA -#define DMA_CNT_7 0xCE - -#define DMA_PAGE_0 0x87 /* DMA page registers */ -#define DMA_PAGE_1 0x83 -#define DMA_PAGE_2 0x81 -#define DMA_PAGE_3 0x82 -#define DMA_PAGE_5 0x8B -#define DMA_PAGE_6 0x89 -#define DMA_PAGE_7 0x8A - -#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0) -#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1) -#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2) -#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3) -#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4) -#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5) -#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6) -#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7) - -#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ -#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ -#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ - -#define DMA_AUTOINIT 0x10 - 
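As a rough illustration of how a driver is expected to use the 8237 helpers declared further down in this header (claim_dma_lock(), clear_dma_ff(), set_dma_mode(), set_dma_addr(), set_dma_count(), enable_dma()), the sketch below programs a single-mode device-to-memory transfer. It is not part of the original file; the channel number, bus address and length are hypothetical, and the buffer is assumed to sit below MAX_ISA_DMA_ADDRESS and not to cross a 64 KB boundary. Note that set_dma_count() loads count - 1 into the registers, matching the count encoding described in the comment above.

static void example_start_isa_dma(unsigned int channel,
                                  unsigned long bus_addr, unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(channel);
	clear_dma_ff(channel);               /* put the LSB/MSB flip-flop in a known state */
	set_dma_mode(channel, DMA_MODE_READ);        /* I/O to memory, single mode */
	set_dma_addr(channel, bus_addr);     /* also writes the (hi)page registers */
	set_dma_count(channel, len);         /* helper stores len - 1 */
	enable_dma(channel);

	release_dma_lock(flags);
}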
-extern spinlock_t dma_spin_lock; - -static __inline__ unsigned long claim_dma_lock(void) -{ - unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); - return flags; -} - -static __inline__ void release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} - -/* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr, DMA1_MASK_REG); - else - dma_outb(dmanr & 3, DMA2_MASK_REG); -} - -static __inline__ void disable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr | 4, DMA1_MASK_REG); - else - dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); -} - -/* Clear the 'DMA Pointer Flip Flop'. - * Write 0 for LSB/MSB, 1 for MSB/LSB access. - * Use this once to initialize the FF to a known state. - * After that, keep track of it. :-) - * --- In order to do that, the DMA routines below should --- - * --- only be used while interrupts are disabled! --- - */ -static __inline__ void clear_dma_ff(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(0, DMA1_CLEAR_FF_REG); - else - dma_outb(0, DMA2_CLEAR_FF_REG); -} - -/* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) -{ - if (dmanr<=3) - dma_outb(mode | dmanr, DMA1_MODE_REG); - else - dma_outb(mode | (dmanr&3), DMA2_MODE_REG); -} - -/* set extended mode for a specific DMA channel */ -static __inline__ void set_dma_ext_mode(unsigned int dmanr, char ext_mode) -{ - if (dmanr<=3) - dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG); - else - dma_outb(ext_mode | (dmanr&3), DMA2_EXT_MODE_REG); -} - -/* Set only the page register bits of the transfer address. - * This is used for successive transfers when we know the contents of - * the lower 16 bits of the DMA current address register. - */ -static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr) -{ - switch(dmanr) { - case 0: - dma_outb(pagenr, DMA_PAGE_0); - dma_outb((pagenr >> 8), DMA_HIPAGE_0); - break; - case 1: - dma_outb(pagenr, DMA_PAGE_1); - dma_outb((pagenr >> 8), DMA_HIPAGE_1); - break; - case 2: - dma_outb(pagenr, DMA_PAGE_2); - dma_outb((pagenr >> 8), DMA_HIPAGE_2); - break; - case 3: - dma_outb(pagenr, DMA_PAGE_3); - dma_outb((pagenr >> 8), DMA_HIPAGE_3); - break; - case 5: - dma_outb(pagenr & 0xfe, DMA_PAGE_5); - dma_outb((pagenr >> 8), DMA_HIPAGE_5); - break; - case 6: - dma_outb(pagenr & 0xfe, DMA_PAGE_6); - dma_outb((pagenr >> 8), DMA_HIPAGE_6); - break; - case 7: - dma_outb(pagenr & 0xfe, DMA_PAGE_7); - dma_outb((pagenr >> 8), DMA_HIPAGE_7); - break; - } -} - - -/* Set transfer address & page bits for specific DMA channel. - * Assumes dma flipflop is clear. - */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) -{ - if (dmanr <= 3) { - dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - } else { - dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - } - set_dma_page(dmanr, a>>16); /* set hipage last to enable 32-bit mode */ -} - - -/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for - * a specific DMA channel. - * You must ensure the parameters are valid. - * NOTE: from a manual: "the number of transfers is one more - * than the initial word count"! This is taken into account. - * Assumes dma flip-flop is clear. - * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. 
- */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; - if (dmanr <= 3) { - dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - } else { - dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - } -} - - -/* Get DMA residue count. After a DMA transfer, this - * should return zero. Reading this while a DMA transfer is - * still in progress will return unpredictable results. - * If called before the channel has been used, it may return 1. - * Otherwise, it returns the number of _bytes_ left to transfer. - * - * Assumes DMA flip-flop is clear. - */ -static __inline__ int get_dma_residue(unsigned int dmanr) -{ - unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE - : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; - - /* using short to get 16-bit wrap around */ - unsigned short count; - - count = 1 + dma_inb(io_port); - count += dma_inb(io_port) << 8; - - return (dmanr<=3)? count : (count<<1); -} - - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ -#define KERNEL_HAVE_CHECK_DMA -extern int check_dma(unsigned int dmanr); - -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - - -#endif /* _ASM_DMA_H */ diff --git a/include/asm-alpha/elf.h b/include/asm-alpha/elf.h deleted file mode 100644 index fc1002ea1e0..00000000000 --- a/include/asm-alpha/elf.h +++ /dev/null @@ -1,165 +0,0 @@ -#ifndef __ASM_ALPHA_ELF_H -#define __ASM_ALPHA_ELF_H - -#include - -/* Special values for the st_other field in the symbol table. */ - -#define STO_ALPHA_NOPV 0x80 -#define STO_ALPHA_STD_GPLOAD 0x88 - -/* - * Alpha ELF relocation types - */ -#define R_ALPHA_NONE 0 /* No reloc */ -#define R_ALPHA_REFLONG 1 /* Direct 32 bit */ -#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ -#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ -#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ -#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ -#define R_ALPHA_GPDISP 6 /* Add displacement to GP */ -#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ -#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ -#define R_ALPHA_SREL16 9 /* PC relative 16 bit */ -#define R_ALPHA_SREL32 10 /* PC relative 32 bit */ -#define R_ALPHA_SREL64 11 /* PC relative 64 bit */ -#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ -#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ -#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ -#define R_ALPHA_COPY 24 /* Copy symbol at runtime */ -#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ -#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ -#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ -#define R_ALPHA_BRSGP 28 -#define R_ALPHA_TLSGD 29 -#define R_ALPHA_TLS_LDM 30 -#define R_ALPHA_DTPMOD64 31 -#define R_ALPHA_GOTDTPREL 32 -#define R_ALPHA_DTPREL64 33 -#define R_ALPHA_DTPRELHI 34 -#define R_ALPHA_DTPRELLO 35 -#define R_ALPHA_DTPREL16 36 -#define R_ALPHA_GOTTPREL 37 -#define R_ALPHA_TPREL64 38 -#define R_ALPHA_TPRELHI 39 -#define R_ALPHA_TPRELLO 40 -#define R_ALPHA_TPREL16 41 - -#define SHF_ALPHA_GPREL 0x10000000 - -/* Legal values for e_flags field of Elf64_Ehdr. 
*/ - -#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ - -/* - * ELF register definitions.. - */ - -/* - * The OSF/1 version of makes gregset_t 46 entries long. - * I have no idea why that is so. For now, we just leave it at 33 - * (32 general regs + processor status word). - */ -#define ELF_NGREG 33 -#define ELF_NFPREG 32 - -typedef unsigned long elf_greg_t; -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; - -typedef double elf_fpreg_t; -typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) - -/* - * These are used to set parameters in the core dumps. - */ -#define ELF_CLASS ELFCLASS64 -#define ELF_DATA ELFDATA2LSB -#define ELF_ARCH EM_ALPHA - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 8192 - -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) - -/* $0 is set by ld.so to a pointer to a function which might be - registered using atexit. This provides a mean for the dynamic - linker to call DT_FINI functions for shared libraries that have - been loaded before the code runs. - - So that we can use the same startup file with static executables, - we start programs with a value of 0 to indicate that there is no - such function. */ - -#define ELF_PLAT_INIT(_r, load_addr) _r->r0 = 0 - -/* The registers are layed out in pt_regs for PAL and syscall - convenience. Re-order them for the linear elf_gregset_t. */ - -struct pt_regs; -struct thread_info; -struct task_struct; -extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, - struct thread_info *ti); -#define ELF_CORE_COPY_REGS(DEST, REGS) \ - dump_elf_thread(DEST, REGS, current_thread_info()); - -/* Similar, but for a thread other than current. */ - -extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task); -#define ELF_CORE_COPY_TASK_REGS(TASK, DEST) \ - dump_elf_task(*(DEST), TASK) - -/* Similar, but for the FP registers. */ - -extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task); -#define ELF_CORE_COPY_FPREGS(TASK, DEST) \ - dump_elf_task_fp(*(DEST), TASK) - -/* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. This is trivial on Alpha, - but not so on other machines. */ - -#define ELF_HWCAP (~amask(-1)) - -/* This yields a string that ld.so will use to load implementation - specific libraries for optimization. This is more specific in - intent than poking at uname or /proc/cpuinfo. */ - -#define ELF_PLATFORM \ -({ \ - enum implver_enum i_ = implver(); \ - ( i_ == IMPLVER_EV4 ? "ev4" \ - : i_ == IMPLVER_EV5 \ - ? (amask(AMASK_BWX) ? "ev5" : "ev56") \ - : amask (AMASK_CIX) ? "ev6" : "ev67"); \ -}) - -#define SET_PERSONALITY(EX, IBCS2) \ - set_personality(((EX).e_flags & EF_ALPHA_32BIT) \ - ? PER_LINUX_32BIT : (IBCS2) ? 
PER_SVR4 : PER_LINUX) - -extern int alpha_l1i_cacheshape; -extern int alpha_l1d_cacheshape; -extern int alpha_l2_cacheshape; -extern int alpha_l3_cacheshape; - -/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ -#define ARCH_DLINFO \ - do { \ - NEW_AUX_ENT(AT_L1I_CACHESHAPE, alpha_l1i_cacheshape); \ - NEW_AUX_ENT(AT_L1D_CACHESHAPE, alpha_l1d_cacheshape); \ - NEW_AUX_ENT(AT_L2_CACHESHAPE, alpha_l2_cacheshape); \ - NEW_AUX_ENT(AT_L3_CACHESHAPE, alpha_l3_cacheshape); \ - } while (0) - -#endif /* __ASM_ALPHA_ELF_H */ diff --git a/include/asm-alpha/emergency-restart.h b/include/asm-alpha/emergency-restart.h deleted file mode 100644 index 108d8c48e42..00000000000 --- a/include/asm-alpha/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-alpha/err_common.h b/include/asm-alpha/err_common.h deleted file mode 100644 index c2509594210..00000000000 --- a/include/asm-alpha/err_common.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * linux/include/asm-alpha/err_common.h - * - * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) - * - * Contains declarations and macros to support Alpha error handling - * implementations. - */ - -#ifndef __ALPHA_ERR_COMMON_H -#define __ALPHA_ERR_COMMON_H 1 - -/* - * SCB Vector definitions - */ -#define SCB_Q_SYSERR 0x620 -#define SCB_Q_PROCERR 0x630 -#define SCB_Q_SYSMCHK 0x660 -#define SCB_Q_PROCMCHK 0x670 -#define SCB_Q_SYSEVENT 0x680 - -/* - * Disposition definitions for logout frame parser - */ -#define MCHK_DISPOSITION_UNKNOWN_ERROR 0x00 -#define MCHK_DISPOSITION_REPORT 0x01 -#define MCHK_DISPOSITION_DISMISS 0x02 - -/* - * Error Log definitions - */ -/* - * Types - */ - -#define EL_CLASS__TERMINATION (0) -# define EL_TYPE__TERMINATION__TERMINATION (0) -#define EL_CLASS__HEADER (5) -# define EL_TYPE__HEADER__SYSTEM_ERROR_FRAME (1) -# define EL_TYPE__HEADER__SYSTEM_EVENT_FRAME (2) -# define EL_TYPE__HEADER__HALT_FRAME (3) -# define EL_TYPE__HEADER__LOGOUT_FRAME (19) -#define EL_CLASS__GENERAL_NOTIFICATION (9) -#define EL_CLASS__PCI_ERROR_FRAME (11) -#define EL_CLASS__REGATTA_FAMILY (12) -# define EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME (1) -# define EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME (2) -# define EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME (3) -# define EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED (8) -# define EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED (9) -# define EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED (10) -# define EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT (11) -# define EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT (12) -#define EL_CLASS__PAL (14) -# define EL_TYPE__PAL__LOGOUT_FRAME (1) -# define EL_TYPE__PAL__EV7_PROCESSOR (4) -# define EL_TYPE__PAL__EV7_ZBOX (5) -# define EL_TYPE__PAL__EV7_RBOX (6) -# define EL_TYPE__PAL__EV7_IO (7) -# define EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE (10) -# define EL_TYPE__PAL__ENV__AIRMOVER_FAN (11) -# define EL_TYPE__PAL__ENV__VOLTAGE (12) -# define EL_TYPE__PAL__ENV__INTRUSION (13) -# define EL_TYPE__PAL__ENV__POWER_SUPPLY (14) -# define EL_TYPE__PAL__ENV__LAN (15) -# define EL_TYPE__PAL__ENV__HOT_PLUG (16) - -union el_timestamp { - struct { - u8 second; - u8 minute; - u8 hour; - u8 day; - u8 month; - u8 year; - } b; - u64 as_int; -}; - -struct el_subpacket { - u16 length; /* length of header (in bytes) */ - u16 class; /* header class and type... 
*/ - u16 type; /* ...determine content */ - u16 revision; /* header revision */ - union { - struct { /* Class 5, Type 1 - System Error */ - u32 frame_length; - u32 frame_packet_count; - } sys_err; - struct { /* Class 5, Type 2 - System Event */ - union el_timestamp timestamp; - u32 frame_length; - u32 frame_packet_count; - } sys_event; - struct { /* Class 5, Type 3 - Double Error Halt */ - u16 halt_code; - u16 reserved; - union el_timestamp timestamp; - u32 frame_length; - u32 frame_packet_count; - } err_halt; - struct { /* Clasee 5, Type 19 - Logout Frame Header */ - u32 frame_length; - u32 frame_flags; - u32 cpu_offset; - u32 system_offset; - } logout_header; - struct { /* Class 12 - Regatta */ - u64 cpuid; - u64 data_start[1]; - } regatta_frame; - struct { /* Raw */ - u64 data_start[1]; - } raw; - } by_type; -}; - -#endif /* __ALPHA_ERR_COMMON_H */ diff --git a/include/asm-alpha/err_ev6.h b/include/asm-alpha/err_ev6.h deleted file mode 100644 index ea637791e4a..00000000000 --- a/include/asm-alpha/err_ev6.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ALPHA_ERR_EV6_H -#define __ALPHA_ERR_EV6_H 1 - -/* Dummy include for now. */ - -#endif /* __ALPHA_ERR_EV6_H */ diff --git a/include/asm-alpha/err_ev7.h b/include/asm-alpha/err_ev7.h deleted file mode 100644 index 87f99777c2e..00000000000 --- a/include/asm-alpha/err_ev7.h +++ /dev/null @@ -1,202 +0,0 @@ -#ifndef __ALPHA_ERR_EV7_H -#define __ALPHA_ERR_EV7_H 1 - -/* - * Data for el packet class PAL (14), type LOGOUT_FRAME (1) - */ -struct ev7_pal_logout_subpacket { - u32 mchk_code; - u32 subpacket_count; - u64 whami; - u64 rbox_whami; - u64 rbox_int; - u64 exc_addr; - union el_timestamp timestamp; - u64 halt_code; - u64 reserved; -}; - -/* - * Data for el packet class PAL (14), type EV7_PROCESSOR (4) - */ -struct ev7_pal_processor_subpacket { - u64 i_stat; - u64 dc_stat; - u64 c_addr; - u64 c_syndrome_1; - u64 c_syndrome_0; - u64 c_stat; - u64 c_sts; - u64 mm_stat; - u64 exc_addr; - u64 ier_cm; - u64 isum; - u64 pal_base; - u64 i_ctl; - u64 process_context; - u64 cbox_ctl; - u64 cbox_stp_ctl; - u64 cbox_acc_ctl; - u64 cbox_lcl_set; - u64 cbox_gbl_set; - u64 bbox_ctl; - u64 bbox_err_sts; - u64 bbox_err_idx; - u64 cbox_ddp_err_sts; - u64 bbox_dat_rmp; - u64 reserved[2]; -}; - -/* - * Data for el packet class PAL (14), type EV7_ZBOX (5) - */ -struct ev7_pal_zbox_subpacket { - u32 zbox0_dram_err_status_1; - u32 zbox0_dram_err_status_2; - u32 zbox0_dram_err_status_3; - u32 zbox0_dram_err_ctl; - u32 zbox0_dram_err_adr; - u32 zbox0_dift_timeout; - u32 zbox0_dram_mapper_ctl; - u32 zbox0_frc_err_adr; - u32 zbox0_dift_err_status; - u32 reserved1; - u32 zbox1_dram_err_status_1; - u32 zbox1_dram_err_status_2; - u32 zbox1_dram_err_status_3; - u32 zbox1_dram_err_ctl; - u32 zbox1_dram_err_adr; - u32 zbox1_dift_timeout; - u32 zbox1_dram_mapper_ctl; - u32 zbox1_frc_err_adr; - u32 zbox1_dift_err_status; - u32 reserved2; - u64 cbox_ctl; - u64 cbox_stp_ctl; - u64 zbox0_error_pa; - u64 zbox1_error_pa; - u64 zbox0_ored_syndrome; - u64 zbox1_ored_syndrome; - u64 reserved3[2]; -}; - -/* - * Data for el packet class PAL (14), type EV7_RBOX (6) - */ -struct ev7_pal_rbox_subpacket { - u64 rbox_cfg; - u64 rbox_n_cfg; - u64 rbox_s_cfg; - u64 rbox_e_cfg; - u64 rbox_w_cfg; - u64 rbox_n_err; - u64 rbox_s_err; - u64 rbox_e_err; - u64 rbox_w_err; - u64 rbox_io_cfg; - u64 rbox_io_err; - u64 rbox_l_err; - u64 rbox_whoami; - u64 rbox_imask; - u64 rbox_intq; - u64 rbox_int; - u64 reserved[2]; -}; - -/* - * Data for el packet class PAL (14), type EV7_IO (7) - */ -struct 
ev7_pal_io_one_port { - u64 pox_err_sum; - u64 pox_tlb_err; - u64 pox_spl_cmplt; - u64 pox_trans_sum; - u64 pox_first_err; - u64 pox_mult_err; - u64 pox_dm_source; - u64 pox_dm_dest; - u64 pox_dm_size; - u64 pox_dm_ctrl; - u64 reserved; -}; - -struct ev7_pal_io_subpacket { - u64 io_asic_rev; - u64 io_sys_rev; - u64 io7_uph; - u64 hpi_ctl; - u64 crd_ctl; - u64 hei_ctl; - u64 po7_error_sum; - u64 po7_uncrr_sym; - u64 po7_crrct_sym; - u64 po7_ugbge_sym; - u64 po7_err_pkt0; - u64 po7_err_pkt1; - u64 reserved[2]; - struct ev7_pal_io_one_port ports[4]; -}; - -/* - * Environmental subpacket. Data used for el packets: - * class PAL (14), type AMBIENT_TEMPERATURE (10) - * class PAL (14), type AIRMOVER_FAN (11) - * class PAL (14), type VOLTAGE (12) - * class PAL (14), type INTRUSION (13) - * class PAL (14), type POWER_SUPPLY (14) - * class PAL (14), type LAN (15) - * class PAL (14), type HOT_PLUG (16) - */ -struct ev7_pal_environmental_subpacket { - u16 cabinet; - u16 drawer; - u16 reserved1[2]; - u8 module_type; - u8 unit_id; /* unit reporting condition */ - u8 reserved2; - u8 condition; /* condition reported */ -}; - -/* - * Convert environmental type to index - */ -static inline int ev7_lf_env_index(int type) -{ - BUG_ON((type < EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE) - || (type > EL_TYPE__PAL__ENV__HOT_PLUG)); - - return type - EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE; -} - -/* - * Data for generic el packet class PAL. - */ -struct ev7_pal_subpacket { - union { - struct ev7_pal_logout_subpacket logout; /* Type 1 */ - struct ev7_pal_processor_subpacket ev7; /* Type 4 */ - struct ev7_pal_zbox_subpacket zbox; /* Type 5 */ - struct ev7_pal_rbox_subpacket rbox; /* Type 6 */ - struct ev7_pal_io_subpacket io; /* Type 7 */ - struct ev7_pal_environmental_subpacket env; /* Type 10-16 */ - u64 as_quad[1]; /* Raw u64 */ - } by_type; -}; - -/* - * Struct to contain collected logout from subpackets. 
- */ -struct ev7_lf_subpackets { - struct ev7_pal_logout_subpacket *logout; /* Type 1 */ - struct ev7_pal_processor_subpacket *ev7; /* Type 4 */ - struct ev7_pal_zbox_subpacket *zbox; /* Type 5 */ - struct ev7_pal_rbox_subpacket *rbox; /* Type 6 */ - struct ev7_pal_io_subpacket *io; /* Type 7 */ - struct ev7_pal_environmental_subpacket *env[7]; /* Type 10-16 */ - - unsigned int io_pid; -}; - -#endif /* __ALPHA_ERR_EV7_H */ - - diff --git a/include/asm-alpha/errno.h b/include/asm-alpha/errno.h deleted file mode 100644 index 69e2655249d..00000000000 --- a/include/asm-alpha/errno.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef _ALPHA_ERRNO_H -#define _ALPHA_ERRNO_H - -#include - -#undef EAGAIN /* 11 in errno-base.h */ - -#define EDEADLK 11 /* Resource deadlock would occur */ - -#define EAGAIN 35 /* Try again */ -#define EWOULDBLOCK EAGAIN /* Operation would block */ -#define EINPROGRESS 36 /* Operation now in progress */ -#define EALREADY 37 /* Operation already in progress */ -#define ENOTSOCK 38 /* Socket operation on non-socket */ -#define EDESTADDRREQ 39 /* Destination address required */ -#define EMSGSIZE 40 /* Message too long */ -#define EPROTOTYPE 41 /* Protocol wrong type for socket */ -#define ENOPROTOOPT 42 /* Protocol not available */ -#define EPROTONOSUPPORT 43 /* Protocol not supported */ -#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ -#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ -#define EPFNOSUPPORT 46 /* Protocol family not supported */ -#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ -#define EADDRINUSE 48 /* Address already in use */ -#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ -#define ENETDOWN 50 /* Network is down */ -#define ENETUNREACH 51 /* Network is unreachable */ -#define ENETRESET 52 /* Network dropped connection because of reset */ -#define ECONNABORTED 53 /* Software caused connection abort */ -#define ECONNRESET 54 /* Connection reset by peer */ -#define ENOBUFS 55 /* No buffer space available */ -#define EISCONN 56 /* Transport endpoint is already connected */ -#define ENOTCONN 57 /* Transport endpoint is not connected */ -#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ -#define ETOOMANYREFS 59 /* Too many references: cannot splice */ -#define ETIMEDOUT 60 /* Connection timed out */ -#define ECONNREFUSED 61 /* Connection refused */ -#define ELOOP 62 /* Too many symbolic links encountered */ -#define ENAMETOOLONG 63 /* File name too long */ -#define EHOSTDOWN 64 /* Host is down */ -#define EHOSTUNREACH 65 /* No route to host */ -#define ENOTEMPTY 66 /* Directory not empty */ - -#define EUSERS 68 /* Too many users */ -#define EDQUOT 69 /* Quota exceeded */ -#define ESTALE 70 /* Stale NFS file handle */ -#define EREMOTE 71 /* Object is remote */ - -#define ENOLCK 77 /* No record locks available */ -#define ENOSYS 78 /* Function not implemented */ - -#define ENOMSG 80 /* No message of desired type */ -#define EIDRM 81 /* Identifier removed */ -#define ENOSR 82 /* Out of streams resources */ -#define ETIME 83 /* Timer expired */ -#define EBADMSG 84 /* Not a data message */ -#define EPROTO 85 /* Protocol error */ -#define ENODATA 86 /* No data available */ -#define ENOSTR 87 /* Device not a stream */ - -#define ENOPKG 92 /* Package not installed */ - -#define EILSEQ 116 /* Illegal byte sequence */ - -/* The following are just random noise.. 
*/ -#define ECHRNG 88 /* Channel number out of range */ -#define EL2NSYNC 89 /* Level 2 not synchronized */ -#define EL3HLT 90 /* Level 3 halted */ -#define EL3RST 91 /* Level 3 reset */ - -#define ELNRNG 93 /* Link number out of range */ -#define EUNATCH 94 /* Protocol driver not attached */ -#define ENOCSI 95 /* No CSI structure available */ -#define EL2HLT 96 /* Level 2 halted */ -#define EBADE 97 /* Invalid exchange */ -#define EBADR 98 /* Invalid request descriptor */ -#define EXFULL 99 /* Exchange full */ -#define ENOANO 100 /* No anode */ -#define EBADRQC 101 /* Invalid request code */ -#define EBADSLT 102 /* Invalid slot */ - -#define EDEADLOCK EDEADLK - -#define EBFONT 104 /* Bad font file format */ -#define ENONET 105 /* Machine is not on the network */ -#define ENOLINK 106 /* Link has been severed */ -#define EADV 107 /* Advertise error */ -#define ESRMNT 108 /* Srmount error */ -#define ECOMM 109 /* Communication error on send */ -#define EMULTIHOP 110 /* Multihop attempted */ -#define EDOTDOT 111 /* RFS specific error */ -#define EOVERFLOW 112 /* Value too large for defined data type */ -#define ENOTUNIQ 113 /* Name not unique on network */ -#define EBADFD 114 /* File descriptor in bad state */ -#define EREMCHG 115 /* Remote address changed */ - -#define EUCLEAN 117 /* Structure needs cleaning */ -#define ENOTNAM 118 /* Not a XENIX named type file */ -#define ENAVAIL 119 /* No XENIX semaphores available */ -#define EISNAM 120 /* Is a named type file */ -#define EREMOTEIO 121 /* Remote I/O error */ - -#define ELIBACC 122 /* Can not access a needed shared library */ -#define ELIBBAD 123 /* Accessing a corrupted shared library */ -#define ELIBSCN 124 /* .lib section in a.out corrupted */ -#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ -#define ELIBEXEC 126 /* Cannot exec a shared library directly */ -#define ERESTART 127 /* Interrupted system call should be restarted */ -#define ESTRPIPE 128 /* Streams pipe error */ - -#define ENOMEDIUM 129 /* No medium found */ -#define EMEDIUMTYPE 130 /* Wrong medium type */ -#define ECANCELED 131 /* Operation Cancelled */ -#define ENOKEY 132 /* Required key not available */ -#define EKEYEXPIRED 133 /* Key has expired */ -#define EKEYREVOKED 134 /* Key has been revoked */ -#define EKEYREJECTED 135 /* Key was rejected by service */ - -/* for robust mutexes */ -#define EOWNERDEAD 136 /* Owner died */ -#define ENOTRECOVERABLE 137 /* State not recoverable */ - -#endif diff --git a/include/asm-alpha/fb.h b/include/asm-alpha/fb.h deleted file mode 100644 index fa9bbb96b2b..00000000000 --- a/include/asm-alpha/fb.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ -#include - -/* Caching is off in the I/O space quadrant by design. */ -#define fb_pgprotect(...) 
do {} while (0) - -static inline int fb_is_primary_device(struct fb_info *info) -{ - return 0; -} - -#endif /* _ASM_FB_H_ */ diff --git a/include/asm-alpha/fcntl.h b/include/asm-alpha/fcntl.h deleted file mode 100644 index 25da0017ec8..00000000000 --- a/include/asm-alpha/fcntl.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _ALPHA_FCNTL_H -#define _ALPHA_FCNTL_H - -/* open/fcntl - O_SYNC is only implemented on blocks devices and on files - located on an ext2 file system */ -#define O_CREAT 01000 /* not fcntl */ -#define O_TRUNC 02000 /* not fcntl */ -#define O_EXCL 04000 /* not fcntl */ -#define O_NOCTTY 010000 /* not fcntl */ - -#define O_NONBLOCK 00004 -#define O_APPEND 00010 -#define O_SYNC 040000 -#define O_DIRECTORY 0100000 /* must be a directory */ -#define O_NOFOLLOW 0200000 /* don't follow links */ -#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ -#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */ -#define O_NOATIME 04000000 -#define O_CLOEXEC 010000000 /* set close_on_exec */ - -#define F_GETLK 7 -#define F_SETLK 8 -#define F_SETLKW 9 - -#define F_SETOWN 5 /* for sockets. */ -#define F_GETOWN 6 /* for sockets. */ -#define F_SETSIG 10 /* for sockets. */ -#define F_GETSIG 11 /* for sockets. */ - -/* for posix fcntl() and lockf() */ -#define F_RDLCK 1 -#define F_WRLCK 2 -#define F_UNLCK 8 - -/* for old implementation of bsd flock () */ -#define F_EXLCK 16 /* or 3 */ -#define F_SHLCK 32 /* or 4 */ - -#define F_INPROGRESS 64 - -#include - -#endif diff --git a/include/asm-alpha/floppy.h b/include/asm-alpha/floppy.h deleted file mode 100644 index 0be50413b2b..00000000000 --- a/include/asm-alpha/floppy.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Architecture specific parts of the Floppy driver - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1995 - */ -#ifndef __ASM_ALPHA_FLOPPY_H -#define __ASM_ALPHA_FLOPPY_H - - -#define fd_inb(port) inb_p(port) -#define fd_outb(value,port) outb_p(value,port) - -#define fd_enable_dma() enable_dma(FLOPPY_DMA) -#define fd_disable_dma() disable_dma(FLOPPY_DMA) -#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy") -#define fd_free_dma() free_dma(FLOPPY_DMA) -#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) -#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode) -#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,virt_to_bus(addr)) -#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count) -#define fd_enable_irq() enable_irq(FLOPPY_IRQ) -#define fd_disable_irq() disable_irq(FLOPPY_IRQ) -#define fd_cacheflush(addr,size) /* nothing */ -#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ - IRQF_DISABLED, "floppy", NULL) -#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); - -#ifdef CONFIG_PCI - -#include - -#define fd_dma_setup(addr,size,mode,io) alpha_fd_dma_setup(addr,size,mode,io) - -static __inline__ int -alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io) -{ - static unsigned long prev_size; - static dma_addr_t bus_addr = 0; - static char *prev_addr; - static int prev_dir; - int dir; - - dir = (mode != DMA_MODE_READ) ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; - - if (bus_addr - && (addr != prev_addr || size != prev_size || dir != prev_dir)) { - /* different from last time -- unmap prev */ - pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir); - bus_addr = 0; - } - - if (!bus_addr) /* need to map it */ - bus_addr = pci_map_single(isa_bridge, addr, size, dir); - - /* remember this one as prev */ - prev_addr = addr; - prev_size = size; - prev_dir = dir; - - fd_clear_dma_ff(); - fd_cacheflush(addr, size); - fd_set_dma_mode(mode); - set_dma_addr(FLOPPY_DMA, bus_addr); - fd_set_dma_count(size); - virtual_dma_port = io; - fd_enable_dma(); - - return 0; -} - -#endif /* CONFIG_PCI */ - -__inline__ void virtual_dma_init(void) -{ - /* Nothing to do on an Alpha */ -} - -static int FDC1 = 0x3f0; -static int FDC2 = -1; - -/* - * Again, the CMOS information doesn't work on the alpha.. - */ -#define FLOPPY0_TYPE 6 -#define FLOPPY1_TYPE 0 - -#define N_FDC 2 -#define N_DRIVE 8 - -/* - * Most Alphas have no problems with floppy DMA crossing 64k borders, - * except for certain ones, like XL and RUFFIAN. - * - * However, the test is simple and fast, and this *is* floppy, after all, - * so we do it for all platforms, just to make sure. - * - * This is advantageous in other circumstances as well, as in moving - * about the PCI DMA windows and forcing the floppy to start doing - * scatter-gather when it never had before, and there *is* a problem - * on that platform... ;-} - */ - -static inline unsigned long CROSS_64KB(void *a, unsigned long s) -{ - unsigned long p = (unsigned long)a; - return ((p + s - 1) ^ p) & ~0xffffUL; -} - -#define EXTRA_FLOPPY_PARAMS - -#endif /* __ASM_ALPHA_FLOPPY_H */ diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h deleted file mode 100644 index ecb17a72acc..00000000000 --- a/include/asm-alpha/fpu.h +++ /dev/null @@ -1,193 +0,0 @@ -#ifndef __ASM_ALPHA_FPU_H -#define __ASM_ALPHA_FPU_H - -/* - * Alpha floating-point control register defines: - */ -#define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */ -#define FPCR_DNZ (1UL<<48) /* denorms to zero */ -#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */ -#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */ -#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */ -#define FPCR_INV (1UL<<52) /* invalid operation */ -#define FPCR_DZE (1UL<<53) /* division by zero */ -#define FPCR_OVF (1UL<<54) /* overflow */ -#define FPCR_UNF (1UL<<55) /* underflow */ -#define FPCR_INE (1UL<<56) /* inexact */ -#define FPCR_IOV (1UL<<57) /* integer overflow */ -#define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */ -#define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */ -#define FPCR_INED (1UL<<62) /* inexact disable (opt.) */ -#define FPCR_SUM (1UL<<63) /* summary bit */ - -#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */ -#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */ -#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */ -#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */ -#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */ -#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT) - -#define FPCR_MASK 0xffff800000000000L - -/* - * IEEE trap enables are implemented in software. These per-thread - * bits are stored in the "ieee_state" field of "struct thread_info". - * Thus, the bits are defined so as not to conflict with the - * floating-point enable bit (which is architected). 
On top of that, - * we want to make these bits compatible with OSF/1 so - * ieee_set_fp_control() etc. can be implemented easily and - * compatibly. The corresponding definitions are in - * /usr/include/machine/fpu.h under OSF/1. - */ -#define IEEE_TRAP_ENABLE_INV (1UL<<1) /* invalid op */ -#define IEEE_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */ -#define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */ -#define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */ -#define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */ -#define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */ -#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\ - IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\ - IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO) - -/* Denorm and Underflow flushing */ -#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */ -#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */ - -#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ) - -/* status bits coming from fpcr: */ -#define IEEE_STATUS_INV (1UL<<17) -#define IEEE_STATUS_DZE (1UL<<18) -#define IEEE_STATUS_OVF (1UL<<19) -#define IEEE_STATUS_UNF (1UL<<20) -#define IEEE_STATUS_INE (1UL<<21) -#define IEEE_STATUS_DNO (1UL<<22) - -#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \ - IEEE_STATUS_OVF | IEEE_STATUS_UNF | \ - IEEE_STATUS_INE | IEEE_STATUS_DNO) - -#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \ - IEEE_STATUS_MASK | IEEE_MAP_MASK) - -#define IEEE_CURRENT_RM_SHIFT 32 -#define IEEE_CURRENT_RM_MASK (3UL<> 35) & IEEE_STATUS_MASK; - sw |= (fp >> 36) & IEEE_MAP_DMZ; - sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV - | IEEE_TRAP_ENABLE_DZE - | IEEE_TRAP_ENABLE_OVF); - sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE); - sw |= (fp >> 47) & IEEE_MAP_UMZ; - sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO; - return sw; -} - -#ifdef __KERNEL__ - -/* The following two functions don't need trapb/excb instructions - around the mf_fpcr/mt_fpcr instructions because (a) the kernel - never generates arithmetic faults and (b) call_pal instructions - are implied trap barriers. */ - -static inline unsigned long -rdfpcr(void) -{ - unsigned long tmp, ret; - -#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) - __asm__ __volatile__ ( - "ftoit $f0,%0\n\t" - "mf_fpcr $f0\n\t" - "ftoit $f0,%1\n\t" - "itoft %0,$f0" - : "=r"(tmp), "=r"(ret)); -#else - __asm__ __volatile__ ( - "stt $f0,%0\n\t" - "mf_fpcr $f0\n\t" - "stt $f0,%1\n\t" - "ldt $f0,%0" - : "=m"(tmp), "=m"(ret)); -#endif - - return ret; -} - -static inline void -wrfpcr(unsigned long val) -{ - unsigned long tmp; - -#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) - __asm__ __volatile__ ( - "ftoit $f0,%0\n\t" - "itoft %1,$f0\n\t" - "mt_fpcr $f0\n\t" - "itoft %0,$f0" - : "=&r"(tmp) : "r"(val)); -#else - __asm__ __volatile__ ( - "stt $f0,%0\n\t" - "ldt $f0,%1\n\t" - "mt_fpcr $f0\n\t" - "ldt $f0,%0" - : "=m"(tmp) : "m"(val)); -#endif -} - -static inline unsigned long -swcr_update_status(unsigned long swcr, unsigned long fpcr) -{ - /* EV6 implements most of the bits in hardware. Collect - the acrued exception bits from the real fpcr. 
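The FPCR_DYN_* fields defined above select the dynamic rounding mode used by floating-point instructions that request dynamic rounding. A minimal sketch of switching that mode with the rdfpcr()/wrfpcr() helpers defined earlier in this header; it is illustrative only and not part of the original file:

static inline void example_set_round_to_minus_inf(void)
{
	unsigned long fpcr = rdfpcr();

	/* Replace the dynamic rounding field, keep everything else. */
	fpcr = (fpcr & ~FPCR_DYN_MASK) | FPCR_DYN_MINUS;
	wrfpcr(fpcr);
}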
*/ - if (implver() == IMPLVER_EV6) { - swcr &= ~IEEE_STATUS_MASK; - swcr |= (fpcr >> 35) & IEEE_STATUS_MASK; - } - return swcr; -} - -extern unsigned long alpha_read_fp_reg (unsigned long reg); -extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); -extern unsigned long alpha_read_fp_reg_s (unsigned long reg); -extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val); - -#endif /* __KERNEL__ */ - -#endif /* __ASM_ALPHA_FPU_H */ diff --git a/include/asm-alpha/futex.h b/include/asm-alpha/futex.h deleted file mode 100644 index 6a332a9f099..00000000000 --- a/include/asm-alpha/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include - -#endif diff --git a/include/asm-alpha/gct.h b/include/asm-alpha/gct.h deleted file mode 100644 index 3504c704927..00000000000 --- a/include/asm-alpha/gct.h +++ /dev/null @@ -1,58 +0,0 @@ -#ifndef __ALPHA_GCT_H -#define __ALPHA_GCT_H - -typedef u64 gct_id; -typedef u64 gct6_handle; - -typedef struct __gct6_node { - u8 type; - u8 subtype; - u16 size; - u32 hd_extension; - gct6_handle owner; - gct6_handle active_user; - gct_id id; - u64 flags; - u16 rev; - u16 change_counter; - u16 max_child; - u16 reserved1; - gct6_handle saved_owner; - gct6_handle affinity; - gct6_handle parent; - gct6_handle next; - gct6_handle prev; - gct6_handle child; - u64 fw_flags; - u64 os_usage; - u64 fru_id; - u32 checksum; - u32 magic; /* 'GLXY' */ -} gct6_node; - -typedef struct { - u8 type; - u8 subtype; - void (*callout)(gct6_node *); -} gct6_search_struct; - -#define GCT_NODE_MAGIC 0x59584c47 /* 'GLXY' */ - -/* - * node types - */ -#define GCT_TYPE_HOSE 0x0E - -/* - * node subtypes - */ -#define GCT_SUBTYPE_IO_PORT_MODULE 0x2C - -#define GCT_NODE_PTR(off) ((gct6_node *)((char *)hwrpb + \ - hwrpb->frut_offset + \ - (gct6_handle)(off))) \ - -int gct6_find_nodes(gct6_node *, gct6_search_struct *); - -#endif /* __ALPHA_GCT_H */ - diff --git a/include/asm-alpha/gentrap.h b/include/asm-alpha/gentrap.h deleted file mode 100644 index ae50cc3192c..00000000000 --- a/include/asm-alpha/gentrap.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef _ASMAXP_GENTRAP_H -#define _ASMAXP_GENTRAP_H - -/* - * Definitions for gentrap causes. They are generated by user-level - * programs and therefore should be compatible with the corresponding - * OSF/1 definitions. - */ -#define GEN_INTOVF -1 /* integer overflow */ -#define GEN_INTDIV -2 /* integer division by zero */ -#define GEN_FLTOVF -3 /* fp overflow */ -#define GEN_FLTDIV -4 /* fp division by zero */ -#define GEN_FLTUND -5 /* fp underflow */ -#define GEN_FLTINV -6 /* invalid fp operand */ -#define GEN_FLTINE -7 /* inexact fp operand */ -#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */ -#define GEN_DECDIV -9 /* decimal division by zero */ -#define GEN_DECINV -10 /* invalid decimal operand */ -#define GEN_ROPRAND -11 /* reserved operand */ -#define GEN_ASSERTERR -12 /* assertion error */ -#define GEN_NULPTRERR -13 /* null pointer error */ -#define GEN_STKOVF -14 /* stack overflow */ -#define GEN_STRLENERR -15 /* string length error */ -#define GEN_SUBSTRERR -16 /* substring error */ -#define GEN_RANGERR -17 /* range error */ -#define GEN_SUBRNG -18 -#define GEN_SUBRNG1 -19 -#define GEN_SUBRNG2 -20 -#define GEN_SUBRNG3 -21 /* these report range errors for */ -#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */ -#define GEN_SUBRNG5 -23 -#define GEN_SUBRNG6 -24 -#define GEN_SUBRNG7 -25 - -/* the remaining codes (-26..-1023) are reserved. 
*/ - -#endif /* _ASMAXP_GENTRAP_H */ diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h deleted file mode 100644 index d953e234daa..00000000000 --- a/include/asm-alpha/hardirq.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _ALPHA_HARDIRQ_H -#define _ALPHA_HARDIRQ_H - -#include -#include - - -/* entry.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned long __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - -void ack_bad_irq(unsigned int irq); - -#define HARDIRQ_BITS 12 - -/* - * The hardirq mask has to be large enough to have - * space for potentially nestable IRQ sources in the system - * to nest on a single CPU. On Alpha, interrupts are masked at the CPU - * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode) - * so we really only have 8 nestable IRQs, but allow some overhead - */ -#if (1 << HARDIRQ_BITS) < 16 -#error HARDIRQ_BITS is too low! -#endif - -#endif /* _ALPHA_HARDIRQ_H */ diff --git a/include/asm-alpha/hw_irq.h b/include/asm-alpha/hw_irq.h deleted file mode 100644 index a37db0f9509..00000000000 --- a/include/asm-alpha/hw_irq.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ALPHA_HW_IRQ_H -#define _ALPHA_HW_IRQ_H - - -extern volatile unsigned long irq_err_count; - -#ifdef CONFIG_ALPHA_GENERIC -#define ACTUAL_NR_IRQS alpha_mv.nr_irqs -#else -#define ACTUAL_NR_IRQS NR_IRQS -#endif - -#endif diff --git a/include/asm-alpha/hwrpb.h b/include/asm-alpha/hwrpb.h deleted file mode 100644 index 8e8f871af7c..00000000000 --- a/include/asm-alpha/hwrpb.h +++ /dev/null @@ -1,220 +0,0 @@ -#ifndef __ALPHA_HWRPB_H -#define __ALPHA_HWRPB_H - -#define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000) - -/* - * DEC processor types for Alpha systems. Found in HWRPB. - * These values are architected. - */ - -#define EV3_CPU 1 /* EV3 */ -#define EV4_CPU 2 /* EV4 (21064) */ -#define LCA4_CPU 4 /* LCA4 (21066/21068) */ -#define EV5_CPU 5 /* EV5 (21164) */ -#define EV45_CPU 6 /* EV4.5 (21064/xxx) */ -#define EV56_CPU 7 /* EV5.6 (21164) */ -#define EV6_CPU 8 /* EV6 (21264) */ -#define PCA56_CPU 9 /* PCA56 (21164PC) */ -#define PCA57_CPU 10 /* PCA57 (notyet) */ -#define EV67_CPU 11 /* EV67 (21264A) */ -#define EV68CB_CPU 12 /* EV68CB (21264C) */ -#define EV68AL_CPU 13 /* EV68AL (21264B) */ -#define EV68CX_CPU 14 /* EV68CX (21264D) */ -#define EV7_CPU 15 /* EV7 (21364) */ -#define EV79_CPU 16 /* EV79 (21364??) */ -#define EV69_CPU 17 /* EV69 (21264/EV69A) */ - -/* - * DEC system types for Alpha systems. Found in HWRPB. - * These values are architected. 
- */ - -#define ST_ADU 1 /* Alpha ADU systype */ -#define ST_DEC_4000 2 /* Cobra systype */ -#define ST_DEC_7000 3 /* Ruby systype */ -#define ST_DEC_3000_500 4 /* Flamingo systype */ -#define ST_DEC_2000_300 6 /* Jensen systype */ -#define ST_DEC_3000_300 7 /* Pelican systype */ -#define ST_DEC_2100_A500 9 /* Sable systype */ -#define ST_DEC_AXPVME_64 10 /* AXPvme system type */ -#define ST_DEC_AXPPCI_33 11 /* NoName system type */ -#define ST_DEC_TLASER 12 /* Turbolaser systype */ -#define ST_DEC_2100_A50 13 /* Avanti systype */ -#define ST_DEC_MUSTANG 14 /* Mustang systype */ -#define ST_DEC_ALCOR 15 /* Alcor (EV5) systype */ -#define ST_DEC_1000 17 /* Mikasa systype */ -#define ST_DEC_EB64 18 /* EB64 systype */ -#define ST_DEC_EB66 19 /* EB66 systype */ -#define ST_DEC_EB64P 20 /* EB64+ systype */ -#define ST_DEC_BURNS 21 /* laptop systype */ -#define ST_DEC_RAWHIDE 22 /* Rawhide systype */ -#define ST_DEC_K2 23 /* K2 systype */ -#define ST_DEC_LYNX 24 /* Lynx systype */ -#define ST_DEC_XL 25 /* Alpha XL systype */ -#define ST_DEC_EB164 26 /* EB164 systype */ -#define ST_DEC_NORITAKE 27 /* Noritake systype */ -#define ST_DEC_CORTEX 28 /* Cortex systype */ -#define ST_DEC_MIATA 30 /* Miata systype */ -#define ST_DEC_XXM 31 /* XXM systype */ -#define ST_DEC_TAKARA 32 /* Takara systype */ -#define ST_DEC_YUKON 33 /* Yukon systype */ -#define ST_DEC_TSUNAMI 34 /* Tsunami systype */ -#define ST_DEC_WILDFIRE 35 /* Wildfire systype */ -#define ST_DEC_CUSCO 36 /* CUSCO systype */ -#define ST_DEC_EIGER 37 /* Eiger systype */ -#define ST_DEC_TITAN 38 /* Titan systype */ -#define ST_DEC_MARVEL 39 /* Marvel systype */ - -/* UNOFFICIAL!!! */ -#define ST_UNOFFICIAL_BIAS 100 -#define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */ - -/* Alpha Processor, Inc. systems */ -#define ST_API_BIAS 200 -#define ST_API_NAUTILUS 201 /* UP1000 systype */ - -struct pcb_struct { - unsigned long ksp; - unsigned long usp; - unsigned long ptbr; - unsigned int pcc; - unsigned int asn; - unsigned long unique; - unsigned long flags; - unsigned long res1, res2; -}; - -struct percpu_struct { - unsigned long hwpcb[16]; - unsigned long flags; - unsigned long pal_mem_size; - unsigned long pal_scratch_size; - unsigned long pal_mem_pa; - unsigned long pal_scratch_pa; - unsigned long pal_revision; - unsigned long type; - unsigned long variation; - unsigned long revision; - unsigned long serial_no[2]; - unsigned long logout_area_pa; - unsigned long logout_area_len; - unsigned long halt_PCBB; - unsigned long halt_PC; - unsigned long halt_PS; - unsigned long halt_arg; - unsigned long halt_ra; - unsigned long halt_pv; - unsigned long halt_reason; - unsigned long res; - unsigned long ipc_buffer[21]; - unsigned long palcode_avail[16]; - unsigned long compatibility; - unsigned long console_data_log_pa; - unsigned long console_data_log_length; - unsigned long bcache_info; -}; - -struct procdesc_struct { - unsigned long weird_vms_stuff; - unsigned long address; -}; - -struct vf_map_struct { - unsigned long va; - unsigned long pa; - unsigned long count; -}; - -struct crb_struct { - struct procdesc_struct * dispatch_va; - struct procdesc_struct * dispatch_pa; - struct procdesc_struct * fixup_va; - struct procdesc_struct * fixup_pa; - /* virtual->physical map */ - unsigned long map_entries; - unsigned long map_pages; - struct vf_map_struct map[1]; -}; - -struct memclust_struct { - unsigned long start_pfn; - unsigned long numpages; - unsigned long numtested; - unsigned long bitmap_va; - unsigned long bitmap_pa; - unsigned long bitmap_chksum; - 
unsigned long usage; -}; - -struct memdesc_struct { - unsigned long chksum; - unsigned long optional_pa; - unsigned long numclusters; - struct memclust_struct cluster[0]; -}; - -struct dsr_struct { - long smm; /* SMM nubber used by LMF */ - unsigned long lurt_off; /* offset to LURT table */ - unsigned long sysname_off; /* offset to sysname char count */ -}; - -struct hwrpb_struct { - unsigned long phys_addr; /* check: physical address of the hwrpb */ - unsigned long id; /* check: "HWRPB\0\0\0" */ - unsigned long revision; - unsigned long size; /* size of hwrpb */ - unsigned long cpuid; - unsigned long pagesize; /* 8192, I hope */ - unsigned long pa_bits; /* number of physical address bits */ - unsigned long max_asn; - unsigned char ssn[16]; /* system serial number: big bother is watching */ - unsigned long sys_type; - unsigned long sys_variation; - unsigned long sys_revision; - unsigned long intr_freq; /* interval clock frequency * 4096 */ - unsigned long cycle_freq; /* cycle counter frequency */ - unsigned long vptb; /* Virtual Page Table Base address */ - unsigned long res1; - unsigned long tbhb_offset; /* Translation Buffer Hint Block */ - unsigned long nr_processors; - unsigned long processor_size; - unsigned long processor_offset; - unsigned long ctb_nr; - unsigned long ctb_size; /* console terminal block size */ - unsigned long ctbt_offset; /* console terminal block table offset */ - unsigned long crb_offset; /* console callback routine block */ - unsigned long mddt_offset; /* memory data descriptor table */ - unsigned long cdb_offset; /* configuration data block (or NULL) */ - unsigned long frut_offset; /* FRU table (or NULL) */ - void (*save_terminal)(unsigned long); - unsigned long save_terminal_data; - void (*restore_terminal)(unsigned long); - unsigned long restore_terminal_data; - void (*CPU_restart)(unsigned long); - unsigned long CPU_restart_data; - unsigned long res2; - unsigned long res3; - unsigned long chksum; - unsigned long rxrdy; - unsigned long txrdy; - unsigned long dsr_offset; /* "Dynamic System Recognition Data Block Table" */ -}; - -#ifdef __KERNEL__ - -extern struct hwrpb_struct *hwrpb; - -static inline void -hwrpb_update_checksum(struct hwrpb_struct *h) -{ - unsigned long sum = 0, *l; - for (l = (unsigned long *) h; l < (unsigned long *) &h->chksum; ++l) - sum += *l; - h->chksum = sum; -} - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_HWRPB_H */ diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h deleted file mode 100644 index e971ab000f9..00000000000 --- a/include/asm-alpha/io.h +++ /dev/null @@ -1,577 +0,0 @@ -#ifndef __ALPHA_IO_H -#define __ALPHA_IO_H - -#ifdef __KERNEL__ - -#include -#include -#include -#include -#include -#include -#include - -/* The generic header contains only prototypes. Including it ensures that - the implementation we have here matches that interface. */ -#include - -/* We don't use IO slowdowns on the Alpha, but.. */ -#define __SLOW_DOWN_IO do { } while (0) -#define SLOW_DOWN_IO do { } while (0) - -/* - * Virtual -> physical identity mapping starts at this offset - */ -#ifdef USE_48_BIT_KSEG -#define IDENT_ADDR 0xffff800000000000UL -#else -#define IDENT_ADDR 0xfffffc0000000000UL -#endif - -/* - * We try to avoid hae updates (thus the cache), but when we - * do need to update the hae, we need to do it atomically, so - * that any interrupts wouldn't get confused with the hae - * register not being up-to-date with respect to the hardware - * value. 
- */ -extern inline void __set_hae(unsigned long new_hae) -{ - unsigned long flags; - local_irq_save(flags); - - alpha_mv.hae_cache = new_hae; - *alpha_mv.hae_register = new_hae; - mb(); - /* Re-read to make sure it was written. */ - new_hae = *alpha_mv.hae_register; - - local_irq_restore(flags); -} - -extern inline void set_hae(unsigned long new_hae) -{ - if (new_hae != alpha_mv.hae_cache) - __set_hae(new_hae); -} - -/* - * Change virtual addresses to physical addresses and vv. - */ -#ifdef USE_48_BIT_KSEG -static inline unsigned long virt_to_phys(void *address) -{ - return (unsigned long)address - IDENT_ADDR; -} - -static inline void * phys_to_virt(unsigned long address) -{ - return (void *) (address + IDENT_ADDR); -} -#else -static inline unsigned long virt_to_phys(void *address) -{ - unsigned long phys = (unsigned long)address; - - /* Sign-extend from bit 41. */ - phys <<= (64 - 41); - phys = (long)phys >> (64 - 41); - - /* Crop to the physical address width of the processor. */ - phys &= (1ul << hwrpb->pa_bits) - 1; - - return phys; -} - -static inline void * phys_to_virt(unsigned long address) -{ - return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1))); -} -#endif - -#define page_to_phys(page) page_to_pa(page) - -static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page) -{ - return page_to_phys(page); -} - -/* This depends on working iommu. */ -#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0) - -/* Maximum PIO space address supported? */ -#define IO_SPACE_LIMIT 0xffff - -/* - * Change addresses as seen by the kernel (virtual) to addresses as - * seen by a device (bus), and vice versa. - * - * Note that this only works for a limited range of kernel addresses, - * and very well may not span all memory. Consider this interface - * deprecated in favour of the DMA-mapping API. - */ -extern unsigned long __direct_map_base; -extern unsigned long __direct_map_size; - -static inline unsigned long __deprecated virt_to_bus(void *address) -{ - unsigned long phys = virt_to_phys(address); - unsigned long bus = phys + __direct_map_base; - return phys <= __direct_map_size ? bus : 0; -} -#define isa_virt_to_bus virt_to_bus - -static inline void * __deprecated bus_to_virt(unsigned long address) -{ - void *virt; - - /* This check is a sanity check but also ensures that bus address 0 - maps to virtual address 0 which is useful to detect null pointers - (the NCR driver is much simpler if NULL pointers are preserved). */ - address -= __direct_map_base; - virt = phys_to_virt(address); - return (long)address <= 0 ? NULL : virt; -} -#define isa_bus_to_virt bus_to_virt - -/* - * There are different chipsets to interface the Alpha CPUs to the world. - */ - -#define IO_CONCAT(a,b) _IO_CONCAT(a,b) -#define _IO_CONCAT(a,b) a ## _ ## b - -#ifdef CONFIG_ALPHA_GENERIC - -/* In a generic kernel, we always go through the machine vector. 
*/ - -#define REMAP1(TYPE, NAME, QUAL) \ -static inline TYPE generic_##NAME(QUAL void __iomem *addr) \ -{ \ - return alpha_mv.mv_##NAME(addr); \ -} - -#define REMAP2(TYPE, NAME, QUAL) \ -static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \ -{ \ - alpha_mv.mv_##NAME(b, addr); \ -} - -REMAP1(unsigned int, ioread8, /**/) -REMAP1(unsigned int, ioread16, /**/) -REMAP1(unsigned int, ioread32, /**/) -REMAP1(u8, readb, const volatile) -REMAP1(u16, readw, const volatile) -REMAP1(u32, readl, const volatile) -REMAP1(u64, readq, const volatile) - -REMAP2(u8, iowrite8, /**/) -REMAP2(u16, iowrite16, /**/) -REMAP2(u32, iowrite32, /**/) -REMAP2(u8, writeb, volatile) -REMAP2(u16, writew, volatile) -REMAP2(u32, writel, volatile) -REMAP2(u64, writeq, volatile) - -#undef REMAP1 -#undef REMAP2 - -extern inline void __iomem *generic_ioportmap(unsigned long a) -{ - return alpha_mv.mv_ioportmap(a); -} - -static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s) -{ - return alpha_mv.mv_ioremap(a, s); -} - -static inline void generic_iounmap(volatile void __iomem *a) -{ - return alpha_mv.mv_iounmap(a); -} - -static inline int generic_is_ioaddr(unsigned long a) -{ - return alpha_mv.mv_is_ioaddr(a); -} - -static inline int generic_is_mmio(const volatile void __iomem *a) -{ - return alpha_mv.mv_is_mmio(a); -} - -#define __IO_PREFIX generic -#define generic_trivial_rw_bw 0 -#define generic_trivial_rw_lq 0 -#define generic_trivial_io_bw 0 -#define generic_trivial_io_lq 0 -#define generic_trivial_iounmap 0 - -#else - -#if defined(CONFIG_ALPHA_APECS) -# include -#elif defined(CONFIG_ALPHA_CIA) -# include -#elif defined(CONFIG_ALPHA_IRONGATE) -# include -#elif defined(CONFIG_ALPHA_JENSEN) -# include -#elif defined(CONFIG_ALPHA_LCA) -# include -#elif defined(CONFIG_ALPHA_MARVEL) -# include -#elif defined(CONFIG_ALPHA_MCPCIA) -# include -#elif defined(CONFIG_ALPHA_POLARIS) -# include -#elif defined(CONFIG_ALPHA_T2) -# include -#elif defined(CONFIG_ALPHA_TSUNAMI) -# include -#elif defined(CONFIG_ALPHA_TITAN) -# include -#elif defined(CONFIG_ALPHA_WILDFIRE) -# include -#else -#error "What system is this?" -#endif - -#endif /* GENERIC */ - -/* - * We always have external versions of these routines. - */ -extern u8 inb(unsigned long port); -extern u16 inw(unsigned long port); -extern u32 inl(unsigned long port); -extern void outb(u8 b, unsigned long port); -extern void outw(u16 b, unsigned long port); -extern void outl(u32 b, unsigned long port); - -extern u8 readb(const volatile void __iomem *addr); -extern u16 readw(const volatile void __iomem *addr); -extern u32 readl(const volatile void __iomem *addr); -extern u64 readq(const volatile void __iomem *addr); -extern void writeb(u8 b, volatile void __iomem *addr); -extern void writew(u16 b, volatile void __iomem *addr); -extern void writel(u32 b, volatile void __iomem *addr); -extern void writeq(u64 b, volatile void __iomem *addr); - -extern u8 __raw_readb(const volatile void __iomem *addr); -extern u16 __raw_readw(const volatile void __iomem *addr); -extern u32 __raw_readl(const volatile void __iomem *addr); -extern u64 __raw_readq(const volatile void __iomem *addr); -extern void __raw_writeb(u8 b, volatile void __iomem *addr); -extern void __raw_writew(u16 b, volatile void __iomem *addr); -extern void __raw_writel(u32 b, volatile void __iomem *addr); -extern void __raw_writeq(u64 b, volatile void __iomem *addr); - -/* - * Mapping from port numbers to __iomem space is pretty easy. 
- */ - -/* These two have to be extern inline because of the extern prototype from - . It is not legal to mix "extern" and "static" for - the same declaration. */ -extern inline void __iomem *ioport_map(unsigned long port, unsigned int size) -{ - return IO_CONCAT(__IO_PREFIX,ioportmap) (port); -} - -extern inline void ioport_unmap(void __iomem *addr) -{ -} - -static inline void __iomem *ioremap(unsigned long port, unsigned long size) -{ - return IO_CONCAT(__IO_PREFIX,ioremap) (port, size); -} - -static inline void __iomem *__ioremap(unsigned long port, unsigned long size, - unsigned long flags) -{ - return ioremap(port, size); -} - -static inline void __iomem * ioremap_nocache(unsigned long offset, - unsigned long size) -{ - return ioremap(offset, size); -} - -static inline void iounmap(volatile void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,iounmap)(addr); -} - -static inline int __is_ioaddr(unsigned long addr) -{ - return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr); -} -#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a)) - -static inline int __is_mmio(const volatile void __iomem *addr) -{ - return IO_CONCAT(__IO_PREFIX,is_mmio)(addr); -} - - -/* - * If the actual I/O bits are sufficiently trivial, then expand inline. - */ - -#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) -extern inline unsigned int ioread8(void __iomem *addr) -{ - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); - mb(); - return ret; -} - -extern inline unsigned int ioread16(void __iomem *addr) -{ - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); - mb(); - return ret; -} - -extern inline void iowrite8(u8 b, void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); - mb(); -} - -extern inline void iowrite16(u16 b, void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); - mb(); -} - -extern inline u8 inb(unsigned long port) -{ - return ioread8(ioport_map(port, 1)); -} - -extern inline u16 inw(unsigned long port) -{ - return ioread16(ioport_map(port, 2)); -} - -extern inline void outb(u8 b, unsigned long port) -{ - iowrite8(b, ioport_map(port, 1)); -} - -extern inline void outw(u16 b, unsigned long port) -{ - iowrite16(b, ioport_map(port, 2)); -} -#endif - -#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) -extern inline unsigned int ioread32(void __iomem *addr) -{ - unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); - mb(); - return ret; -} - -extern inline void iowrite32(u32 b, void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); - mb(); -} - -extern inline u32 inl(unsigned long port) -{ - return ioread32(ioport_map(port, 4)); -} - -extern inline void outl(u32 b, unsigned long port) -{ - iowrite32(b, ioport_map(port, 4)); -} -#endif - -#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 -extern inline u8 __raw_readb(const volatile void __iomem *addr) -{ - return IO_CONCAT(__IO_PREFIX,readb)(addr); -} - -extern inline u16 __raw_readw(const volatile void __iomem *addr) -{ - return IO_CONCAT(__IO_PREFIX,readw)(addr); -} - -extern inline void __raw_writeb(u8 b, volatile void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,writeb)(b, addr); -} - -extern inline void __raw_writew(u16 b, volatile void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,writew)(b, addr); -} - -extern inline u8 readb(const volatile void __iomem *addr) -{ - u8 ret = __raw_readb(addr); - mb(); - return ret; -} - -extern inline u16 readw(const volatile void __iomem *addr) -{ - u16 ret = __raw_readw(addr); - mb(); - return ret; -} - -extern inline void writeb(u8 b, volatile void __iomem *addr) -{ - __raw_writeb(b, addr); - mb(); 
-} - -extern inline void writew(u16 b, volatile void __iomem *addr) -{ - __raw_writew(b, addr); - mb(); -} -#endif - -#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 -extern inline u32 __raw_readl(const volatile void __iomem *addr) -{ - return IO_CONCAT(__IO_PREFIX,readl)(addr); -} - -extern inline u64 __raw_readq(const volatile void __iomem *addr) -{ - return IO_CONCAT(__IO_PREFIX,readq)(addr); -} - -extern inline void __raw_writel(u32 b, volatile void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,writel)(b, addr); -} - -extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) -{ - IO_CONCAT(__IO_PREFIX,writeq)(b, addr); -} - -extern inline u32 readl(const volatile void __iomem *addr) -{ - u32 ret = __raw_readl(addr); - mb(); - return ret; -} - -extern inline u64 readq(const volatile void __iomem *addr) -{ - u64 ret = __raw_readq(addr); - mb(); - return ret; -} - -extern inline void writel(u32 b, volatile void __iomem *addr) -{ - __raw_writel(b, addr); - mb(); -} - -extern inline void writeq(u64 b, volatile void __iomem *addr) -{ - __raw_writeq(b, addr); - mb(); -} -#endif - -#define inb_p inb -#define inw_p inw -#define inl_p inl -#define outb_p outb -#define outw_p outw -#define outl_p outl -#define readb_relaxed(addr) __raw_readb(addr) -#define readw_relaxed(addr) __raw_readw(addr) -#define readl_relaxed(addr) __raw_readl(addr) -#define readq_relaxed(addr) __raw_readq(addr) - -#define mmiowb() - -/* - * String version of IO memory access ops: - */ -extern void memcpy_fromio(void *, const volatile void __iomem *, long); -extern void memcpy_toio(volatile void __iomem *, const void *, long); -extern void _memset_c_io(volatile void __iomem *, unsigned long, long); - -static inline void memset_io(volatile void __iomem *addr, u8 c, long len) -{ - _memset_c_io(addr, 0x0101010101010101UL * c, len); -} - -#define __HAVE_ARCH_MEMSETW_IO -static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) -{ - _memset_c_io(addr, 0x0001000100010001UL * c, len); -} - -/* - * String versions of in/out ops: - */ -extern void insb (unsigned long port, void *dst, unsigned long count); -extern void insw (unsigned long port, void *dst, unsigned long count); -extern void insl (unsigned long port, void *dst, unsigned long count); -extern void outsb (unsigned long port, const void *src, unsigned long count); -extern void outsw (unsigned long port, const void *src, unsigned long count); -extern void outsl (unsigned long port, const void *src, unsigned long count); - -/* - * The Alpha Jensen hardware for some rather strange reason puts - * the RTC clock at 0x170 instead of 0x70. Probably due to some - * misguided idea about using 0x70 for NMI stuff. - * - * These defines will override the defaults when doing RTC queries - */ - -#ifdef CONFIG_ALPHA_GENERIC -# define RTC_PORT(x) ((x) + alpha_mv.rtc_port) -#else -# ifdef CONFIG_ALPHA_JENSEN -# define RTC_PORT(x) (0x170+(x)) -# else -# define RTC_PORT(x) (0x70 + (x)) -# endif -#endif -#define RTC_ALWAYS_BCD 0 - -/* - * Some mucking forons use if[n]def writeq to check if platform has it. - * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them - * to play with; for now just use cpp anti-recursion logics and make sure - * that damn thing is defined and expands to itself. 
- */ - -#define writeq writeq -#define readq readq - -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_IO_H */ diff --git a/include/asm-alpha/io_trivial.h b/include/asm-alpha/io_trivial.h deleted file mode 100644 index 1c77f10b4b3..00000000000 --- a/include/asm-alpha/io_trivial.h +++ /dev/null @@ -1,131 +0,0 @@ -/* Trivial implementations of basic i/o routines. Assumes that all - of the hard work has been done by ioremap and ioportmap, and that - access to i/o space is linear. */ - -/* This file may be included multiple times. */ - -#if IO_CONCAT(__IO_PREFIX,trivial_io_bw) -__EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a) -{ - return __kernel_ldbu(*(volatile u8 __force *)a); -} - -__EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a) -{ - return __kernel_ldwu(*(volatile u16 __force *)a); -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a) -{ - __kernel_stb(b, *(volatile u8 __force *)a); -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a) -{ - __kernel_stw(b, *(volatile u16 __force *)a); -} -#endif - -#if IO_CONCAT(__IO_PREFIX,trivial_io_lq) -__EXTERN_INLINE unsigned int -IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a) -{ - return *(volatile u32 __force *)a; -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a) -{ - *(volatile u32 __force *)a = b; -} -#endif - -#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 -__EXTERN_INLINE u8 -IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) -{ - return __kernel_ldbu(*(const volatile u8 __force *)a); -} - -__EXTERN_INLINE u16 -IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) -{ - return __kernel_ldwu(*(const volatile u16 __force *)a); -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) -{ - __kernel_stb(b, *(volatile u8 __force *)a); -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) -{ - __kernel_stw(b, *(volatile u16 __force *)a); -} -#elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2 -__EXTERN_INLINE u8 -IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) -{ - void __iomem *addr = (void __iomem *)a; - return IO_CONCAT(__IO_PREFIX,ioread8)(addr); -} - -__EXTERN_INLINE u16 -IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) -{ - void __iomem *addr = (void __iomem *)a; - return IO_CONCAT(__IO_PREFIX,ioread16)(addr); -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) -{ - void __iomem *addr = (void __iomem *)a; - IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) -{ - void __iomem *addr = (void __iomem *)a; - IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); -} -#endif - -#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 -__EXTERN_INLINE u32 -IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a) -{ - return *(const volatile u32 __force *)a; -} - -__EXTERN_INLINE u64 -IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a) -{ - return *(const volatile u64 __force *)a; -} - -__EXTERN_INLINE void -IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a) -{ - *(volatile u32 __force *)a = b; -} - -__EXTERN_INLINE void 
-IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a) -{ - *(volatile u64 __force *)a = b; -} -#endif - -#if IO_CONCAT(__IO_PREFIX,trivial_iounmap) -__EXTERN_INLINE void IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a) -{ -} -#endif diff --git a/include/asm-alpha/ioctl.h b/include/asm-alpha/ioctl.h deleted file mode 100644 index fc63727f417..00000000000 --- a/include/asm-alpha/ioctl.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef _ALPHA_IOCTL_H -#define _ALPHA_IOCTL_H - -/* - * The original linux ioctl numbering scheme was just a general - * "anything goes" setup, where more or less random numbers were - * assigned. Sorry, I was clueless when I started out on this. - * - * On the alpha, we'll try to clean it up a bit, using a more sane - * ioctl numbering, and also trying to be compatible with OSF/1 in - * the process. I'd like to clean it up for the i386 as well, but - * it's so painful recognizing both the new and the old numbers.. - */ - -#define _IOC_NRBITS 8 -#define _IOC_TYPEBITS 8 -#define _IOC_SIZEBITS 13 -#define _IOC_DIRBITS 3 - -#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) -#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) -#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) -#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) - -#define _IOC_NRSHIFT 0 -#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) -#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) -#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) - -/* - * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. - * And this turns out useful to catch old ioctl numbers in header - * files for us. - */ -#define _IOC_NONE 1U -#define _IOC_READ 2U -#define _IOC_WRITE 4U - -#define _IOC(dir,type,nr,size) \ - ((unsigned int) \ - (((dir) << _IOC_DIRSHIFT) | \ - ((type) << _IOC_TYPESHIFT) | \ - ((nr) << _IOC_NRSHIFT) | \ - ((size) << _IOC_SIZESHIFT))) - -/* used to create numbers */ -#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) -#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) -#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) -#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) - -/* used to decode them.. */ -#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) -#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) -#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) -#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) - -/* ...and for the drivers/sound files... 
*/ - -#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) -#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) -#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) -#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) -#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) - -#endif /* _ALPHA_IOCTL_H */ diff --git a/include/asm-alpha/ioctls.h b/include/asm-alpha/ioctls.h deleted file mode 100644 index 67bb9f6fdbe..00000000000 --- a/include/asm-alpha/ioctls.h +++ /dev/null @@ -1,112 +0,0 @@ -#ifndef _ASM_ALPHA_IOCTLS_H -#define _ASM_ALPHA_IOCTLS_H - -#include - -#define FIOCLEX _IO('f', 1) -#define FIONCLEX _IO('f', 2) -#define FIOASYNC _IOW('f', 125, int) -#define FIONBIO _IOW('f', 126, int) -#define FIONREAD _IOR('f', 127, int) -#define TIOCINQ FIONREAD -#define FIOQSIZE _IOR('f', 128, loff_t) - -#define TIOCGETP _IOR('t', 8, struct sgttyb) -#define TIOCSETP _IOW('t', 9, struct sgttyb) -#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */ - -#define TIOCSETC _IOW('t', 17, struct tchars) -#define TIOCGETC _IOR('t', 18, struct tchars) -#define TCGETS _IOR('t', 19, struct termios) -#define TCSETS _IOW('t', 20, struct termios) -#define TCSETSW _IOW('t', 21, struct termios) -#define TCSETSF _IOW('t', 22, struct termios) - -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) - -#define TCSBRK _IO('t', 29) -#define TCXONC _IO('t', 30) -#define TCFLSH _IO('t', 31) - -#define TIOCSWINSZ _IOW('t', 103, struct winsize) -#define TIOCGWINSZ _IOR('t', 104, struct winsize) -#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ -#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ -#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ - -#define TIOCGLTC _IOR('t', 116, struct ltchars) -#define TIOCSLTC _IOW('t', 117, struct ltchars) -#define TIOCSPGRP _IOW('t', 118, int) -#define TIOCGPGRP _IOR('t', 119, int) - -#define TIOCEXCL 0x540C -#define TIOCNXCL 0x540D -#define TIOCSCTTY 0x540E - -#define TIOCSTI 0x5412 -#define TIOCMGET 0x5415 -#define TIOCMBIS 0x5416 -#define TIOCMBIC 0x5417 -#define TIOCMSET 0x5418 -# define TIOCM_LE 0x001 -# define TIOCM_DTR 0x002 -# define TIOCM_RTS 0x004 -# define TIOCM_ST 0x008 -# define TIOCM_SR 0x010 -# define TIOCM_CTS 0x020 -# define TIOCM_CAR 0x040 -# define TIOCM_RNG 0x080 -# define TIOCM_DSR 0x100 -# define TIOCM_CD TIOCM_CAR -# define TIOCM_RI TIOCM_RNG -# define TIOCM_OUT1 0x2000 -# define TIOCM_OUT2 0x4000 -# define TIOCM_LOOP 0x8000 - -#define TIOCGSOFTCAR 0x5419 -#define TIOCSSOFTCAR 0x541A -#define TIOCLINUX 0x541C -#define TIOCCONS 0x541D -#define TIOCGSERIAL 0x541E -#define TIOCSSERIAL 0x541F -#define TIOCPKT 0x5420 -# define TIOCPKT_DATA 0 -# define TIOCPKT_FLUSHREAD 1 -# define TIOCPKT_FLUSHWRITE 2 -# define TIOCPKT_STOP 4 -# define TIOCPKT_START 8 -# define TIOCPKT_NOSTOP 16 -# define TIOCPKT_DOSTOP 32 - - -#define TIOCNOTTY 0x5422 -#define TIOCSETD 0x5423 -#define TIOCGETD 0x5424 -#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ -#define TIOCSBRK 0x5427 /* BSD compatibility */ -#define TIOCCBRK 0x5428 /* BSD compatibility */ -#define TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ - -#define TIOCSERCONFIG 0x5453 -#define TIOCSERGWILD 0x5454 -#define TIOCSERSWILD 0x5455 -#define TIOCGLCKTRMIOS 0x5456 -#define TIOCSLCKTRMIOS 0x5457 -#define TIOCSERGSTRUCT 
0x5458 /* For debugging only */ -#define TIOCSERGETLSR 0x5459 /* Get line status register */ - /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ -# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ -#define TIOCSERGETMULTI 0x545A /* Get multiport config */ -#define TIOCSERSETMULTI 0x545B /* Set multiport config */ - -#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ -#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ - -#endif /* _ASM_ALPHA_IOCTLS_H */ diff --git a/include/asm-alpha/ipcbuf.h b/include/asm-alpha/ipcbuf.h deleted file mode 100644 index d9c0e1a5070..00000000000 --- a/include/asm-alpha/ipcbuf.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _ALPHA_IPCBUF_H -#define _ALPHA_IPCBUF_H - -/* - * The ipc64_perm structure for alpha architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 32-bit seq - * - 2 miscellaneous 64-bit values - */ - -struct ipc64_perm -{ - __kernel_key_t key; - __kernel_uid_t uid; - __kernel_gid_t gid; - __kernel_uid_t cuid; - __kernel_gid_t cgid; - __kernel_mode_t mode; - unsigned short seq; - unsigned short __pad1; - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _ALPHA_IPCBUF_H */ diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h deleted file mode 100644 index 06377400dc0..00000000000 --- a/include/asm-alpha/irq.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef _ALPHA_IRQ_H -#define _ALPHA_IRQ_H - -/* - * linux/include/alpha/irq.h - * - * (C) 1994 Linus Torvalds - */ - -#include - -#if defined(CONFIG_ALPHA_GENERIC) - -/* Here NR_IRQS is not exact, but rather an upper bound. This is used - many places throughout the kernel to size static arrays. That's ok, - we'll use alpha_mv.nr_irqs when we want the real thing. */ - -/* When LEGACY_START_ADDRESS is selected, we leave out: - TITAN - WILDFIRE - MARVEL - - This helps keep the kernel object size reasonable for the majority - of machines. 
-*/ - -# if defined(CONFIG_ALPHA_LEGACY_START_ADDRESS) -# define NR_IRQS (128) /* max is RAWHIDE/TAKARA */ -# else -# define NR_IRQS (32768 + 16) /* marvel - 32 pids */ -# endif - -#elif defined(CONFIG_ALPHA_CABRIOLET) || \ - defined(CONFIG_ALPHA_EB66P) || \ - defined(CONFIG_ALPHA_EB164) || \ - defined(CONFIG_ALPHA_PC164) || \ - defined(CONFIG_ALPHA_LX164) -# define NR_IRQS 35 - -#elif defined(CONFIG_ALPHA_EB66) || \ - defined(CONFIG_ALPHA_EB64P) || \ - defined(CONFIG_ALPHA_MIKASA) -# define NR_IRQS 32 - -#elif defined(CONFIG_ALPHA_ALCOR) || \ - defined(CONFIG_ALPHA_MIATA) || \ - defined(CONFIG_ALPHA_RUFFIAN) || \ - defined(CONFIG_ALPHA_RX164) || \ - defined(CONFIG_ALPHA_NORITAKE) -# define NR_IRQS 48 - -#elif defined(CONFIG_ALPHA_SABLE) || \ - defined(CONFIG_ALPHA_SX164) -# define NR_IRQS 40 - -#elif defined(CONFIG_ALPHA_DP264) || \ - defined(CONFIG_ALPHA_LYNX) || \ - defined(CONFIG_ALPHA_SHARK) || \ - defined(CONFIG_ALPHA_EIGER) -# define NR_IRQS 64 - -#elif defined(CONFIG_ALPHA_TITAN) -#define NR_IRQS 80 - -#elif defined(CONFIG_ALPHA_RAWHIDE) || \ - defined(CONFIG_ALPHA_TAKARA) -# define NR_IRQS 128 - -#elif defined(CONFIG_ALPHA_WILDFIRE) -# define NR_IRQS 2048 /* enuff for 8 QBBs */ - -#elif defined(CONFIG_ALPHA_MARVEL) -# define NR_IRQS (32768 + 16) /* marvel - 32 pids*/ - -#else /* everyone else */ -# define NR_IRQS 16 -#endif - -static __inline__ int irq_canonicalize(int irq) -{ - /* - * XXX is this true for all Alpha's? The old serial driver - * did it this way for years without any complaints, so.... - */ - return ((irq == 2) ? 9 : irq); -} - -struct pt_regs; -extern void (*perf_irq)(unsigned long, struct pt_regs *); - -#endif /* _ALPHA_IRQ_H */ diff --git a/include/asm-alpha/irq_regs.h b/include/asm-alpha/irq_regs.h deleted file mode 100644 index 3dd9c0b7027..00000000000 --- a/include/asm-alpha/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h deleted file mode 100644 index 964b06ead43..00000000000 --- a/include/asm-alpha/jensen.h +++ /dev/null @@ -1,346 +0,0 @@ -#ifndef __ALPHA_JENSEN_H -#define __ALPHA_JENSEN_H - -#include - -/* - * Defines for the AlphaPC EISA IO and memory address space. - */ - -/* - * NOTE! The memory operations do not set any memory barriers, as it's - * not needed for cases like a frame buffer that is essentially memory-like. - * You need to do them by hand if the operations depend on ordering. - * - * Similarly, the port IO operations do a "mb" only after a write operation: - * if an mb is needed before (as in the case of doing memory mapped IO - * first, and then a port IO operation to the same device), it needs to be - * done by hand. - * - * After the above has bitten me 100 times, I'll give up and just do the - * mb all the time, but right now I'm hoping this will work out. Avoiding - * mb's may potentially be a noticeable speed improvement, but I can't - * honestly say I've tested it. - * - * Handling interrupts that need to do mb's to synchronize to non-interrupts - * is another fun race area. Don't do it (because if you do, I'll have to - * do *everything* with interrupts disabled, ugh). 
- */ - -/* - * EISA Interrupt Acknowledge address - */ -#define EISA_INTA (IDENT_ADDR + 0x100000000UL) - -/* - * FEPROM addresses - */ -#define EISA_FEPROM0 (IDENT_ADDR + 0x180000000UL) -#define EISA_FEPROM1 (IDENT_ADDR + 0x1A0000000UL) - -/* - * VL82C106 base address - */ -#define EISA_VL82C106 (IDENT_ADDR + 0x1C0000000UL) - -/* - * EISA "Host Address Extension" address (bits 25-31 of the EISA address) - */ -#define EISA_HAE (IDENT_ADDR + 0x1D0000000UL) - -/* - * "SYSCTL" register address - */ -#define EISA_SYSCTL (IDENT_ADDR + 0x1E0000000UL) - -/* - * "spare" register address - */ -#define EISA_SPARE (IDENT_ADDR + 0x1F0000000UL) - -/* - * EISA memory address offset - */ -#define EISA_MEM (IDENT_ADDR + 0x200000000UL) - -/* - * EISA IO address offset - */ -#define EISA_IO (IDENT_ADDR + 0x300000000UL) - - -#ifdef __KERNEL__ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __IO_EXTERN_INLINE -#endif - -/* - * Handle the "host address register". This needs to be set - * to the high 7 bits of the EISA address. This is also needed - * for EISA IO addresses, which are only 16 bits wide (the - * hae needs to be set to 0). - * - * HAE isn't needed for the local IO operations, though. - */ - -#define JENSEN_HAE_ADDRESS EISA_HAE -#define JENSEN_HAE_MASK 0x1ffffff - -__EXTERN_INLINE void jensen_set_hae(unsigned long addr) -{ - /* hae on the Jensen is bits 31:25 shifted right */ - addr >>= 25; - if (addr != alpha_mv.hae_cache) - set_hae(addr); -} - -#define vuip volatile unsigned int * - -/* - * IO functions - * - * The "local" functions are those that don't go out to the EISA bus, - * but instead act on the VL82C106 chip directly.. This is mainly the - * keyboard, RTC, printer and first two serial lines.. - * - * The local stuff makes for some complications, but it seems to be - * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H - * convinced that I need one of the newer machines. - */ - -static inline unsigned int jensen_local_inb(unsigned long addr) -{ - return 0xff & *(vuip)((addr << 9) + EISA_VL82C106); -} - -static inline void jensen_local_outb(u8 b, unsigned long addr) -{ - *(vuip)((addr << 9) + EISA_VL82C106) = b; - mb(); -} - -static inline unsigned int jensen_bus_inb(unsigned long addr) -{ - long result; - - jensen_set_hae(0); - result = *(volatile int *)((addr << 7) + EISA_IO + 0x00); - return __kernel_extbl(result, addr & 3); -} - -static inline void jensen_bus_outb(u8 b, unsigned long addr) -{ - jensen_set_hae(0); - *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101; - mb(); -} - -/* - * It seems gcc is not very good at optimizing away logical - * operations that result in operations across inline functions. - * Which is why this is a macro. 
- */ - -#define jensen_is_local(addr) ( \ -/* keyboard */ (addr == 0x60 || addr == 0x64) || \ -/* RTC */ (addr == 0x170 || addr == 0x171) || \ -/* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \ -/* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \ -/* mb COM2 */ (addr >= 0x3f8 && addr <= 0x3ff)) - -__EXTERN_INLINE u8 jensen_inb(unsigned long addr) -{ - if (jensen_is_local(addr)) - return jensen_local_inb(addr); - else - return jensen_bus_inb(addr); -} - -__EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr) -{ - if (jensen_is_local(addr)) - jensen_local_outb(b, addr); - else - jensen_bus_outb(b, addr); -} - -__EXTERN_INLINE u16 jensen_inw(unsigned long addr) -{ - long result; - - jensen_set_hae(0); - result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20); - result >>= (addr & 3) * 8; - return 0xffffUL & result; -} - -__EXTERN_INLINE u32 jensen_inl(unsigned long addr) -{ - jensen_set_hae(0); - return *(vuip) ((addr << 7) + EISA_IO + 0x60); -} - -__EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr) -{ - jensen_set_hae(0); - *(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001; - mb(); -} - -__EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr) -{ - jensen_set_hae(0); - *(vuip) ((addr << 7) + EISA_IO + 0x60) = b; - mb(); -} - -/* - * Memory functions. - */ - -__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - long result; - - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00); - result >>= (addr & 3) * 8; - return 0xffUL & result; -} - -__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - long result; - - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20); - result >>= (addr & 3) * 8; - return 0xffffUL & result; -} - -__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - return *(vuip) ((addr << 7) + EISA_MEM + 0x60); -} - -__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - unsigned long r0, r1; - - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - addr = (addr << 7) + EISA_MEM + 0x60; - r0 = *(vuip) (addr); - r1 = *(vuip) (addr + (4 << 7)); - return r1 << 32 | r0; -} - -__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101; -} - -__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001; -} - -__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b; -} - -__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long) xaddr; - jensen_set_hae(addr); - addr &= JENSEN_HAE_MASK; - addr = (addr << 7) + EISA_MEM + 0x60; - *(vuip) (addr) = b; - *(vuip) (addr + (4 << 7)) = b >> 32; -} - -__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr) -{ - return (void __iomem *)addr; -} - 
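The jensen_inb()/jensen_outb() pair above illustrates a small dispatch pattern: a constant-foldable predicate decides whether a port number belongs to the on-board VL82C106 ("local") devices or has to go out over the EISA bus, and the call is routed to the matching accessor. Below is a minimal user-space sketch of that pattern, not the kernel code itself; the port ranges are copied from the jensen_is_local() macro above, while every other name (demo_is_local, demo_inb, the printf harness) is a made-up stand-in for illustration only.

#include <stdio.h>

/* Same port ranges as the jensen_is_local() macro above (keyboard, RTC,
   serial and parallel ports).  Kept as a macro so the comparisons fold
   away when the port number is a compile-time constant. */
#define demo_is_local(addr) (               \
    ((addr) == 0x60 || (addr) == 0x64) ||   \
    ((addr) == 0x170 || (addr) == 0x171) || \
    ((addr) >= 0x2f8 && (addr) <= 0x2ff) || \
    ((addr) >= 0x3bc && (addr) <= 0x3be) || \
    ((addr) >= 0x3f8 && (addr) <= 0x3ff))

/* Hypothetical stand-ins for the two access paths. */
static unsigned int demo_local_inb(unsigned long addr)
{
    printf("port 0x%03lx -> local (VL82C106) path\n", addr);
    return 0;
}

static unsigned int demo_bus_inb(unsigned long addr)
{
    printf("port 0x%03lx -> EISA bus path\n", addr);
    return 0;
}

/* Mirrors the structure of jensen_inb(): route by predicate. */
static unsigned int demo_inb(unsigned long addr)
{
    return demo_is_local(addr) ? demo_local_inb(addr) : demo_bus_inb(addr);
}

int main(void)
{
    demo_inb(0x60);   /* keyboard controller: local path */
    demo_inb(0x2f8);  /* serial port: local path         */
    demo_inb(0x1f0);  /* anything else goes to the bus   */
    return 0;
}

Keeping the predicate a macro rather than an inline function lets the compiler fold the comparisons away whenever the port number is a constant, which is the point of the "why this is a macro" comment above.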
-__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr, - unsigned long size) -{ - return (void __iomem *)(addr + 0x100000000ul); -} - -__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr) -{ - return (long)addr >= 0; -} - -__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr) -{ - return (unsigned long)addr >= 0x100000000ul; -} - -/* New-style ioread interface. All the routines are so ugly for Jensen - that it doesn't make sense to merge them. */ - -#define IOPORT(OS, NS) \ -__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \ -{ \ - if (jensen_is_mmio(xaddr)) \ - return jensen_read##OS(xaddr - 0x100000000ul); \ - else \ - return jensen_in##OS((unsigned long)xaddr); \ -} \ -__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \ -{ \ - if (jensen_is_mmio(xaddr)) \ - jensen_write##OS(b, xaddr - 0x100000000ul); \ - else \ - jensen_out##OS(b, (unsigned long)xaddr); \ -} - -IOPORT(b, 8) -IOPORT(w, 16) -IOPORT(l, 32) - -#undef IOPORT - -#undef vuip - -#undef __IO_PREFIX -#define __IO_PREFIX jensen -#define jensen_trivial_rw_bw 0 -#define jensen_trivial_rw_lq 0 -#define jensen_trivial_io_bw 0 -#define jensen_trivial_io_lq 0 -#define jensen_trivial_iounmap 1 -#include - -#ifdef __IO_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __IO_EXTERN_INLINE -#endif - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_JENSEN_H */ diff --git a/include/asm-alpha/kdebug.h b/include/asm-alpha/kdebug.h deleted file mode 100644 index 6ece1b03766..00000000000 --- a/include/asm-alpha/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-alpha/kmap_types.h b/include/asm-alpha/kmap_types.h deleted file mode 100644 index 3e6735a34c5..00000000000 --- a/include/asm-alpha/kmap_types.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef _ASM_KMAP_TYPES_H -#define _ASM_KMAP_TYPES_H - -/* Dummy header just to define km_type. */ - - -#ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) -#endif - -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; - -#undef D - -#endif diff --git a/include/asm-alpha/linkage.h b/include/asm-alpha/linkage.h deleted file mode 100644 index 291c2d01c44..00000000000 --- a/include/asm-alpha/linkage.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_LINKAGE_H -#define __ASM_LINKAGE_H - -/* Nothing to see here... 
*/ - -#endif diff --git a/include/asm-alpha/local.h b/include/asm-alpha/local.h deleted file mode 100644 index 6ad3ea69642..00000000000 --- a/include/asm-alpha/local.h +++ /dev/null @@ -1,118 +0,0 @@ -#ifndef _ALPHA_LOCAL_H -#define _ALPHA_LOCAL_H - -#include -#include - -typedef struct -{ - atomic_long_t a; -} local_t; - -#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } -#define local_read(l) atomic_long_read(&(l)->a) -#define local_set(l,i) atomic_long_set(&(l)->a, (i)) -#define local_inc(l) atomic_long_inc(&(l)->a) -#define local_dec(l) atomic_long_dec(&(l)->a) -#define local_add(i,l) atomic_long_add((i),(&(l)->a)) -#define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) - -static __inline__ long local_add_return(long i, local_t * l) -{ - long temp, result; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " addq %0,%3,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) - :"Ir" (i), "m" (l->a.counter) : "memory"); - return result; -} - -static __inline__ long local_sub_return(long i, local_t * l) -{ - long temp, result; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " subq %0,%3,%2\n" - " subq %0,%3,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) - :"Ir" (i), "m" (l->a.counter) : "memory"); - return result; -} - -#define local_cmpxchg(l, o, n) \ - (cmpxchg_local(&((l)->a.counter), (o), (n))) -#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) - -/** - * local_add_unless - add unless the number is a given value - * @l: pointer of type local_t - * @a: the amount to add to l... - * @u: ...unless l is equal to u. - * - * Atomically adds @a to @l, so long as it was not @u. - * Returns non-zero if @l was not @u, and zero otherwise. - */ -#define local_add_unless(l, a, u) \ -({ \ - long c, old; \ - c = local_read(l); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = local_cmpxchg((l), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) -#define local_inc_not_zero(l) local_add_unless((l), 1, 0) - -#define local_add_negative(a, l) (local_add_return((a), (l)) < 0) - -#define local_dec_return(l) local_sub_return(1,(l)) - -#define local_inc_return(l) local_add_return(1,(l)) - -#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0) - -#define local_inc_and_test(l) (local_add_return(1, (l)) == 0) - -#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) - -/* Verify if faster than atomic ops */ -#define __local_inc(l) ((l)->a.counter++) -#define __local_dec(l) ((l)->a.counter++) -#define __local_add(i,l) ((l)->a.counter+=(i)) -#define __local_sub(i,l) ((l)->a.counter-=(i)) - -/* Use these for per-cpu local_t variables: on some archs they are - * much more efficient than these naive implementations. Note they take - * a variable, not an address. 
- */ -#define cpu_local_read(l) local_read(&__get_cpu_var(l)) -#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i)) - -#define cpu_local_inc(l) local_inc(&__get_cpu_var(l)) -#define cpu_local_dec(l) local_dec(&__get_cpu_var(l)) -#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l)) -#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l)) - -#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l)) -#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l)) -#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l)) -#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l)) - -#endif /* _ALPHA_LOCAL_H */ diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h deleted file mode 100644 index a86c083cdf7..00000000000 --- a/include/asm-alpha/machvec.h +++ /dev/null @@ -1,134 +0,0 @@ -#ifndef __ALPHA_MACHVEC_H -#define __ALPHA_MACHVEC_H 1 - -#include - -/* - * This file gets pulled in by asm/io.h from user space. We don't - * want most of this escaping. - */ - -#ifdef __KERNEL__ - -/* The following structure vectors all of the I/O and IRQ manipulation - from the generic kernel to the hardware specific backend. */ - -struct task_struct; -struct mm_struct; -struct vm_area_struct; -struct linux_hose_info; -struct pci_dev; -struct pci_ops; -struct pci_controller; -struct _alpha_agp_info; - -struct alpha_machine_vector -{ - /* This "belongs" down below with the rest of the runtime - variables, but it is convenient for entry.S if these - two slots are at the beginning of the struct. */ - unsigned long hae_cache; - unsigned long *hae_register; - - int nr_irqs; - int rtc_port; - unsigned int max_asn; - unsigned long max_isa_dma_address; - unsigned long irq_probe_mask; - unsigned long iack_sc; - unsigned long min_io_address; - unsigned long min_mem_address; - unsigned long pci_dac_offset; - - void (*mv_pci_tbi)(struct pci_controller *hose, - dma_addr_t start, dma_addr_t end); - - unsigned int (*mv_ioread8)(void __iomem *); - unsigned int (*mv_ioread16)(void __iomem *); - unsigned int (*mv_ioread32)(void __iomem *); - - void (*mv_iowrite8)(u8, void __iomem *); - void (*mv_iowrite16)(u16, void __iomem *); - void (*mv_iowrite32)(u32, void __iomem *); - - u8 (*mv_readb)(const volatile void __iomem *); - u16 (*mv_readw)(const volatile void __iomem *); - u32 (*mv_readl)(const volatile void __iomem *); - u64 (*mv_readq)(const volatile void __iomem *); - - void (*mv_writeb)(u8, volatile void __iomem *); - void (*mv_writew)(u16, volatile void __iomem *); - void (*mv_writel)(u32, volatile void __iomem *); - void (*mv_writeq)(u64, volatile void __iomem *); - - void __iomem *(*mv_ioportmap)(unsigned long); - void __iomem *(*mv_ioremap)(unsigned long, unsigned long); - void (*mv_iounmap)(volatile void __iomem *); - int (*mv_is_ioaddr)(unsigned long); - int (*mv_is_mmio)(const volatile void __iomem *); - - void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *, - struct task_struct *); - void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *); - - void (*mv_flush_tlb_current)(struct mm_struct *); - void (*mv_flush_tlb_current_page)(struct mm_struct * mm, - struct vm_area_struct *vma, - unsigned long addr); - - void (*update_irq_hw)(unsigned long, unsigned long, int); - void (*ack_irq)(unsigned long); - void (*device_interrupt)(unsigned long vector); - void (*machine_check)(u64 vector, u64 la); - - void (*smp_callin)(void); - void (*init_arch)(void); - void (*init_irq)(void); - void (*init_rtc)(void); - void (*init_pci)(void); - void (*kill_arch)(int); - - 
u8 (*pci_swizzle)(struct pci_dev *, u8 *); - int (*pci_map_irq)(struct pci_dev *, u8, u8); - struct pci_ops *pci_ops; - - struct _alpha_agp_info *(*agp_info)(void); - - const char *vector_name; - - /* NUMA information */ - int (*pa_to_nid)(unsigned long); - int (*cpuid_to_nid)(int); - unsigned long (*node_mem_start)(int); - unsigned long (*node_mem_size)(int); - - /* System specific parameters. */ - union { - struct { - unsigned long gru_int_req_bits; - } cia; - - struct { - unsigned long gamma_bias; - } t2; - - struct { - unsigned int route_tab; - } sio; - } sys; -}; - -extern struct alpha_machine_vector alpha_mv; - -#ifdef CONFIG_ALPHA_GENERIC -extern int alpha_using_srm; -#else -#ifdef CONFIG_ALPHA_SRM -#define alpha_using_srm 1 -#else -#define alpha_using_srm 0 -#endif -#endif /* GENERIC */ - -#endif -#endif /* __ALPHA_MACHVEC_H */ diff --git a/include/asm-alpha/mc146818rtc.h b/include/asm-alpha/mc146818rtc.h deleted file mode 100644 index 097703f1c8c..00000000000 --- a/include/asm-alpha/mc146818rtc.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Machine dependent access functions for RTC registers. - */ -#ifndef __ASM_ALPHA_MC146818RTC_H -#define __ASM_ALPHA_MC146818RTC_H - -#include - -#ifndef RTC_PORT -#define RTC_PORT(x) (0x70 + (x)) -#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ -#endif - -/* - * The yet supported machines all access the RTC index register via - * an ISA port access but the way to access the date register differs ... - */ -#define CMOS_READ(addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -inb_p(RTC_PORT(1)); \ -}) -#define CMOS_WRITE(val, addr) ({ \ -outb_p((addr),RTC_PORT(0)); \ -outb_p((val),RTC_PORT(1)); \ -}) - -#endif /* __ASM_ALPHA_MC146818RTC_H */ diff --git a/include/asm-alpha/md.h b/include/asm-alpha/md.h deleted file mode 100644 index 6c9b8222a4f..00000000000 --- a/include/asm-alpha/md.h +++ /dev/null @@ -1,13 +0,0 @@ -/* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $ - * md.h: High speed xor_block operation for RAID4/5 - * - */ - -#ifndef __ASM_MD_H -#define __ASM_MD_H - -/* #define HAVE_ARCH_XORBLOCK */ - -#define MD_XORBLOCK_ALIGNMENT sizeof(long) - -#endif /* __ASM_MD_H */ diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h deleted file mode 100644 index 90d7c35d286..00000000000 --- a/include/asm-alpha/mman.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef __ALPHA_MMAN_H__ -#define __ALPHA_MMAN_H__ - -#define PROT_READ 0x1 /* page can be read */ -#define PROT_WRITE 0x2 /* page can be written */ -#define PROT_EXEC 0x4 /* page can be executed */ -#define PROT_SEM 0x8 /* page may be used for atomic ops */ -#define PROT_NONE 0x0 /* page can not be accessed */ -#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ -#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ - -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */ -#define MAP_FIXED 0x100 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x10 /* don't use a file */ - -/* not used by linux, but here to make sure we don't clash with OSF/1 defines */ -#define _MAP_HASSEMAPHORE 0x0200 -#define _MAP_INHERIT 0x0400 -#define _MAP_UNALIGNED 0x0800 - -/* These are linux-specific */ -#define MAP_GROWSDOWN 0x01000 /* stack-like segment */ -#define MAP_DENYWRITE 0x02000 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x04000 /* mark it as an executable */ -#define MAP_LOCKED 0x08000 /* lock the mapping */ -#define 
MAP_NORESERVE 0x10000 /* don't check for reservations */ -#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x40000 /* do not block on IO */ - -#define MS_ASYNC 1 /* sync memory asynchronously */ -#define MS_SYNC 2 /* synchronous memory sync */ -#define MS_INVALIDATE 4 /* invalidate the caches */ - -#define MCL_CURRENT 8192 /* lock all currently mapped pages */ -#define MCL_FUTURE 16384 /* lock all additions to address space */ - -#define MADV_NORMAL 0 /* no further special treatment */ -#define MADV_RANDOM 1 /* expect random page references */ -#define MADV_SEQUENTIAL 2 /* expect sequential page references */ -#define MADV_WILLNEED 3 /* will need these pages */ -#define MADV_SPACEAVAIL 5 /* ensure resources are available */ -#define MADV_DONTNEED 6 /* don't need these pages */ - -/* common/generic parameters */ -#define MADV_REMOVE 9 /* remove these pages & resources */ -#define MADV_DONTFORK 10 /* don't inherit across fork */ -#define MADV_DOFORK 11 /* do inherit across fork */ - -/* compatibility flags */ -#define MAP_FILE 0 - -#endif /* __ALPHA_MMAN_H__ */ diff --git a/include/asm-alpha/mmu.h b/include/asm-alpha/mmu.h deleted file mode 100644 index 3dc12777932..00000000000 --- a/include/asm-alpha/mmu.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ALPHA_MMU_H -#define __ALPHA_MMU_H - -/* The alpha MMU context is one "unsigned long" bitmap per CPU */ -typedef unsigned long mm_context_t[NR_CPUS]; - -#endif diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h deleted file mode 100644 index 86c08a02d23..00000000000 --- a/include/asm-alpha/mmu_context.h +++ /dev/null @@ -1,260 +0,0 @@ -#ifndef __ALPHA_MMU_CONTEXT_H -#define __ALPHA_MMU_CONTEXT_H - -/* - * get a new mmu context.. - * - * Copyright (C) 1996, Linus Torvalds - */ - -#include -#include -#include -#include - -/* - * Force a context reload. This is needed when we change the page - * table pointer or when we update the ASN of the current process. - */ - -/* Don't get into trouble with dueling __EXTERN_INLINEs. */ -#ifndef __EXTERN_INLINE -#include -#endif - - -static inline unsigned long -__reload_thread(struct pcb_struct *pcb) -{ - register unsigned long a0 __asm__("$16"); - register unsigned long v0 __asm__("$0"); - - a0 = virt_to_phys(pcb); - __asm__ __volatile__( - "call_pal %2 #__reload_thread" - : "=r"(v0), "=r"(a0) - : "i"(PAL_swpctx), "r"(a0) - : "$1", "$22", "$23", "$24", "$25"); - - return v0; -} - - -/* - * The maximum ASN's the processor supports. On the EV4 this is 63 - * but the PAL-code doesn't actually use this information. On the - * EV5 this is 127, and EV6 has 255. - * - * On the EV4, the ASNs are more-or-less useless anyway, as they are - * only used as an icache tag, not for TB entries. On the EV5 and EV6, - * ASN's also validate the TB entries, and thus make a lot more sense. - * - * The EV4 ASN's don't even match the architecture manual, ugh. And - * I quote: "If a processor implements address space numbers (ASNs), - * and the old PTE has the Address Space Match (ASM) bit clear (ASNs - * in use) and the Valid bit set, then entries can also effectively be - * made coherent by assigning a new, unused ASN to the currently - * running process and not reusing the previous ASN before calling the - * appropriate PALcode routine to invalidate the translation buffer (TB)". - * - * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually - * work correctly and can thus not be used (explaining the lack of PAL-code - * support). 
- */ -#define EV4_MAX_ASN 63 -#define EV5_MAX_ASN 127 -#define EV6_MAX_ASN 255 - -#ifdef CONFIG_ALPHA_GENERIC -# define MAX_ASN (alpha_mv.max_asn) -#else -# ifdef CONFIG_ALPHA_EV4 -# define MAX_ASN EV4_MAX_ASN -# elif defined(CONFIG_ALPHA_EV5) -# define MAX_ASN EV5_MAX_ASN -# else -# define MAX_ASN EV6_MAX_ASN -# endif -#endif - -/* - * cpu_last_asn(processor): - * 63 0 - * +-------------+----------------+--------------+ - * | asn version | this processor | hardware asn | - * +-------------+----------------+--------------+ - */ - -#include -#ifdef CONFIG_SMP -#define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn) -#else -extern unsigned long last_asn; -#define cpu_last_asn(cpuid) last_asn -#endif /* CONFIG_SMP */ - -#define WIDTH_HARDWARE_ASN 8 -#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN) -#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1) - -/* - * NOTE! The way this is set up, the high bits of the "asn_cache" (and - * the "mm->context") are the ASN _version_ code. A version of 0 is - * always considered invalid, so to invalidate another process you only - * need to do "p->mm->context = 0". - * - * If we need more ASN's than the processor has, we invalidate the old - * user TLB's (tbiap()) and start a new ASN version. That will automatically - * force a new asn for any other processes the next time they want to - * run. - */ - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __MMU_EXTERN_INLINE -#endif - -extern inline unsigned long -__get_new_mm_context(struct mm_struct *mm, long cpu) -{ - unsigned long asn = cpu_last_asn(cpu); - unsigned long next = asn + 1; - - if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) { - tbiap(); - imb(); - next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION; - } - cpu_last_asn(cpu) = next; - return next; -} - -__EXTERN_INLINE void -ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, - struct task_struct *next) -{ - /* Check if our ASN is of an older version, and thus invalid. */ - unsigned long asn; - unsigned long mmc; - long cpu = smp_processor_id(); - -#ifdef CONFIG_SMP - cpu_data[cpu].asn_lock = 1; - barrier(); -#endif - asn = cpu_last_asn(cpu); - mmc = next_mm->context[cpu]; - if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) { - mmc = __get_new_mm_context(next_mm, cpu); - next_mm->context[cpu] = mmc; - } -#ifdef CONFIG_SMP - else - cpu_data[cpu].need_new_asn = 1; -#endif - - /* Always update the PCB ASN. Another thread may have allocated - a new mm->context (via flush_tlb_mm) without the ASN serial - number wrapping. We have no way to detect when this is needed. */ - task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK; -} - -__EXTERN_INLINE void -ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, - struct task_struct *next) -{ - /* As described, ASN's are broken for TLB usage. But we can - optimize for switching between threads -- if the mm is - unchanged from current we needn't flush. */ - /* ??? May not be needed because EV4 PALcode recognizes that - ASN's are broken and does a tbiap itself on swpctx, under - the "Must set ASN or flush" rule. At least this is true - for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com). - I'm going to leave this here anyway, just to Be Sure. -- r~ */ - if (prev_mm != next_mm) - tbiap(); - - /* Do continue to allocate ASNs, because we can still use them - to avoid flushing the icache. 
*/ - ev5_switch_mm(prev_mm, next_mm, next); -} - -extern void __load_new_mm_context(struct mm_struct *); - -#ifdef CONFIG_SMP -#define check_mmu_context() \ -do { \ - int cpu = smp_processor_id(); \ - cpu_data[cpu].asn_lock = 0; \ - barrier(); \ - if (cpu_data[cpu].need_new_asn) { \ - struct mm_struct * mm = current->active_mm; \ - cpu_data[cpu].need_new_asn = 0; \ - if (!mm->context[cpu]) \ - __load_new_mm_context(mm); \ - } \ -} while(0) -#else -#define check_mmu_context() do { } while(0) -#endif - -__EXTERN_INLINE void -ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) -{ - __load_new_mm_context(next_mm); -} - -__EXTERN_INLINE void -ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) -{ - __load_new_mm_context(next_mm); - tbiap(); -} - -#define deactivate_mm(tsk,mm) do { } while (0) - -#ifdef CONFIG_ALPHA_GENERIC -# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c)) -# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y)) -#else -# ifdef CONFIG_ALPHA_EV4 -# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c)) -# define activate_mm(x,y) ev4_activate_mm((x),(y)) -# else -# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c)) -# define activate_mm(x,y) ev5_activate_mm((x),(y)) -# endif -#endif - -static inline int -init_new_context(struct task_struct *tsk, struct mm_struct *mm) -{ - int i; - - for_each_online_cpu(i) - mm->context[i] = 0; - if (tsk != current) - task_thread_info(tsk)->pcb.ptbr - = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; - return 0; -} - -extern inline void -destroy_context(struct mm_struct *mm) -{ - /* Nothing to do. */ -} - -static inline void -enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ - task_thread_info(tsk)->pcb.ptbr - = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; -} - -#ifdef __MMU_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __MMU_EXTERN_INLINE -#endif - -#endif /* __ALPHA_MMU_CONTEXT_H */ diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h deleted file mode 100644 index 8af56ce346a..00000000000 --- a/include/asm-alpha/mmzone.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99 - * Adapted for the alpha wildfire architecture Jan 2001. - */ -#ifndef _ASM_MMZONE_H_ -#define _ASM_MMZONE_H_ - -#include - -struct bootmem_data_t; /* stupid forward decl. */ - -/* - * Following are macros that are specific to this numa platform. - */ - -extern pg_data_t node_data[]; - -#define alpha_pa_to_nid(pa) \ - (alpha_mv.pa_to_nid \ - ? alpha_mv.pa_to_nid(pa) \ - : (0)) -#define node_mem_start(nid) \ - (alpha_mv.node_mem_start \ - ? alpha_mv.node_mem_start(nid) \ - : (0UL)) -#define node_mem_size(nid) \ - (alpha_mv.node_mem_size \ - ? alpha_mv.node_mem_size(nid) \ - : ((nid) ? (0UL) : (~0UL))) - -#define pa_to_nid(pa) alpha_pa_to_nid(pa) -#define NODE_DATA(nid) (&node_data[(nid)]) - -#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) - -#if 1 -#define PLAT_NODE_DATA_LOCALNR(p, n) \ - (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn) -#else -static inline unsigned long -PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) -{ - unsigned long temp; - temp = p >> PAGE_SHIFT; - return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn; -} -#endif - -#ifdef CONFIG_DISCONTIGMEM - -/* - * Following are macros that each numa implementation must define. - */ - -/* - * Given a kernel address, find the home node of the underlying memory. 
- */ -#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) - -/* - * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory - * and returns the kaddr corresponding to first physical page in the - * node's mem_map. - */ -#define LOCAL_BASE_ADDR(kaddr) \ - ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \ - << PAGE_SHIFT)) - -/* XXX: FIXME -- wli */ -#define kern_addr_valid(kaddr) (0) - -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) - -#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) - -#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32)) -#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32)) -#define pte_pfn(pte) (pte_val(pte) >> 32) - -#define mk_pte(page, pgprot) \ -({ \ - pte_t pte; \ - unsigned long pfn; \ - \ - pfn = page_to_pfn(page) << 32; \ - pte_val(pte) = pfn | pgprot_val(pgprot); \ - \ - pte; \ -}) - -#define pte_page(x) \ -({ \ - unsigned long kvirt; \ - struct page * __xx; \ - \ - kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \ - __xx = virt_to_page(kvirt); \ - \ - __xx; \ -}) - -#define page_to_pa(page) \ - (page_to_pfn(page) << PAGE_SHIFT) - -#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) -#define pfn_valid(pfn) \ - (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \ - node_spanned_pages(pfn_to_nid(pfn))) \ - -#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT)) - -#endif /* CONFIG_DISCONTIGMEM */ - -#endif /* _ASM_MMZONE_H_ */ diff --git a/include/asm-alpha/module.h b/include/asm-alpha/module.h deleted file mode 100644 index 7b63743c534..00000000000 --- a/include/asm-alpha/module.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _ALPHA_MODULE_H -#define _ALPHA_MODULE_H - -struct mod_arch_specific -{ - unsigned int gotsecindex; -}; - -#define Elf_Sym Elf64_Sym -#define Elf_Shdr Elf64_Shdr -#define Elf_Ehdr Elf64_Ehdr -#define Elf_Phdr Elf64_Phdr -#define Elf_Dyn Elf64_Dyn -#define Elf_Rel Elf64_Rel -#define Elf_Rela Elf64_Rela - -#define ARCH_SHF_SMALL SHF_ALPHA_GPREL - -#ifdef MODULE -asm(".section .got,\"aws\",@progbits; .align 3; .previous"); -#endif - -#endif /*_ALPHA_MODULE_H*/ diff --git a/include/asm-alpha/msgbuf.h b/include/asm-alpha/msgbuf.h deleted file mode 100644 index 98496501a2b..00000000000 --- a/include/asm-alpha/msgbuf.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef _ALPHA_MSGBUF_H -#define _ALPHA_MSGBUF_H - -/* - * The msqid64_ds structure for alpha architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - __kernel_time_t msg_rtime; /* last msgrcv time */ - __kernel_time_t msg_ctime; /* last change time */ - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _ALPHA_MSGBUF_H */ diff --git a/include/asm-alpha/mutex.h b/include/asm-alpha/mutex.h deleted file mode 100644 index 458c1f7fbc1..00000000000 --- a/include/asm-alpha/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. 
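The DISCONTIGMEM accessors above all lean on the same layout: the page frame number sits in the upper 32 bits of a pte with the protection bits below it, so mk_pte() is a shift-and-or and pte_pfn() a plain shift. A minimal round-trip sketch; the helper names are illustrative and only the _PAGE_VALID value is taken from the real pgtable.h:

#include <stdio.h>

#define _PAGE_VALID     0x0001UL        /* from asm-alpha/pgtable.h */

static unsigned long mk_pte_val(unsigned long pfn, unsigned long pgprot)
{
        return (pfn << 32) | pgprot;    /* what mk_pte() expands to */
}

static unsigned long pte_pfn(unsigned long pte)
{
        return pte >> 32;               /* what pte_pfn() expands to */
}

int main(void)
{
        unsigned long pte = mk_pte_val(0x1234, _PAGE_VALID);

        printf("pte %#lx -> pfn %#lx, prot bits %#lx\n",
               pte, pte_pfn(pte), pte & 0xffffffffUL);
        return 0;
}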
- * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. (see asm-generic/mutex-xchg.h for details) - */ - -#include diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h deleted file mode 100644 index 0995f9d1341..00000000000 --- a/include/asm-alpha/page.h +++ /dev/null @@ -1,98 +0,0 @@ -#ifndef _ALPHA_PAGE_H -#define _ALPHA_PAGE_H - -#include -#include - -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 13 -#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - -#ifndef __ASSEMBLY__ - -#define STRICT_MM_TYPECHECKS - -extern void clear_page(void *page); -#define clear_user_page(page, vaddr, pg) clear_page(page) - -#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr) -#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE - -extern void copy_page(void * _to, void * _from); -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -#ifdef STRICT_MM_TYPECHECKS -/* - * These are used to make use of C type-checking.. - */ -typedef struct { unsigned long pte; } pte_t; -typedef struct { unsigned long pmd; } pmd_t; -typedef struct { unsigned long pgd; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((x).pmd) -#define pgd_val(x) ((x).pgd) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgd(x) ((pgd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#else -/* - * .. while these make it easier on the compiler - */ -typedef unsigned long pte_t; -typedef unsigned long pmd_t; -typedef unsigned long pgd_t; -typedef unsigned long pgprot_t; - -#define pte_val(x) (x) -#define pmd_val(x) (x) -#define pgd_val(x) (x) -#define pgprot_val(x) (x) - -#define __pte(x) (x) -#define __pgd(x) (x) -#define __pgprot(x) (x) - -#endif /* STRICT_MM_TYPECHECKS */ - -typedef struct page *pgtable_t; - -#ifdef USE_48_BIT_KSEG -#define PAGE_OFFSET 0xffff800000000000UL -#else -#define PAGE_OFFSET 0xfffffc0000000000UL -#endif - -#else - -#ifdef USE_48_BIT_KSEG -#define PAGE_OFFSET 0xffff800000000000 -#else -#define PAGE_OFFSET 0xfffffc0000000000 -#endif - -#endif /* !__ASSEMBLY__ */ - -#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) -#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) -#ifndef CONFIG_DISCONTIGMEM -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) - -#define pfn_valid(pfn) ((pfn) < max_mapnr) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#endif /* CONFIG_DISCONTIGMEM */ - -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#include -#include - -#endif /* _ALPHA_PAGE_H */ diff --git a/include/asm-alpha/pal.h b/include/asm-alpha/pal.h deleted file mode 100644 index 9b4ba0d6f00..00000000000 --- a/include/asm-alpha/pal.h +++ /dev/null @@ -1,51 +0,0 @@ -#ifndef __ALPHA_PAL_H -#define __ALPHA_PAL_H - -/* - * Common PAL-code - */ -#define PAL_halt 0 -#define PAL_cflush 1 -#define PAL_draina 2 -#define PAL_bpt 128 -#define PAL_bugchk 129 -#define PAL_chmk 131 -#define PAL_callsys 131 -#define PAL_imb 134 -#define PAL_rduniq 158 -#define PAL_wruniq 159 -#define PAL_gentrap 170 -#define PAL_nphalt 190 - -/* - * VMS specific PAL-code - */ -#define PAL_swppal 10 -#define PAL_mfpr_vptb 41 - -/* - * OSF specific PAL-code - */ 
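The STRICT_MM_TYPECHECKS block in page.h above wraps each page-table word in a one-member struct purely so the compiler can tell the levels apart; the alternative typedefs collapse them all to unsigned long and trade that safety for easier code generation. A short sketch of what the wrappers buy, with a made-up set_pte_slot() helper standing in for any function that takes a pte_t:

#include <stdio.h>

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pte_val(x)      ((x).pte)
#define __pte(x)        ((pte_t) { (x) })
#define __pgd(x)        ((pgd_t) { (x) })

static void set_pte_slot(pte_t *slot, pte_t val)        /* illustrative helper */
{
        *slot = val;
}

int main(void)
{
        pte_t pte = __pte(0x1234UL);
        pgd_t pgd = __pgd(0x5678UL);

        set_pte_slot(&pte, pte);        /* fine */
        /* set_pte_slot(&pte, pgd);        rejected at compile time with the
                                           struct types; with the bare unsigned
                                           long typedefs it compiles silently */
        printf("pte_val = %#lx, pgd value = %#lx\n", pte_val(pte), pgd.pgd);
        return 0;
}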
-#define PAL_cserve 9 -#define PAL_wripir 13 -#define PAL_rdmces 16 -#define PAL_wrmces 17 -#define PAL_wrfen 43 -#define PAL_wrvptptr 45 -#define PAL_jtopal 46 -#define PAL_swpctx 48 -#define PAL_wrval 49 -#define PAL_rdval 50 -#define PAL_tbi 51 -#define PAL_wrent 52 -#define PAL_swpipl 53 -#define PAL_rdps 54 -#define PAL_wrkgp 55 -#define PAL_wrusp 56 -#define PAL_wrperfmon 57 -#define PAL_rdusp 58 -#define PAL_whami 60 -#define PAL_retsys 61 -#define PAL_rti 63 - -#endif /* __ALPHA_PAL_H */ diff --git a/include/asm-alpha/param.h b/include/asm-alpha/param.h deleted file mode 100644 index e691ecfedb2..00000000000 --- a/include/asm-alpha/param.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef _ASM_ALPHA_PARAM_H -#define _ASM_ALPHA_PARAM_H - -/* ??? Gross. I don't want to parameterize this, and supposedly the - hardware ignores reprogramming. We also need userland buy-in to the - change in HZ, since this is visible in the wait4 resources etc. */ - -#ifdef __KERNEL__ -#define HZ CONFIG_HZ -#define USER_HZ HZ -#else -#define HZ 1024 -#endif - -#define EXEC_PAGESIZE 8192 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - -#ifdef __KERNEL__ -# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ -#endif - -#endif /* _ASM_ALPHA_PARAM_H */ diff --git a/include/asm-alpha/parport.h b/include/asm-alpha/parport.h deleted file mode 100644 index c5ee7cbb2fc..00000000000 --- a/include/asm-alpha/parport.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * parport.h: platform-specific PC-style parport initialisation - * - * Copyright (C) 1999, 2000 Tim Waugh - * - * This file should only be included by drivers/parport/parport_pc.c. - */ - -#ifndef _ASM_AXP_PARPORT_H -#define _ASM_AXP_PARPORT_H 1 - -static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); -static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) -{ - return parport_pc_find_isa_ports (autoirq, autodma); -} - -#endif /* !(_ASM_AXP_PARPORT_H) */ diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h deleted file mode 100644 index 2a14302c17a..00000000000 --- a/include/asm-alpha/pci.h +++ /dev/null @@ -1,276 +0,0 @@ -#ifndef __ALPHA_PCI_H -#define __ALPHA_PCI_H - -#ifdef __KERNEL__ - -#include -#include -#include -#include - -/* - * The following structure is used to manage multiple PCI busses. - */ - -struct pci_dev; -struct pci_bus; -struct resource; -struct pci_iommu_arena; -struct page; - -/* A controller. Used to manage multiple PCI busses. */ - -struct pci_controller { - struct pci_controller *next; - struct pci_bus *bus; - struct resource *io_space; - struct resource *mem_space; - - /* The following are for reporting to userland. The invariant is - that if we report a BWX-capable dense memory, we do not report - a sparse memory at all, even if it exists. */ - unsigned long sparse_mem_base; - unsigned long dense_mem_base; - unsigned long sparse_io_base; - unsigned long dense_io_base; - - /* This one's for the kernel only. It's in KSEG somewhere. */ - unsigned long config_space_base; - - unsigned int index; - /* For compatibility with current (as of July 2003) pciutils - and XFree86. Eventually will be removed. */ - unsigned int need_domain_info; - - struct pci_iommu_arena *sg_pci; - struct pci_iommu_arena *sg_isa; - - void *sysdata; -}; - -/* Override the logic in pci_scan_bus for skipping already-configured - bus numbers. 
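The param.h note above about needing "userland buy-in" to an HZ change is the reason portable programs never hard-code the tick rate: interfaces that report in clock ticks, times() being the classic one, use USER_HZ, which userland should query at run time rather than assume 1024. A small, plain-POSIX illustration with nothing Alpha-specific in it:

#include <stdio.h>
#include <unistd.h>
#include <sys/times.h>

int main(void)
{
        long user_hz = sysconf(_SC_CLK_TCK);    /* USER_HZ as exported to userland */
        struct tms t;

        times(&t);
        printf("clock tick: %ld Hz\n", user_hz);
        printf("user CPU time so far: %.3f s\n", (double)t.tms_utime / user_hz);
        return 0;
}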
*/ - -#define pcibios_assign_all_busses() 1 -#define pcibios_scan_all_fns(a, b) 0 - -#define PCIBIOS_MIN_IO alpha_mv.min_io_address -#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address - -extern void pcibios_set_master(struct pci_dev *dev); - -extern inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - -/* IOMMU controls. */ - -/* The PCI address space does not equal the physical memory address space. - The networking and block device layers use this boolean for bounce buffer - decisions. */ -#define PCI_DMA_BUS_IS_PHYS 0 - -/* Allocate and map kernel buffer using consistent mode DMA for PCI - device. Returns non-NULL cpu-view pointer to the buffer if - successful and sets *DMA_ADDRP to the pci side dma address as well, - else DMA_ADDRP is undefined. */ - -extern void *__pci_alloc_consistent(struct pci_dev *, size_t, - dma_addr_t *, gfp_t); -static inline void * -pci_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma) -{ - return __pci_alloc_consistent(dev, size, dma, GFP_ATOMIC); -} - -/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must - be values that were returned from pci_alloc_consistent. SIZE must - be the same as what as passed into pci_alloc_consistent. - References to the memory and mappings associated with CPU_ADDR or - DMA_ADDR past this call are illegal. */ - -extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t); - -/* Map a single buffer of the indicate size for PCI DMA in streaming mode. - The 32-bit PCI bus mastering address to use is returned. Once the device - is given the dma address, the device owns this memory until either - pci_unmap_single or pci_dma_sync_single_for_cpu is performed. */ - -extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int); - -/* Likewise, but for a page instead of an address. */ -extern dma_addr_t pci_map_page(struct pci_dev *, struct page *, - unsigned long, size_t, int); - -/* Test for pci_map_single or pci_map_page having generated an error. */ - -static inline int -pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) -{ - return dma_addr == 0; -} - -/* Unmap a single streaming mode DMA translation. The DMA_ADDR and - SIZE must match what was provided for in a previous pci_map_single - call. All other usages are undefined. After this call, reads by - the cpu to the buffer are guaranteed to see whatever the device - wrote there. */ - -extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int); -extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int); - -/* pci_unmap_{single,page} is not a nop, thus... */ -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) - -/* Map a set of buffers described by scatterlist in streaming mode for - PCI DMA. This is the scatter-gather version of the above - pci_map_single interface. Here the scatter gather list elements - are each tagged with the appropriate PCI dma address and length. - They are obtained via sg_dma_{address,length}(SG). - - NOTE: An implementation may be able to use a smaller number of DMA - address/length pairs than there are SG table elements. 
(for - example via virtual mapping capabilities) The routine returns the - number of addr/length pairs actually used, at most nents. - - Device ownership issues as mentioned above for pci_map_single are - the same here. */ - -extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int); - -/* Unmap a set of streaming mode DMA translations. Again, cpu read - rules concerning calls here are the same as for pci_unmap_single() - above. */ - -extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int); - -/* Make physical memory consistent for a single streaming mode DMA - translation after a transfer and device currently has ownership - of the buffer. - - If you perform a pci_map_single() but wish to interrogate the - buffer using the cpu, yet do not wish to teardown the PCI dma - mapping, you must call this function before doing so. At the next - point you give the PCI dma address back to the card, you must first - perform a pci_dma_sync_for_device, and then the device again owns - the buffer. */ - -static inline void -pci_dma_sync_single_for_cpu(struct pci_dev *dev, dma_addr_t dma_addr, - long size, int direction) -{ - /* Nothing to do. */ -} - -static inline void -pci_dma_sync_single_for_device(struct pci_dev *dev, dma_addr_t dma_addr, - size_t size, int direction) -{ - /* Nothing to do. */ -} - -/* Make physical memory consistent for a set of streaming mode DMA - translations after a transfer. The same as pci_dma_sync_single_* - but for a scatter-gather list, same rules and usage. */ - -static inline void -pci_dma_sync_sg_for_cpu(struct pci_dev *dev, struct scatterlist *sg, - int nents, int direction) -{ - /* Nothing to do. */ -} - -static inline void -pci_dma_sync_sg_for_device(struct pci_dev *dev, struct scatterlist *sg, - int nents, int direction) -{ - /* Nothing to do. */ -} - -/* Return whether the given PCI device DMA address mask can - be supported properly. For example, if your device can - only drive the low 24-bits during PCI bus mastering, then - you would pass 0x00ffffff as the mask to this function. */ - -extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); - -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - *strat = PCI_DMA_BURST_BOUNDARY; - *strategy_parameter = cacheline_size; -} -#endif - -/* TODO: integrate with include/asm-generic/pci.h ? */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return channel ? 
15 : 14; -} - -extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *, - struct resource *); - -extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, - struct pci_bus_region *region); - -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - -#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index - -static inline int pci_proc_domain(struct pci_bus *bus) -{ - struct pci_controller *hose = bus->sysdata; - return hose->need_domain_info; -} - -struct pci_dev *alpha_gendev_to_pci(struct device *dev); - -#endif /* __KERNEL__ */ - -/* Values for the `which' argument to sys_pciconfig_iobase. */ -#define IOBASE_HOSE 0 -#define IOBASE_SPARSE_MEM 1 -#define IOBASE_DENSE_MEM 2 -#define IOBASE_SPARSE_IO 3 -#define IOBASE_DENSE_IO 4 -#define IOBASE_ROOT_BUS 5 -#define IOBASE_FROM_HOSE 0x10000 - -extern struct pci_dev *isa_bridge; - -#endif /* __ALPHA_PCI_H */ diff --git a/include/asm-alpha/percpu.h b/include/asm-alpha/percpu.h deleted file mode 100644 index 3495e8e00d7..00000000000 --- a/include/asm-alpha/percpu.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef __ALPHA_PERCPU_H -#define __ALPHA_PERCPU_H -#include -#include - -/* - * Determine the real variable name from the name visible in the - * kernel sources. - */ -#define per_cpu_var(var) per_cpu__##var - -#ifdef CONFIG_SMP - -/* - * per_cpu_offset() is the offset that has to be added to a - * percpu variable to get to the instance for a certain processor. - */ -extern unsigned long __per_cpu_offset[NR_CPUS]; - -#define per_cpu_offset(x) (__per_cpu_offset[x]) - -#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) -#ifdef CONFIG_DEBUG_PREEMPT -#define my_cpu_offset per_cpu_offset(smp_processor_id()) -#else -#define my_cpu_offset __my_cpu_offset -#endif - -#ifndef MODULE -#define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) -#define PER_CPU_ATTRIBUTES -#else -/* - * To calculate addresses of locally defined variables, GCC uses 32-bit - * displacement from the GP. Which doesn't work for per cpu variables in - * modules, as an offset to the kernel per cpu area is way above 4G. - * - * This forces allocation of a GOT entry for per cpu variable using - * ldq instruction with a 'literal' relocation. - */ -#define SHIFT_PERCPU_PTR(var, offset) ({ \ - extern int simple_identifier_##var(void); \ - unsigned long __ptr, tmp_gp; \ - asm ( "br %1, 1f \n\ - 1: ldgp %1, 0(%1) \n\ - ldq %0, per_cpu__" #var"(%1)\t!literal" \ - : "=&r"(__ptr), "=&r"(tmp_gp)); \ - (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) - -#define PER_CPU_ATTRIBUTES __used - -#endif /* MODULE */ - -/* - * A percpu variable may point to a discarded regions. The following are - * established ways to produce a usable pointer from the percpu variable - * offset. - */ -#define per_cpu(var, cpu) \ - (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu))) -#define __get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(var, my_cpu_offset)) -#define __raw_get_cpu_var(var) \ - (*SHIFT_PERCPU_PTR(var, __my_cpu_offset)) - -#else /* ! 
SMP */ - -#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) -#define __get_cpu_var(var) per_cpu_var(var) -#define __raw_get_cpu_var(var) per_cpu_var(var) - -#define PER_CPU_ATTRIBUTES - -#endif /* SMP */ - -#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name) - -#endif /* __ALPHA_PERCPU_H */ diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h deleted file mode 100644 index fd090155dcc..00000000000 --- a/include/asm-alpha/pgalloc.h +++ /dev/null @@ -1,83 +0,0 @@ -#ifndef _ALPHA_PGALLOC_H -#define _ALPHA_PGALLOC_H - -#include -#include - -/* - * Allocate and free page tables. The xxx_kernel() versions are - * used to allocate a kernel page table - this turns on ASN bits - * if any. - */ - -static inline void -pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) -{ - pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET)); -} -#define pmd_pgtable(pmd) pmd_page(pmd) - -static inline void -pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) -{ - pmd_set(pmd, pte); -} - -static inline void -pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) -{ - pgd_set(pgd, pmd); -} - -extern pgd_t *pgd_alloc(struct mm_struct *mm); - -static inline void -pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_page((unsigned long)pgd); -} - -static inline pmd_t * -pmd_alloc_one(struct mm_struct *mm, unsigned long address) -{ - pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); - return ret; -} - -static inline void -pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_page((unsigned long)pmd); -} - -extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); - -static inline void -pte_free_kernel(struct mm_struct *mm, pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline pgtable_t -pte_alloc_one(struct mm_struct *mm, unsigned long address) -{ - pte_t *pte = pte_alloc_one_kernel(mm, address); - struct page *page; - - if (!pte) - return NULL; - page = virt_to_page(pte); - pgtable_page_ctor(page); - return page; -} - -static inline void -pte_free(struct mm_struct *mm, pgtable_t page) -{ - pgtable_page_dtor(page); - __free_page(page); -} - -#define check_pgt_cache() do { } while (0) - -#endif /* _ALPHA_PGALLOC_H */ diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h deleted file mode 100644 index 3f0c59f6d8a..00000000000 --- a/include/asm-alpha/pgtable.h +++ /dev/null @@ -1,380 +0,0 @@ -#ifndef _ALPHA_PGTABLE_H -#define _ALPHA_PGTABLE_H - -#include - -/* - * This file contains the functions and defines necessary to modify and use - * the Alpha page table tree. - * - * This hopefully works with any standard Alpha page-size, as defined - * in (currently 8192). - */ -#include - -#include -#include /* For TASK_SIZE */ -#include - -struct mm_struct; -struct vm_area_struct; - -/* Certain architectures need to do special things when PTEs - * within a page table are directly modified. Thus, the following - * hook is made available. 
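The SMP half of percpu.h above turns every per_cpu(var, cpu) access into "address of the reference copy plus that CPU's byte offset"; RELOC_HIDE, and the GOT trick for modules, exist only to keep GCC from reasoning about that arithmetic. A rough model of the addressing, with invented variable names and the offsets computed from ordinary arrays instead of linker-placed sections:

#include <stdio.h>

#define NR_CPUS 4

struct percpu_area {                    /* stands in for the per-cpu data image */
        long runqueue_len;
        long softirq_pending;
};

static struct percpu_area reference;            /* the per_cpu__... symbols */
static struct percpu_area replica[NR_CPUS];     /* one copy per CPU */
static long __per_cpu_offset[NR_CPUS];          /* byte distance to each copy */

#define SHIFT_PERCPU_PTR(ptr, off) \
        ((struct percpu_area *)((char *)(ptr) + (off)))
#define per_cpu(field, cpu) \
        (SHIFT_PERCPU_PTR(&reference, __per_cpu_offset[cpu])->field)

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                __per_cpu_offset[cpu] = (char *)&replica[cpu] - (char *)&reference;

        per_cpu(runqueue_len, 2) = 7;
        printf("cpu 2 runqueue_len = %ld, stored at %p\n",
               per_cpu(runqueue_len, 2), (void *)&replica[2].runqueue_len);
        return 0;
}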
- */ -#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) - -/* PMD_SHIFT determines the size of the area a second-level page table can map */ -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) - -/* PGDIR_SHIFT determines what a third-level page table entry can map */ -#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3)) -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) - -/* - * Entries per page directory level: the Alpha is three-level, with - * all levels having a one-page page table. - */ -#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) -#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3)) -#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3)) -#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) -#define FIRST_USER_ADDRESS 0 - -/* Number of pointers that fit on a page: this will go away. */ -#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3)) - -#ifdef CONFIG_ALPHA_LARGE_VMALLOC -#define VMALLOC_START 0xfffffe0000000000 -#else -#define VMALLOC_START (-2*PGDIR_SIZE) -#endif -#define VMALLOC_END (-PGDIR_SIZE) - -/* - * OSF/1 PAL-code-imposed page table bits - */ -#define _PAGE_VALID 0x0001 -#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ -#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ -#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ -#define _PAGE_ASM 0x0010 -#define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */ -#define _PAGE_URE 0x0200 /* xxx */ -#define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */ -#define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */ - -/* .. and these are ours ... */ -#define _PAGE_DIRTY 0x20000 -#define _PAGE_ACCESSED 0x40000 -#define _PAGE_FILE 0x80000 /* set:pagecache, unset:swap */ - -/* - * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly - * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it. - * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use - * the KRE/URE bits to watch for it. That way we don't need to overload the - * KWE/UWE bits with both handling dirty and accessed. - * - * Note that the kernel uses the accessed bit just to check whether to page - * out a page or not, so it doesn't have to be exact anyway. - */ - -#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE) -#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) - -#define _PFN_MASK 0xFFFFFFFF00000000UL - -#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) -#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS) - -/* - * All the normal masks have the "page accessed" bits on, as any time they are used, - * the page is accessed. 
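With the 8 KB pages from page.h, each table level holds 1 << (13 - 3) = 1024 eight-byte entries, which is where the shift values above come from: a pte maps 8 KB, one pmd entry 8 MB, one pgd entry 8 GB. A quick arithmetic check using the same formulas; the sample address is arbitrary:

#include <stdio.h>

#define PAGE_SHIFT      13
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT - 3))         /* 23 */
#define PGDIR_SHIFT     (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))     /* 33 */
#define PTRS_PER_LEVEL  (1UL << (PAGE_SHIFT - 3))               /* 1024 */

int main(void)
{
        unsigned long addr = 0x12345678abcUL;   /* arbitrary user address */

        printf("pgd index %lu, pmd index %lu, pte index %lu, page offset %#lx\n",
               (addr >> PGDIR_SHIFT) & (PTRS_PER_LEVEL - 1),
               (addr >> PMD_SHIFT) & (PTRS_PER_LEVEL - 1),
               (addr >> PAGE_SHIFT) & (PTRS_PER_LEVEL - 1),
               addr & ((1UL << PAGE_SHIFT) - 1));
        printf("one pmd entry spans %lu MB, one pgd entry %lu GB\n",
               (1UL << PMD_SHIFT) >> 20, (1UL << PGDIR_SHIFT) >> 30);
        return 0;
}

Three 10-bit indices plus the 13-bit page offset give 43 bits of reach; user space only uses the bottom 4 TB of that, which is why USER_PTRS_PER_PGD above works out to half a pgd page.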
They are cleared only by the page-out routines - */ -#define PAGE_NONE __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE) -#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) -#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) -#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) -#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) - -#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) - -#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW)) -#define _PAGE_S(x) _PAGE_NORMAL(x) - -/* - * The hardware can handle write-only mappings, but as the Alpha - * architecture does byte-wide writes with a read-modify-write - * sequence, it's not practical to have write-without-read privs. - * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in - * arch/alpha/mm/fault.c) - */ - /* xwr */ -#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) -#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW) -#define __P010 _PAGE_P(_PAGE_FOE) -#define __P011 _PAGE_P(_PAGE_FOE) -#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR) -#define __P101 _PAGE_P(_PAGE_FOW) -#define __P110 _PAGE_P(0) -#define __P111 _PAGE_P(0) - -#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) -#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW) -#define __S010 _PAGE_S(_PAGE_FOE) -#define __S011 _PAGE_S(_PAGE_FOE) -#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR) -#define __S101 _PAGE_S(_PAGE_FOW) -#define __S110 _PAGE_S(0) -#define __S111 _PAGE_S(0) - -/* - * pgprot_noncached() is only for infiniband pci support, and a real - * implementation for RAM would be more complicated. - */ -#define pgprot_noncached(prot) (prot) - -/* - * BAD_PAGETABLE is used when we need a bogus page-table, while - * BAD_PAGE is used for a bogus page. - * - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -extern pte_t __bad_page(void); -extern pmd_t * __bad_pagetable(void); - -extern unsigned long __zero_page(void); - -#define BAD_PAGETABLE __bad_pagetable() -#define BAD_PAGE __bad_page() -#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE)) - -/* number of bits that fit into a memory pointer */ -#define BITS_PER_PTR (8*sizeof(unsigned long)) - -/* to align the pointer to a pointer address */ -#define PTR_MASK (~(sizeof(void*)-1)) - -/* sizeof(void*)==1<>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) - -/* - * On certain platforms whose physical address space can overlap KSEG, - * namely EV6 and above, we must re-twiddle the physaddr to restore the - * correct high-order bits. - * - * This is extremely confusing until you realize that this is actually - * just working around a userspace bug. The X server was intending to - * provide the physical address but instead provided the KSEG address. - * Or tried to, except it's not representable. - * - * On Tsunami there's nothing meaningful at 0x40000000000, so this is - * a safe thing to do. Come the first core logic that does put something - * in this area -- memory or whathaveyou -- then this hack will have - * to go away. So be prepared! - */ - -#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG) -#error "EV6-only feature in a generic kernel" -#endif -#if defined(CONFIG_ALPHA_GENERIC) || \ - (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG)) -#define KSEG_PFN (0xc0000000000UL >> PAGE_SHIFT) -#define PHYS_TWIDDLE(pfn) \ - ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \ - ? 
((pfn) ^= KSEG_PFN) : (pfn)) -#else -#define PHYS_TWIDDLE(pfn) (pfn) -#endif - -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ -#ifndef CONFIG_DISCONTIGMEM -#define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT) - -#define pte_pfn(pte) (pte_val(pte) >> 32) -#define pte_page(pte) pfn_to_page(pte_pfn(pte)) -#define mk_pte(page, pgprot) \ -({ \ - pte_t pte; \ - \ - pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \ - pte; \ -}) -#endif - -extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot) -{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; } - -extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } - -extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep) -{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } - -extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) -{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } - - -extern inline unsigned long -pmd_page_vaddr(pmd_t pmd) -{ - return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET; -} - -#ifndef CONFIG_DISCONTIGMEM -#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32)) -#define pgd_page(pgd) (mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32)) -#endif - -extern inline unsigned long pgd_page_vaddr(pgd_t pgd) -{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); } - -extern inline int pte_none(pte_t pte) { return !pte_val(pte); } -extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } -extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_val(*ptep) = 0; -} - -extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } -extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; } -extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; } -extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; } - -extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); } -extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; } -extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; } -extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; } - -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. 
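pmd_set() and pgd_set() above store the next level's page frame in the upper 32 bits of the entry, just like a pte: the KSEG virtual address of the table is turned into a physical address by subtracting PAGE_OFFSET, and shifting left by 32 - PAGE_SHIFT both divides by the page size and moves the result into the top half. A round-trip sketch with the constants from page.h and pgtable.h; the sample KSEG offset is arbitrary but page-aligned:

#include <stdio.h>

#define PAGE_SHIFT      13
#define PAGE_OFFSET     0xfffffc0000000000UL    /* non-48-bit-KSEG value from page.h */
#define _PFN_MASK       0xFFFFFFFF00000000UL
#define _PAGE_VALID     0x0001UL
#define _PAGE_KRE       0x0100UL
#define _PAGE_URE       0x0200UL
#define _PAGE_KWE       0x1000UL
#define _PAGE_UWE       0x2000UL
#define _PAGE_DIRTY     0x20000UL
#define _PAGE_ACCESSED  0x40000UL
#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
#define _PAGE_TABLE     (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)

static unsigned long pmd_set_val(unsigned long pte_table)       /* mirrors pmd_set() */
{
        return _PAGE_TABLE | ((pte_table - PAGE_OFFSET) << (32 - PAGE_SHIFT));
}

static unsigned long pmd_page_vaddr(unsigned long pmd)          /* mirrors the header */
{
        return ((pmd & _PFN_MASK) >> (32 - PAGE_SHIFT)) + PAGE_OFFSET;
}

int main(void)
{
        unsigned long pte_table = PAGE_OFFSET + 0x12340000UL;   /* page-aligned KSEG address */
        unsigned long pmd = pmd_set_val(pte_table);

        printf("pmd %#lx -> table back at %#lx (%s)\n", pmd, pmd_page_vaddr(pmd),
               pmd_page_vaddr(pmd) == pte_table ? "round trip ok" : "mismatch");
        return 0;
}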
- */ -extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); } -extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } -extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } -extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } -extern inline int pte_special(pte_t pte) { return 0; } - -extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; } -extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; } -extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; } -extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; } -extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; } -extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; } -extern inline pte_t pte_mkspecial(pte_t pte) { return pte; } - -#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) - -/* to find an entry in a page-table-directory. */ -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) - -/* - * The smp_read_barrier_depends() in the following functions are required to - * order the load of *dir (the pointer in the top level page table) with any - * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir). - * - * If this ordering is not enforced, the CPU might load an older value of - * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for - * more details. - * - * Note that we never change the mm->pgd pointer after the task is running, so - * pgd_offset does not require such a barrier. - */ - -/* Find an entry in the second-level page table.. */ -extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) -{ - pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); - smp_read_barrier_depends(); /* see above */ - return ret; -} - -/* Find an entry in the third-level page table.. */ -extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address) -{ - pte_t *ret = (pte_t *) pmd_page_vaddr(*dir) - + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1)); - smp_read_barrier_depends(); /* see above */ - return ret; -} - -#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr)) -#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr)) -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) - -extern pgd_t swapper_pg_dir[1024]; - -/* - * The Alpha doesn't have any external MMU info: the kernel page - * tables contain all the necessary information. - */ -extern inline void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) -{ -} - -/* - * Non-present pages: high 24 bits are offset, next 8 bits type, - * low 32 bits zero. 
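As the earlier comment in this header explains, the PTE has no hardware dirty or accessed bits; the header fakes them by pairing the software _PAGE_DIRTY/_PAGE_ACCESSED flags with the read- and write-enable bits, so clearing them forces a fault on the next access and the fault handler can set them again. A compact illustration of the state changes that pte_mkclean() and pte_mkold() above perform, with the bit values copied from this header:

#include <stdio.h>

#define _PAGE_FOW       0x0004UL
#define _PAGE_KRE       0x0100UL
#define _PAGE_URE       0x0200UL
#define _PAGE_KWE       0x1000UL
#define _PAGE_UWE       0x2000UL
#define _PAGE_DIRTY     0x20000UL
#define _PAGE_ACCESSED  0x40000UL
#define __DIRTY_BITS    (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS   (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

static void show(const char *what, unsigned long pte)
{
        printf("%-8s dirty=%d young=%d write-enabled=%d read-enabled=%d\n", what,
               !!(pte & _PAGE_DIRTY),                   /* pte_dirty() */
               !!(pte & _PAGE_ACCESSED),                /* pte_young() */
               !!(pte & (_PAGE_KWE | _PAGE_UWE)),
               !!(pte & (_PAGE_KRE | _PAGE_URE)));
}

int main(void)
{
        unsigned long pte = __DIRTY_BITS | __ACCESS_BITS;       /* a recently written mapping */

        show("initial", pte);
        pte &= ~__DIRTY_BITS;   /* pte_mkclean(): next write faults and re-dirties */
        show("mkclean", pte);
        pte &= ~__ACCESS_BITS;  /* pte_mkold(): next access faults and re-ages */
        show("mkold", pte);
        return 0;
}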
- */ -extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) -{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; } - -#define __swp_type(x) (((x).val >> 32) & 0xff) -#define __swp_offset(x) ((x).val >> 40) -#define __swp_entry(type, off) ((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -#define pte_to_pgoff(pte) (pte_val(pte) >> 32) -#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE }) - -#define PTE_FILE_MAX_BITS 32 - -#ifndef CONFIG_DISCONTIGMEM -#define kern_addr_valid(addr) (1) -#endif - -#define io_remap_pfn_range(vma, start, pfn, size, prot) \ - remap_pfn_range(vma, start, pfn, size, prot) - -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) -#define pmd_ERROR(e) \ - printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) - -extern void paging_init(void); - -#include - -/* - * No page table caches to initialise - */ -#define pgtable_cache_init() do { } while (0) - -/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */ -#define HAVE_ARCH_UNMAPPED_AREA - -#endif /* _ALPHA_PGTABLE_H */ diff --git a/include/asm-alpha/poll.h b/include/asm-alpha/poll.h deleted file mode 100644 index c98509d3149..00000000000 --- a/include/asm-alpha/poll.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/include/asm-alpha/posix_types.h b/include/asm-alpha/posix_types.h deleted file mode 100644 index db167413300..00000000000 --- a/include/asm-alpha/posix_types.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef _ALPHA_POSIX_TYPES_H -#define _ALPHA_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc. Also, we cannot - * assume GCC is being used. 
- */ - -typedef unsigned int __kernel_ino_t; -typedef unsigned int __kernel_mode_t; -typedef unsigned int __kernel_nlink_t; -typedef long __kernel_off_t; -typedef long long __kernel_loff_t; -typedef int __kernel_pid_t; -typedef int __kernel_ipc_pid_t; -typedef unsigned int __kernel_uid_t; -typedef unsigned int __kernel_gid_t; -typedef unsigned long __kernel_size_t; -typedef long __kernel_ssize_t; -typedef long __kernel_ptrdiff_t; -typedef long __kernel_time_t; -typedef long __kernel_suseconds_t; -typedef long __kernel_clock_t; -typedef int __kernel_daddr_t; -typedef char * __kernel_caddr_t; -typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ -typedef unsigned short __kernel_uid16_t; -typedef unsigned short __kernel_gid16_t; -typedef int __kernel_clockid_t; -typedef int __kernel_timer_t; - -typedef struct { - int val[2]; -} __kernel_fsid_t; - -typedef __kernel_uid_t __kernel_old_uid_t; -typedef __kernel_gid_t __kernel_old_gid_t; -typedef __kernel_uid_t __kernel_uid32_t; -typedef __kernel_gid_t __kernel_gid32_t; - -typedef unsigned int __kernel_old_dev_t; - -#ifdef __KERNEL__ - -#ifndef __GNUC__ - -#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) -#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) -#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0) -#define __FD_ZERO(set) \ - ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set))) - -#else /* __GNUC__ */ - -/* With GNU C, use inline functions instead so args are evaluated only once: */ - -#undef __FD_SET -static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) -{ - unsigned long _tmp = fd / __NFDBITS; - unsigned long _rem = fd % __NFDBITS; - fdsetp->fds_bits[_tmp] |= (1UL<<_rem); -} - -#undef __FD_CLR -static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) -{ - unsigned long _tmp = fd / __NFDBITS; - unsigned long _rem = fd % __NFDBITS; - fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem); -} - -#undef __FD_ISSET -static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p) -{ - unsigned long _tmp = fd / __NFDBITS; - unsigned long _rem = fd % __NFDBITS; - return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0; -} - -/* - * This will unroll the loop for the normal constant case (8 ints, - * for a 256-bit fd_set) - */ -#undef __FD_ZERO -static __inline__ void __FD_ZERO(__kernel_fd_set *p) -{ - unsigned long *tmp = p->fds_bits; - int i; - - if (__builtin_constant_p(__FDSET_LONGS)) { - switch (__FDSET_LONGS) { - case 16: - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; - tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; - tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; - return; - - case 8: - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; - tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; - return; - - case 4: - tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; - return; - } - } - i = __FDSET_LONGS; - while (i) { - i--; - *tmp = 0; - tmp++; - } -} - -#endif /* __GNUC__ */ - -#endif /* __KERNEL__ */ - -#endif /* _ALPHA_POSIX_TYPES_H */ diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h deleted file mode 100644 index 94afe585930..00000000000 --- a/include/asm-alpha/processor.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * include/asm-alpha/processor.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -#ifndef __ASM_ALPHA_PROCESSOR_H -#define __ASM_ALPHA_PROCESSOR_H - -#include /* for ADDR_LIMIT_32BIT */ - -/* - * Returns current instruction 
pointer ("program counter"). - */ -#define current_text_addr() \ - ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; }) - -/* - * We have a 42-bit user address space: 4TB user VM... - */ -#define TASK_SIZE (0x40000000000UL) - -#define STACK_TOP \ - (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) - -#define STACK_TOP_MAX 0x00120000000UL - -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define TASK_UNMAPPED_BASE \ - ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) - -typedef struct { - unsigned long seg; -} mm_segment_t; - -/* This is dead. Everything has been moved to thread_info. */ -struct thread_struct { }; -#define INIT_THREAD { } - -/* Return saved PC of a blocked thread. */ -struct task_struct; -extern unsigned long thread_saved_pc(struct task_struct *); - -/* Do necessary setup to start up a newly executed thread. */ -extern void start_thread(struct pt_regs *, unsigned long, unsigned long); - -/* Free all resources held by a thread. */ -extern void release_thread(struct task_struct *); - -/* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk) do { } while (0) - -/* Create a kernel thread without removing it from tasklists. */ -extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - -unsigned long get_wchan(struct task_struct *p); - -#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) - -#define KSTK_ESP(tsk) \ - ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp) - -#define cpu_relax() barrier() - -#define ARCH_HAS_PREFETCH -#define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH - -#ifndef CONFIG_SMP -/* Nothing to prefetch. */ -#define spin_lock_prefetch(lock) do { } while (0) -#endif - -extern inline void prefetch(const void *ptr) -{ - __builtin_prefetch(ptr, 0, 3); -} - -extern inline void prefetchw(const void *ptr) -{ - __builtin_prefetch(ptr, 1, 3); -} - -#ifdef CONFIG_SMP -extern inline void spin_lock_prefetch(const void *ptr) -{ - __builtin_prefetch(ptr, 1, 3); -} -#endif - -#endif /* __ASM_ALPHA_PROCESSOR_H */ diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h deleted file mode 100644 index 32c7a5cddd5..00000000000 --- a/include/asm-alpha/ptrace.h +++ /dev/null @@ -1,83 +0,0 @@ -#ifndef _ASMAXP_PTRACE_H -#define _ASMAXP_PTRACE_H - - -/* - * This struct defines the way the registers are stored on the - * kernel stack during a system call or other kernel entry - * - * NOTE! I want to minimize the overhead of system calls, so this - * struct has as little information as possible. I does not have - * - * - floating point regs: the kernel doesn't change those - * - r9-15: saved by the C compiler - * - * This makes "fork()" and "exec()" a bit more complex, but should - * give us low system call latency. 
- */ - -struct pt_regs { - unsigned long r0; - unsigned long r1; - unsigned long r2; - unsigned long r3; - unsigned long r4; - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long r8; - unsigned long r19; - unsigned long r20; - unsigned long r21; - unsigned long r22; - unsigned long r23; - unsigned long r24; - unsigned long r25; - unsigned long r26; - unsigned long r27; - unsigned long r28; - unsigned long hae; -/* JRP - These are the values provided to a0-a2 by PALcode */ - unsigned long trap_a0; - unsigned long trap_a1; - unsigned long trap_a2; -/* These are saved by PAL-code: */ - unsigned long ps; - unsigned long pc; - unsigned long gp; - unsigned long r16; - unsigned long r17; - unsigned long r18; -}; - -/* - * This is the extended stack used by signal handlers and the context - * switcher: it's pushed after the normal "struct pt_regs". - */ -struct switch_stack { - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long r14; - unsigned long r15; - unsigned long r26; - unsigned long fp[32]; /* fp[31] is fpcr */ -}; - -#ifdef __KERNEL__ - -#define user_mode(regs) (((regs)->ps & 8) != 0) -#define instruction_pointer(regs) ((regs)->pc) -#define profile_pc(regs) instruction_pointer(regs) -extern void show_regs(struct pt_regs *); - -#define task_pt_regs(task) \ - ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) - -#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) - -#endif - -#endif diff --git a/include/asm-alpha/reg.h b/include/asm-alpha/reg.h deleted file mode 100644 index 86ff916fb06..00000000000 --- a/include/asm-alpha/reg.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef __reg_h__ -#define __reg_h__ - -/* - * Exception frame offsets. - */ -#define EF_V0 0 -#define EF_T0 1 -#define EF_T1 2 -#define EF_T2 3 -#define EF_T3 4 -#define EF_T4 5 -#define EF_T5 6 -#define EF_T6 7 -#define EF_T7 8 -#define EF_S0 9 -#define EF_S1 10 -#define EF_S2 11 -#define EF_S3 12 -#define EF_S4 13 -#define EF_S5 14 -#define EF_S6 15 -#define EF_A3 16 -#define EF_A4 17 -#define EF_A5 18 -#define EF_T8 19 -#define EF_T9 20 -#define EF_T10 21 -#define EF_T11 22 -#define EF_RA 23 -#define EF_T12 24 -#define EF_AT 25 -#define EF_SP 26 -#define EF_PS 27 -#define EF_PC 28 -#define EF_GP 29 -#define EF_A0 30 -#define EF_A1 31 -#define EF_A2 32 - -#define EF_SIZE (33*8) -#define HWEF_SIZE (6*8) /* size of PAL frame (PS-A2) */ - -#define EF_SSIZE (EF_SIZE - HWEF_SIZE) - -/* - * Map register number into core file offset. 
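The EF_* offsets above index the flat array of saved registers in an exception frame or core file, and the CORE_REG() accessor defined just below is nothing more than that array lookup. A trivial sketch with invented frame contents:

#include <stdio.h>

#define EF_SP   26
#define EF_PC   28
#define EF_SIZE (33 * 8)
#define CORE_REG(reg, ubase) (((unsigned long *)((unsigned long)(ubase)))[reg])

int main(void)
{
        unsigned long frame[EF_SIZE / 8] = { 0 };       /* pretend saved exception frame */

        frame[EF_PC] = 0x120001090UL;
        frame[EF_SP] = 0x11fffff00UL;
        printf("pc = %#lx, sp = %#lx\n", CORE_REG(EF_PC, frame), CORE_REG(EF_SP, frame));
        return 0;
}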
- */ -#define CORE_REG(reg, ubase) \ - (((unsigned long *)((unsigned long)(ubase)))[reg]) - -#endif /* __reg_h__ */ diff --git a/include/asm-alpha/regdef.h b/include/asm-alpha/regdef.h deleted file mode 100644 index 142df9c4f8b..00000000000 --- a/include/asm-alpha/regdef.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef __alpha_regdef_h__ -#define __alpha_regdef_h__ - -#define v0 $0 /* function return value */ - -#define t0 $1 /* temporary registers (caller-saved) */ -#define t1 $2 -#define t2 $3 -#define t3 $4 -#define t4 $5 -#define t5 $6 -#define t6 $7 -#define t7 $8 - -#define s0 $9 /* saved-registers (callee-saved registers) */ -#define s1 $10 -#define s2 $11 -#define s3 $12 -#define s4 $13 -#define s5 $14 -#define s6 $15 -#define fp s6 /* frame-pointer (s6 in frame-less procedures) */ - -#define a0 $16 /* argument registers (caller-saved) */ -#define a1 $17 -#define a2 $18 -#define a3 $19 -#define a4 $20 -#define a5 $21 - -#define t8 $22 /* more temps (caller-saved) */ -#define t9 $23 -#define t10 $24 -#define t11 $25 -#define ra $26 /* return address register */ -#define t12 $27 - -#define pv t12 /* procedure-variable register */ -#define AT $at /* assembler temporary */ -#define gp $29 /* global pointer */ -#define sp $30 /* stack pointer */ -#define zero $31 /* reads as zero, writes are noops */ - -#endif /* __alpha_regdef_h__ */ diff --git a/include/asm-alpha/resource.h b/include/asm-alpha/resource.h deleted file mode 100644 index c10874ff597..00000000000 --- a/include/asm-alpha/resource.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ALPHA_RESOURCE_H -#define _ALPHA_RESOURCE_H - -/* - * Alpha/Linux-specific ordering of these four resource limit IDs, - * the rest comes from the generic header: - */ -#define RLIMIT_NOFILE 6 /* max number of open files */ -#define RLIMIT_AS 7 /* address space limit */ -#define RLIMIT_NPROC 8 /* max number of processes */ -#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */ - -/* - * SuS says limits have to be unsigned. Fine, it's unsigned, but - * we retain the old value for compatibility, especially with DU. - * When you run into the 2^63 barrier, you call me. - */ -#define RLIM_INFINITY 0x7ffffffffffffffful - -#include - -#endif /* _ALPHA_RESOURCE_H */ diff --git a/include/asm-alpha/rtc.h b/include/asm-alpha/rtc.h deleted file mode 100644 index 4e854b1333e..00000000000 --- a/include/asm-alpha/rtc.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _ALPHA_RTC_H -#define _ALPHA_RTC_H - -/* - * Alpha uses the default access methods for the RTC. - */ - -#include - -#endif diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h deleted file mode 100644 index 1570c0b5433..00000000000 --- a/include/asm-alpha/rwsem.h +++ /dev/null @@ -1,259 +0,0 @@ -#ifndef _ALPHA_RWSEM_H -#define _ALPHA_RWSEM_H - -/* - * Written by Ivan Kokshaysky , 2001. 
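The semaphore count defined just below packs two things into one 64-bit word: the number of active lockers in the low 32 bits, plus a large negative "waiting" bias that a writer adds along with its own +1. Any locker whose atomic add returns a negative old count knows it must take the slow path. A quick illustration of the resulting values, using the RWSEM_* constants from this header:

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE    0x0000000000000000L
#define RWSEM_ACTIVE_BIAS       0x0000000000000001L
#define RWSEM_ACTIVE_MASK       0x00000000ffffffffL
#define RWSEM_WAITING_BIAS      (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static void show(const char *what, long count)
{
        printf("%-24s count=%#18lx active=%ld %s\n", what, (unsigned long)count,
               count & RWSEM_ACTIVE_MASK,
               count < 0 ? "(negative: new lockers go slow)" : "(fast path open)");
}

int main(void)
{
        long count = RWSEM_UNLOCKED_VALUE;

        show("unlocked", count);
        count += RWSEM_ACTIVE_READ_BIAS;  show("one reader", count);
        count += RWSEM_ACTIVE_READ_BIAS;  show("two readers", count);
        count = RWSEM_ACTIVE_WRITE_BIAS;  show("writer holds it", count);
        count += RWSEM_ACTIVE_READ_BIAS;  show("reader does its +1, queues", count);
        return 0;
}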
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h - */ - -#ifndef _LINUX_RWSEM_H -#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ - -#include -#include -#include - -struct rwsem_waiter; - -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -/* - * the semaphore definition - */ -struct rw_semaphore { - long count; -#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L -#define RWSEM_ACTIVE_BIAS 0x0000000000000001L -#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL -#define RWSEM_WAITING_BIAS (-0x0000000100000000L) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct list_head wait_list; -}; - -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - LIST_HEAD_INIT((name).wait_list) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} - -static inline void __down_read(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count; - sem->count += RWSEM_ACTIVE_READ_BIAS; -#else - long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " mb\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(oldcount < 0)) - rwsem_down_read_failed(sem); -} - -/* - * trylock for reading -- returns 1 if successful, 0 if contention - */ -static inline int __down_read_trylock(struct rw_semaphore *sem) -{ - long old, new, res; - - res = sem->count; - do { - new = res + RWSEM_ACTIVE_READ_BIAS; - if (new <= 0) - break; - old = res; - res = cmpxchg(&sem->count, old, new); - } while (res != old); - return res >= 0 ? 
1 : 0; -} - -static inline void __down_write(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count; - sem->count += RWSEM_ACTIVE_WRITE_BIAS; -#else - long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " mb\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(oldcount)) - rwsem_down_write_failed(sem); -} - -/* - * trylock for writing -- returns 1 if successful, 0 if contention - */ -static inline int __down_write_trylock(struct rw_semaphore *sem) -{ - long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, - RWSEM_ACTIVE_WRITE_BIAS); - if (ret == RWSEM_UNLOCKED_VALUE) - return 1; - return 0; -} - -static inline void __up_read(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count; - sem->count -= RWSEM_ACTIVE_READ_BIAS; -#else - long temp; - __asm__ __volatile__( - " mb\n" - "1: ldq_l %0,%1\n" - " subq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(oldcount < 0)) - if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0) - rwsem_wake(sem); -} - -static inline void __up_write(struct rw_semaphore *sem) -{ - long count; -#ifndef CONFIG_SMP - sem->count -= RWSEM_ACTIVE_WRITE_BIAS; - count = sem->count; -#else - long temp; - __asm__ __volatile__( - " mb\n" - "1: ldq_l %0,%1\n" - " subq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " subq %0,%3,%0\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (count), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(count)) - if ((int)count == 0) - rwsem_wake(sem); -} - -/* - * downgrade write lock to read lock - */ -static inline void __downgrade_write(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count; - sem->count -= RWSEM_WAITING_BIAS; -#else - long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " mb\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(oldcount < 0)) - rwsem_downgrade_wake(sem); -} - -static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem) -{ -#ifndef CONFIG_SMP - sem->count += val; -#else - long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%2,%0\n" - " stq_c %0,%1\n" - " beq %0,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (temp), "=m" (sem->count) - :"Ir" (val), "m" (sem->count)); -#endif -} - -static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem) -{ -#ifndef CONFIG_SMP - sem->count += val; - return sem->count; -#else - long ret, temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " addq %0,%3,%0\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (ret), "=m" (sem->count), "=&r" (temp) - :"Ir" (val), "m" (sem->count)); - - return ret; -#endif -} - -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - -#endif /* __KERNEL__ */ -#endif /* _ALPHA_RWSEM_H */ diff --git 
a/include/asm-alpha/scatterlist.h b/include/asm-alpha/scatterlist.h deleted file mode 100644 index 440747ca634..00000000000 --- a/include/asm-alpha/scatterlist.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _ALPHA_SCATTERLIST_H -#define _ALPHA_SCATTERLIST_H - -#include -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - - unsigned int length; - - dma_addr_t dma_address; - __u32 dma_length; -}; - -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->dma_length) - -#define ISA_DMA_THRESHOLD (~0UL) - -#endif /* !(_ALPHA_SCATTERLIST_H) */ diff --git a/include/asm-alpha/sections.h b/include/asm-alpha/sections.h deleted file mode 100644 index 43b40edd6e4..00000000000 --- a/include/asm-alpha/sections.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _ALPHA_SECTIONS_H -#define _ALPHA_SECTIONS_H - -/* nothing to see, move along */ -#include - -#endif diff --git a/include/asm-alpha/segment.h b/include/asm-alpha/segment.h deleted file mode 100644 index 0453d97daae..00000000000 --- a/include/asm-alpha/segment.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ALPHA_SEGMENT_H -#define __ALPHA_SEGMENT_H - -/* Only here because we have some old header files that expect it.. */ - -#endif diff --git a/include/asm-alpha/sembuf.h b/include/asm-alpha/sembuf.h deleted file mode 100644 index 7b38b153478..00000000000 --- a/include/asm-alpha/sembuf.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ALPHA_SEMBUF_H -#define _ALPHA_SEMBUF_H - -/* - * The semid64_ds structure for alpha architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - __kernel_time_t sem_ctime; /* last change time */ - unsigned long sem_nsems; /* no. of semaphores in array */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _ALPHA_SEMBUF_H */ diff --git a/include/asm-alpha/serial.h b/include/asm-alpha/serial.h deleted file mode 100644 index 9d263e8d8cc..00000000000 --- a/include/asm-alpha/serial.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * include/asm-alpha/serial.h - */ - - -/* - * This assumes you have a 1.8432 MHz clock for your UART. - * - * It'd be nice if someone built a serial card with a 24.576 MHz - * clock, since the 16550A is capable of handling a top speed of 1.5 - * megabits/second; but this requires the faster clock. 
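 * (Aside, not part of the original header: with the standard 1.8432 MHz
 * clock the divide-by-16 formula below works out to BASE_BAUD = 1843200 / 16
 * = 115200, the familiar top rate; a 24.576 MHz clock would likewise give
 * 24576000 / 16 = 1536000, i.e. the 1.5 megabits/second mentioned above.)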
- */ -#define BASE_BAUD ( 1843200 / 16 ) - -/* Standard COM flags (except for COM4, because of the 8514 problem) */ -#ifdef CONFIG_SERIAL_DETECT_IRQ -#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) -#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) -#else -#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) -#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF -#endif - -#define SERIAL_PORT_DFNS \ - /* UART CLK PORT IRQ FLAGS */ \ - { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ - { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ - { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ - { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ diff --git a/include/asm-alpha/setup.h b/include/asm-alpha/setup.h deleted file mode 100644 index 2e023a4aa31..00000000000 --- a/include/asm-alpha/setup.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ALPHA_SETUP_H -#define __ALPHA_SETUP_H - -#define COMMAND_LINE_SIZE 256 - -#endif diff --git a/include/asm-alpha/sfp-machine.h b/include/asm-alpha/sfp-machine.h deleted file mode 100644 index 5fe63afbd47..00000000000 --- a/include/asm-alpha/sfp-machine.h +++ /dev/null @@ -1,82 +0,0 @@ -/* Machine-dependent software floating-point definitions. - Alpha kernel version. - Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Richard Henderson (rth@cygnus.com), - Jakub Jelinek (jakub@redhat.com) and - David S. Miller (davem@redhat.com). - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public License as - published by the Free Software Foundation; either version 2 of the - License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. - - You should have received a copy of the GNU Library General Public - License along with the GNU C Library; see the file COPYING.LIB. If - not, write to the Free Software Foundation, Inc., - 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ - -#ifndef _SFP_MACHINE_H -#define _SFP_MACHINE_H - -#define _FP_W_TYPE_SIZE 64 -#define _FP_W_TYPE unsigned long -#define _FP_WS_TYPE signed long -#define _FP_I_TYPE long - -#define _FP_MUL_MEAT_S(R,X,Y) \ - _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y) -#define _FP_MUL_MEAT_D(R,X,Y) \ - _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) -#define _FP_MUL_MEAT_Q(R,X,Y) \ - _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) - -#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) -#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y) -#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y) - -#define _FP_NANFRAC_S _FP_QNANBIT_S -#define _FP_NANFRAC_D _FP_QNANBIT_D -#define _FP_NANFRAC_Q _FP_QNANBIT_Q -#define _FP_NANSIGN_S 1 -#define _FP_NANSIGN_D 1 -#define _FP_NANSIGN_Q 1 - -#define _FP_KEEPNANFRACP 1 - -/* Alpha Architecture Handbook, 4.7.10.4 sais that - * we should prefer any type of NaN in Fb, then Fa. - */ -#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ - do { \ - R##_s = Y##_s; \ - _FP_FRAC_COPY_##wc(R,X); \ - R##_c = FP_CLS_NAN; \ - } while (0) - -/* Obtain the current rounding mode. 
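 * (Aside, illustrative only -- not part of the original header: soft-fp
 * expects FP_ROUNDMODE to evaluate to one of the FP_RND_* values.  Here it
 * is simply bound to a variable named `mode', which the kernel FP emulator
 * is assumed to have in scope, set from the FPCR dynamic rounding field
 * roughly as
 *
 *     mode = (rdfpcr() >> FPCR_DYN_SHIFT) & 3;
 *
 * which is why the FP_RND_* constants below are the FPCR_DYN_* encodings
 * shifted down by FPCR_DYN_SHIFT.  rdfpcr() here is assumed from asm/fpu.h.)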
*/ -#define FP_ROUNDMODE mode -#define FP_RND_NEAREST (FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT) -#define FP_RND_ZERO (FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT) -#define FP_RND_PINF (FPCR_DYN_PLUS >> FPCR_DYN_SHIFT) -#define FP_RND_MINF (FPCR_DYN_MINUS >> FPCR_DYN_SHIFT) - -/* Exception flags. */ -#define FP_EX_INVALID IEEE_TRAP_ENABLE_INV -#define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF -#define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF -#define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE -#define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE -#define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO - -#define FP_DENORM_ZERO (swcr & IEEE_MAP_DMZ) - -/* We write the results always */ -#define FP_INHIBIT_RESULTS 0 - -#endif diff --git a/include/asm-alpha/shmbuf.h b/include/asm-alpha/shmbuf.h deleted file mode 100644 index 37ee84f0508..00000000000 --- a/include/asm-alpha/shmbuf.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _ALPHA_SHMBUF_H -#define _ALPHA_SHMBUF_H - -/* - * The shmid64_ds structure for alpha architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - __kernel_time_t shm_dtime; /* last detach time */ - __kernel_time_t shm_ctime; /* last change time */ - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. of current attaches */ - unsigned long __unused1; - unsigned long __unused2; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _ALPHA_SHMBUF_H */ diff --git a/include/asm-alpha/shmparam.h b/include/asm-alpha/shmparam.h deleted file mode 100644 index cc901d58aeb..00000000000 --- a/include/asm-alpha/shmparam.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASMAXP_SHMPARAM_H -#define _ASMAXP_SHMPARAM_H - -#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ - -#endif /* _ASMAXP_SHMPARAM_H */ diff --git a/include/asm-alpha/sigcontext.h b/include/asm-alpha/sigcontext.h deleted file mode 100644 index 323cdb02619..00000000000 --- a/include/asm-alpha/sigcontext.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef _ASMAXP_SIGCONTEXT_H -#define _ASMAXP_SIGCONTEXT_H - -struct sigcontext { - /* - * What should we have here? I'd probably better use the same - * stack layout as OSF/1, just in case we ever want to try - * running their binaries.. - * - * This is the basic layout, but I don't know if we'll ever - * actually fill in all the values.. 
- */ - long sc_onstack; - long sc_mask; - long sc_pc; - long sc_ps; - long sc_regs[32]; - long sc_ownedfp; - long sc_fpregs[32]; - unsigned long sc_fpcr; - unsigned long sc_fp_control; - unsigned long sc_reserved1, sc_reserved2; - unsigned long sc_ssize; - char * sc_sbase; - unsigned long sc_traparg_a0; - unsigned long sc_traparg_a1; - unsigned long sc_traparg_a2; - unsigned long sc_fp_trap_pc; - unsigned long sc_fp_trigger_sum; - unsigned long sc_fp_trigger_inst; -}; - - -#endif diff --git a/include/asm-alpha/siginfo.h b/include/asm-alpha/siginfo.h deleted file mode 100644 index 9822362a842..00000000000 --- a/include/asm-alpha/siginfo.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _ALPHA_SIGINFO_H -#define _ALPHA_SIGINFO_H - -#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#define __ARCH_SI_TRAPNO - -#include - -#endif diff --git a/include/asm-alpha/signal.h b/include/asm-alpha/signal.h deleted file mode 100644 index 13c2305d35e..00000000000 --- a/include/asm-alpha/signal.h +++ /dev/null @@ -1,172 +0,0 @@ -#ifndef _ASMAXP_SIGNAL_H -#define _ASMAXP_SIGNAL_H - -#include - -/* Avoid too many header ordering problems. */ -struct siginfo; - -#ifdef __KERNEL__ -/* Digital Unix defines 64 signals. Most things should be clean enough - to redefine this at will, if care is taken to make libc match. */ - -#define _NSIG 64 -#define _NSIG_BPW 64 -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -#define NSIG 32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - - -/* - * Linux/AXP has different signal numbers that Linux/i386: I'm trying - * to make it OSF/1 binary compatible, at least for normal binaries. - */ -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGEMT 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGBUS 10 -#define SIGSEGV 11 -#define SIGSYS 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGURG 16 -#define SIGSTOP 17 -#define SIGTSTP 18 -#define SIGCONT 19 -#define SIGCHLD 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGIO 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGINFO 29 -#define SIGUSR1 30 -#define SIGUSR2 31 - -#define SIGPOLL SIGIO -#define SIGPWR SIGINFO -#define SIGIOT SIGABRT - -/* These should not be considered constants from userland. */ -#define SIGRTMIN 32 -#define SIGRTMAX _NSIG - -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. 
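 * (Aside, illustrative only -- not part of the original header.  From user
 * space the flags combine in the usual POSIX fashion, e.g.:
 *
 *     struct sigaction sa = { 0 };
 *     sa.sa_sigaction = handler;           // void handler(int, siginfo_t *si, void *ctx)
 *     sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *     sigemptyset(&sa.sa_mask);
 *     sigaction(SIGCHLD, &sa, NULL);
 *
 * Here SA_RESTART asks that interrupted slow syscalls be restarted rather
 * than fail with EINTR, and SA_SIGINFO selects the three-argument siginfo
 * handler form; `handler' is a hypothetical name.)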
- */ - -#define SA_ONSTACK 0x00000001 -#define SA_RESTART 0x00000002 -#define SA_NOCLDSTOP 0x00000004 -#define SA_NODEFER 0x00000008 -#define SA_RESETHAND 0x00000010 -#define SA_NOCLDWAIT 0x00000020 -#define SA_SIGINFO 0x00000040 - -#define SA_ONESHOT SA_RESETHAND -#define SA_NOMASK SA_NODEFER - -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - -#define MINSIGSTKSZ 4096 -#define SIGSTKSZ 16384 - -#define SIG_BLOCK 1 /* for blocking signals */ -#define SIG_UNBLOCK 2 /* for unblocking signals */ -#define SIG_SETMASK 3 /* for setting the signal mask */ - -#include - -#ifdef __KERNEL__ -struct osf_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - int sa_flags; -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; - __sigrestore_t ka_restorer; -}; -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - int sa_flags; -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void __user *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - -/* sigstack(2) is deprecated, and will be withdrawn in a future version - of the X/Open CAE Specification. Use sigaltstack instead. It is only - implemented here for OSF/1 compatibility. */ - -struct sigstack { - void __user *ss_sp; - int ss_onstack; -}; - -#ifdef __KERNEL__ -#include - -#define ptrace_signal_deliver(regs, cookie) do { } while (0) - -#endif - -#endif diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h deleted file mode 100644 index 544c69af816..00000000000 --- a/include/asm-alpha/smp.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - -#include -#include -#include -#include - -/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. 
:-( */ - -static __inline__ unsigned char -__hard_smp_processor_id(void) -{ - register unsigned char __r0 __asm__("$0"); - __asm__ __volatile__( - "call_pal %1 #whami" - : "=r"(__r0) - :"i" (PAL_whami) - : "$1", "$22", "$23", "$24", "$25"); - return __r0; -} - -#ifdef CONFIG_SMP - -#include - -struct cpuinfo_alpha { - unsigned long loops_per_jiffy; - unsigned long last_asn; - int need_new_asn; - int asn_lock; - unsigned long ipi_count; - unsigned long prof_multiplier; - unsigned long prof_counter; - unsigned char mcheck_expected; - unsigned char mcheck_taken; - unsigned char mcheck_extra; -} __attribute__((aligned(64))); - -extern struct cpuinfo_alpha cpu_data[NR_CPUS]; - -#define PROC_CHANGE_PENALTY 20 - -#define hard_smp_processor_id() __hard_smp_processor_id() -#define raw_smp_processor_id() (current_thread_info()->cpu) - -extern int smp_num_cpus; -#define cpu_possible_map cpu_present_map - -extern void arch_send_call_function_single_ipi(int cpu); -extern void arch_send_call_function_ipi(cpumask_t mask); - -#else /* CONFIG_SMP */ - -#define hard_smp_processor_id() 0 -#define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; }) - -#endif /* CONFIG_SMP */ - -#define NO_PROC_ID (-1) - -#endif diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h deleted file mode 100644 index a1057c2d95e..00000000000 --- a/include/asm-alpha/socket.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef _ASM_SOCKET_H -#define _ASM_SOCKET_H - -#include - -/* For setsockopt(2) */ -/* - * Note: we only bother about making the SOL_SOCKET options - * same as OSF/1, as that's all that "normal" programs are - * likely to set. We don't necessarily want to be binary - * compatible with _everything_. - */ -#define SOL_SOCKET 0xffff - -#define SO_DEBUG 0x0001 -#define SO_REUSEADDR 0x0004 -#define SO_KEEPALIVE 0x0008 -#define SO_DONTROUTE 0x0010 -#define SO_BROADCAST 0x0020 -#define SO_LINGER 0x0080 -#define SO_OOBINLINE 0x0100 -/* To add :#define SO_REUSEPORT 0x0200 */ - -#define SO_TYPE 0x1008 -#define SO_ERROR 0x1007 -#define SO_SNDBUF 0x1001 -#define SO_RCVBUF 0x1002 -#define SO_SNDBUFFORCE 0x100a -#define SO_RCVBUFFORCE 0x100b -#define SO_RCVLOWAT 0x1010 -#define SO_SNDLOWAT 0x1011 -#define SO_RCVTIMEO 0x1012 -#define SO_SNDTIMEO 0x1013 -#define SO_ACCEPTCONN 0x1014 - -/* linux-specific, might as well be the same as on i386 */ -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_BSDCOMPAT 14 - -#define SO_PASSCRED 17 -#define SO_PEERCRED 18 -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_PEERSEC 30 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 19 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 20 -#define SO_SECURITY_ENCRYPTION_NETWORK 21 - -#define SO_MARK 36 - -/* O_NONBLOCK clashes with the bits used for socket types. Therefore we - * have to define SOCK_NONBLOCK to a different value here. - */ -#define SOCK_NONBLOCK 0x40000000 - -#endif /* _ASM_SOCKET_H */ diff --git a/include/asm-alpha/sockios.h b/include/asm-alpha/sockios.h deleted file mode 100644 index 7932c7ab4a4..00000000000 --- a/include/asm-alpha/sockios.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _ASM_ALPHA_SOCKIOS_H -#define _ASM_ALPHA_SOCKIOS_H - -/* Socket-level I/O control calls. 
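 * (Aside, illustrative only -- not part of the original header.  These are
 * used through ioctl(2) on a socket descriptor; for example, fetching the
 * receive timestamp of the most recently read packet:
 *
 *     struct timeval tv;
 *     if (ioctl(sock_fd, SIOCGSTAMP, &tv) == 0)
 *         printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
 *
 * where sock_fd is assumed to be an open datagram socket that has already
 * received at least one packet.)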
*/ - -#define FIOGETOWN _IOR('f', 123, int) -#define FIOSETOWN _IOW('f', 124, int) - -#define SIOCATMARK _IOR('s', 7, int) -#define SIOCSPGRP _IOW('s', 8, pid_t) -#define SIOCGPGRP _IOR('s', 9, pid_t) - -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ - -#endif /* _ASM_ALPHA_SOCKIOS_H */ diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h deleted file mode 100644 index aeeb125f685..00000000000 --- a/include/asm-alpha/spinlock.h +++ /dev/null @@ -1,173 +0,0 @@ -#ifndef _ALPHA_SPINLOCK_H -#define _ALPHA_SPINLOCK_H - -#include -#include -#include - -/* - * Simple spin lock operations. There are two variants, one clears IRQ's - * on the local processor, one does not. - * - * We make no fairness assumptions. They have a cost. - */ - -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while ((x)->lock) - -static inline void __raw_spin_unlock(raw_spinlock_t * lock) -{ - mb(); - lock->lock = 0; -} - -static inline void __raw_spin_lock(raw_spinlock_t * lock) -{ - long tmp; - - __asm__ __volatile__( - "1: ldl_l %0,%1\n" - " bne %0,2f\n" - " lda %0,1\n" - " stl_c %0,%1\n" - " beq %0,2f\n" - " mb\n" - ".subsection 2\n" - "2: ldl %0,%1\n" - " bne %0,2b\n" - " br 1b\n" - ".previous" - : "=&r" (tmp), "=m" (lock->lock) - : "m"(lock->lock) : "memory"); -} - -static inline int __raw_spin_trylock(raw_spinlock_t *lock) -{ - return !test_and_set_bit(0, &lock->lock); -} - -/***********************************************************/ - -static inline int __raw_read_can_lock(raw_rwlock_t *lock) -{ - return (lock->lock & 1) == 0; -} - -static inline int __raw_write_can_lock(raw_rwlock_t *lock) -{ - return lock->lock == 0; -} - -static inline void __raw_read_lock(raw_rwlock_t *lock) -{ - long regx; - - __asm__ __volatile__( - "1: ldl_l %1,%0\n" - " blbs %1,6f\n" - " subl %1,2,%1\n" - " stl_c %1,%0\n" - " beq %1,6f\n" - " mb\n" - ".subsection 2\n" - "6: ldl %1,%0\n" - " blbs %1,6b\n" - " br 1b\n" - ".previous" - : "=m" (*lock), "=&r" (regx) - : "m" (*lock) : "memory"); -} - -static inline void __raw_write_lock(raw_rwlock_t *lock) -{ - long regx; - - __asm__ __volatile__( - "1: ldl_l %1,%0\n" - " bne %1,6f\n" - " lda %1,1\n" - " stl_c %1,%0\n" - " beq %1,6f\n" - " mb\n" - ".subsection 2\n" - "6: ldl %1,%0\n" - " bne %1,6b\n" - " br 1b\n" - ".previous" - : "=m" (*lock), "=&r" (regx) - : "m" (*lock) : "memory"); -} - -static inline int __raw_read_trylock(raw_rwlock_t * lock) -{ - long regx; - int success; - - __asm__ __volatile__( - "1: ldl_l %1,%0\n" - " lda %2,0\n" - " blbs %1,2f\n" - " subl %1,2,%2\n" - " stl_c %2,%0\n" - " beq %2,6f\n" - "2: mb\n" - ".subsection 2\n" - "6: br 1b\n" - ".previous" - : "=m" (*lock), "=&r" (regx), "=&r" (success) - : "m" (*lock) : "memory"); - - return success; -} - -static inline int __raw_write_trylock(raw_rwlock_t * lock) -{ - long regx; - int success; - - __asm__ __volatile__( - "1: ldl_l %1,%0\n" - " lda %2,0\n" - " bne %1,2f\n" - " lda %2,1\n" - " stl_c %2,%0\n" - " beq %2,6f\n" - "2: mb\n" - ".subsection 2\n" - "6: br 1b\n" - ".previous" - : "=m" (*lock), "=&r" (regx), "=&r" (success) - : "m" (*lock) : "memory"); - - return success; -} - -static inline void __raw_read_unlock(raw_rwlock_t * lock) -{ - long regx; - __asm__ __volatile__( - " mb\n" - "1: ldl_l %1,%0\n" - " addl %1,2,%1\n" - " stl_c %1,%0\n" - " beq %1,6f\n" - ".subsection 2\n" - "6: br 1b\n" - ".previous" - : "=m" 
(*lock), "=&r" (regx) - : "m" (*lock) : "memory"); -} - -static inline void __raw_write_unlock(raw_rwlock_t * lock) -{ - mb(); - lock->lock = 0; -} - -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() - -#endif /* _ALPHA_SPINLOCK_H */ diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h deleted file mode 100644 index 8141eb5ebf0..00000000000 --- a/include/asm-alpha/spinlock_types.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ALPHA_SPINLOCK_TYPES_H -#define _ALPHA_SPINLOCK_TYPES_H - -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - -typedef struct { - volatile unsigned int lock; -} raw_spinlock_t; - -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } - -typedef struct { - volatile unsigned int lock; -} raw_rwlock_t; - -#define __RAW_RW_LOCK_UNLOCKED { 0 } - -#endif diff --git a/include/asm-alpha/stat.h b/include/asm-alpha/stat.h deleted file mode 100644 index 07ad3e6b3f3..00000000000 --- a/include/asm-alpha/stat.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef _ALPHA_STAT_H -#define _ALPHA_STAT_H - -struct stat { - unsigned int st_dev; - unsigned int st_ino; - unsigned int st_mode; - unsigned int st_nlink; - unsigned int st_uid; - unsigned int st_gid; - unsigned int st_rdev; - long st_size; - unsigned long st_atime; - unsigned long st_mtime; - unsigned long st_ctime; - unsigned int st_blksize; - unsigned int st_blocks; - unsigned int st_flags; - unsigned int st_gen; -}; - -/* The stat64 structure increases the size of dev_t, blkcnt_t, adds - nanosecond resolution times, and padding for expansion. */ - -struct stat64 { - unsigned long st_dev; - unsigned long st_ino; - unsigned long st_rdev; - long st_size; - unsigned long st_blocks; - - unsigned int st_mode; - unsigned int st_uid; - unsigned int st_gid; - unsigned int st_blksize; - unsigned int st_nlink; - unsigned int __pad0; - - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; - long __unused[3]; -}; - -#endif diff --git a/include/asm-alpha/statfs.h b/include/asm-alpha/statfs.h deleted file mode 100644 index ad15830baef..00000000000 --- a/include/asm-alpha/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ALPHA_STATFS_H -#define _ALPHA_STATFS_H - -#include - -#endif diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h deleted file mode 100644 index b02b8a28294..00000000000 --- a/include/asm-alpha/string.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef __ALPHA_STRING_H__ -#define __ALPHA_STRING_H__ - -#ifdef __KERNEL__ - -/* - * GCC of any recent vintage doesn't do stupid things with bcopy. - * EGCS 1.1 knows all about expanding memcpy inline, others don't. - * - * Similarly for a memset with data = 0. - */ - -#define __HAVE_ARCH_MEMCPY -extern void * memcpy(void *, const void *, size_t); -#define __HAVE_ARCH_MEMMOVE -extern void * memmove(void *, const void *, size_t); - -/* For backward compatibility with modules. Unused otherwise. */ -extern void * __memcpy(void *, const void *, size_t); - -#define memcpy __builtin_memcpy - -#define __HAVE_ARCH_MEMSET -extern void * __constant_c_memset(void *, unsigned long, size_t); -extern void * __memset(void *, int, size_t); -extern void * memset(void *, int, size_t); - -#define memset(s, c, n) \ -(__builtin_constant_p(c) \ - ? (__builtin_constant_p(n) && (c) == 0 \ - ? 
__builtin_memset((s),0,(n)) \ - : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \ - : __memset((s),(c),(n))) - -#define __HAVE_ARCH_STRCPY -extern char * strcpy(char *,const char *); -#define __HAVE_ARCH_STRNCPY -extern char * strncpy(char *, const char *, size_t); -#define __HAVE_ARCH_STRCAT -extern char * strcat(char *, const char *); -#define __HAVE_ARCH_STRNCAT -extern char * strncat(char *, const char *, size_t); -#define __HAVE_ARCH_STRCHR -extern char * strchr(const char *,int); -#define __HAVE_ARCH_STRRCHR -extern char * strrchr(const char *,int); -#define __HAVE_ARCH_STRLEN -extern size_t strlen(const char *); -#define __HAVE_ARCH_MEMCHR -extern void * memchr(const void *, int, size_t); - -/* The following routine is like memset except that it writes 16-bit - aligned values. The DEST and COUNT parameters must be even for - correct operation. */ - -#define __HAVE_ARCH_MEMSETW -extern void * __memsetw(void *dest, unsigned short, size_t count); - -#define memsetw(s, c, n) \ -(__builtin_constant_p(c) \ - ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \ - : __memsetw((s),(c),(n))) - -#endif /* __KERNEL__ */ - -#endif /* __ALPHA_STRING_H__ */ diff --git a/include/asm-alpha/suspend.h b/include/asm-alpha/suspend.h deleted file mode 100644 index c7042d57585..00000000000 --- a/include/asm-alpha/suspend.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ALPHA_SUSPEND_H -#define __ALPHA_SUSPEND_H - -/* Dummy include. */ - -#endif /* __ALPHA_SUSPEND_H */ diff --git a/include/asm-alpha/sysinfo.h b/include/asm-alpha/sysinfo.h deleted file mode 100644 index 086aba284df..00000000000 --- a/include/asm-alpha/sysinfo.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * include/asm-alpha/sysinfo.h - */ - -#ifndef __ASM_ALPHA_SYSINFO_H -#define __ASM_ALPHA_SYSINFO_H - -/* This defines the subset of the OSF/1 getsysinfo/setsysinfo calls - that we support. */ - -#define GSI_UACPROC 8 -#define GSI_IEEE_FP_CONTROL 45 -#define GSI_IEEE_STATE_AT_SIGNAL 46 -#define GSI_PROC_TYPE 60 -#define GSI_GET_HWRPB 101 - -#define SSI_NVPAIRS 1 -#define SSI_IEEE_FP_CONTROL 14 -#define SSI_IEEE_STATE_AT_SIGNAL 15 -#define SSI_IEEE_IGNORE_STATE_AT_SIGNAL 16 -#define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */ - -#define SSIN_UACPROC 6 - -#define UAC_BITMASK 7 -#define UAC_NOPRINT 1 -#define UAC_NOFIX 2 -#define UAC_SIGBUS 4 - - -#ifdef __KERNEL__ - -/* This is the shift that is applied to the UAC bits as stored in the - per-thread flags. See thread_info.h. */ -#define UAC_SHIFT 6 - -#endif - -#endif /* __ASM_ALPHA_SYSINFO_H */ diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h deleted file mode 100644 index afe20fa58c9..00000000000 --- a/include/asm-alpha/system.h +++ /dev/null @@ -1,829 +0,0 @@ -#ifndef __ALPHA_SYSTEM_H -#define __ALPHA_SYSTEM_H - -#include -#include -#include - -/* - * System defines.. Note that this is included both from .c and .S - * files, so it does only defines, not any C code. - */ - -/* - * We leave one page for the initial stack page, and one page for - * the initial process structure. Also, the console eats 3 MB for - * the initial bootloader (one of which we can reclaim later). - */ -#define BOOT_PCB 0x20000000 -#define BOOT_ADDR 0x20000000 -/* Remove when official MILO sources have ELF support: */ -#define BOOT_SIZE (16*1024) - -#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS -#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. 
*/ -#else -#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ -#endif - -#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) -#define SWAPPER_PGD KERNEL_START -#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) -#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) -#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) -#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) - -#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) - -/* - * This is setup by the secondary bootstrap loader. Because - * the zero page is zeroed out as soon as the vm system is - * initialized, we need to copy things out into a more permanent - * place. - */ -#define PARAM ZERO_PGE -#define COMMAND_LINE ((char*)(PARAM + 0x0000)) -#define INITRD_START (*(unsigned long *) (PARAM+0x100)) -#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) - -#ifndef __ASSEMBLY__ -#include -#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */ - -/* - * This is the logout header that should be common to all platforms - * (assuming they are running OSF/1 PALcode, I guess). - */ -struct el_common { - unsigned int size; /* size in bytes of logout area */ - unsigned int sbz1 : 30; /* should be zero */ - unsigned int err2 : 1; /* second error */ - unsigned int retry : 1; /* retry flag */ - unsigned int proc_offset; /* processor-specific offset */ - unsigned int sys_offset; /* system-specific offset */ - unsigned int code; /* machine check code */ - unsigned int frame_rev; /* frame revision */ -}; - -/* Machine Check Frame for uncorrectable errors (Large format) - * --- This is used to log uncorrectable errors such as - * double bit ECC errors. - * --- These errors are detected by both processor and systems. - */ -struct el_common_EV5_uncorrectable_mcheck { - unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ - unsigned long paltemp[24]; /* PAL TEMP REGS. */ - unsigned long exc_addr; /* Address of excepting instruction*/ - unsigned long exc_sum; /* Summary of arithmetic traps. */ - unsigned long exc_mask; /* Exception mask (from exc_sum). */ - unsigned long pal_base; /* Base address for PALcode. */ - unsigned long isr; /* Interrupt Status Reg. */ - unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */ - unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity - <12> set TAG parity*/ - unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1: - <2> Data error in bank 0 - <3> Data error in bank 1 - <4> Tag error in bank 0 - <5> Tag error in bank 1 */ - unsigned long va; /* Effective VA of fault or miss. */ - unsigned long mm_stat; /* Holds the reason for D-stream - fault or D-cache parity errors */ - unsigned long sc_addr; /* Address that was being accessed - when EV5 detected Secondary cache - failure. */ - unsigned long sc_stat; /* Helps determine if the error was - TAG/Data parity(Secondary Cache)*/ - unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */ - unsigned long ei_addr; /* Physical address of any transfer - that is logged in EV5 EI_STAT */ - unsigned long fill_syndrome; /* For correcting ECC errors. */ - unsigned long ei_stat; /* Helps identify reason of any - processor uncorrectable error - at its external interface. 
*/ - unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/ -}; - -struct el_common_EV6_mcheck { - unsigned int FrameSize; /* Bytes, including this field */ - unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */ - unsigned int CpuOffset; /* Offset to CPU-specific info */ - unsigned int SystemOffset; /* Offset to system-specific info */ - unsigned int MCHK_Code; - unsigned int MCHK_Frame_Rev; - unsigned long I_STAT; /* EV6 Internal Processor Registers */ - unsigned long DC_STAT; /* (See the 21264 Spec) */ - unsigned long C_ADDR; - unsigned long DC1_SYNDROME; - unsigned long DC0_SYNDROME; - unsigned long C_STAT; - unsigned long C_STS; - unsigned long MM_STAT; - unsigned long EXC_ADDR; - unsigned long IER_CM; - unsigned long ISUM; - unsigned long RESERVED0; - unsigned long PAL_BASE; - unsigned long I_CTL; - unsigned long PCTX; -}; - -extern void halt(void) __attribute__((noreturn)); -#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) - -#define switch_to(P,N,L) \ - do { \ - (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \ - check_mmu_context(); \ - } while (0) - -struct task_struct; -extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); - -#define imb() \ -__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") - -#define draina() \ -__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory") - -enum implver_enum { - IMPLVER_EV4, - IMPLVER_EV5, - IMPLVER_EV6 -}; - -#ifdef CONFIG_ALPHA_GENERIC -#define implver() \ -({ unsigned long __implver; \ - __asm__ ("implver %0" : "=r"(__implver)); \ - (enum implver_enum) __implver; }) -#else -/* Try to eliminate some dead code. */ -#ifdef CONFIG_ALPHA_EV4 -#define implver() IMPLVER_EV4 -#endif -#ifdef CONFIG_ALPHA_EV5 -#define implver() IMPLVER_EV5 -#endif -#if defined(CONFIG_ALPHA_EV6) -#define implver() IMPLVER_EV6 -#endif -#endif - -enum amask_enum { - AMASK_BWX = (1UL << 0), - AMASK_FIX = (1UL << 1), - AMASK_CIX = (1UL << 2), - AMASK_MAX = (1UL << 8), - AMASK_PRECISE_TRAP = (1UL << 9), -}; - -#define amask(mask) \ -({ unsigned long __amask, __input = (mask); \ - __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \ - __amask; }) - -#define __CALL_PAL_R0(NAME, TYPE) \ -extern inline TYPE NAME(void) \ -{ \ - register TYPE __r0 __asm__("$0"); \ - __asm__ __volatile__( \ - "call_pal %1 # " #NAME \ - :"=r" (__r0) \ - :"i" (PAL_ ## NAME) \ - :"$1", "$16", "$22", "$23", "$24", "$25"); \ - return __r0; \ -} - -#define __CALL_PAL_W1(NAME, TYPE0) \ -extern inline void NAME(TYPE0 arg0) \ -{ \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - __asm__ __volatile__( \ - "call_pal %1 # "#NAME \ - : "=r"(__r16) \ - : "i"(PAL_ ## NAME), "0"(__r16) \ - : "$1", "$22", "$23", "$24", "$25"); \ -} - -#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \ -extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \ -{ \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - register TYPE1 __r17 __asm__("$17") = arg1; \ - __asm__ __volatile__( \ - "call_pal %2 # "#NAME \ - : "=r"(__r16), "=r"(__r17) \ - : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ - : "$1", "$22", "$23", "$24", "$25"); \ -} - -#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \ -extern inline RTYPE NAME(TYPE0 arg0) \ -{ \ - register RTYPE __r0 __asm__("$0"); \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - __asm__ __volatile__( \ - "call_pal %2 # "#NAME \ - : "=r"(__r16), "=r"(__r0) \ - : "i"(PAL_ ## NAME), "0"(__r16) \ - : "$1", "$22", "$23", "$24", "$25"); \ - return __r0; \ -} - -#define 
__CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \ -extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ -{ \ - register RTYPE __r0 __asm__("$0"); \ - register TYPE0 __r16 __asm__("$16") = arg0; \ - register TYPE1 __r17 __asm__("$17") = arg1; \ - __asm__ __volatile__( \ - "call_pal %3 # "#NAME \ - : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ - : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ - : "$1", "$22", "$23", "$24", "$25"); \ - return __r0; \ -} - -__CALL_PAL_W1(cflush, unsigned long); -__CALL_PAL_R0(rdmces, unsigned long); -__CALL_PAL_R0(rdps, unsigned long); -__CALL_PAL_R0(rdusp, unsigned long); -__CALL_PAL_RW1(swpipl, unsigned long, unsigned long); -__CALL_PAL_R0(whami, unsigned long); -__CALL_PAL_W2(wrent, void*, unsigned long); -__CALL_PAL_W1(wripir, unsigned long); -__CALL_PAL_W1(wrkgp, unsigned long); -__CALL_PAL_W1(wrmces, unsigned long); -__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); -__CALL_PAL_W1(wrusp, unsigned long); -__CALL_PAL_W1(wrvptptr, unsigned long); - -#define IPL_MIN 0 -#define IPL_SW0 1 -#define IPL_SW1 2 -#define IPL_DEV0 3 -#define IPL_DEV1 4 -#define IPL_TIMER 5 -#define IPL_PERF 6 -#define IPL_POWERFAIL 6 -#define IPL_MCHECK 7 -#define IPL_MAX 7 - -#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK -#undef IPL_MIN -#define IPL_MIN __min_ipl -extern int __min_ipl; -#endif - -#define getipl() (rdps() & 7) -#define setipl(ipl) ((void) swpipl(ipl)) - -#define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) -#define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) -#define local_save_flags(flags) ((flags) = rdps()) -#define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) -#define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) - -#define irqs_disabled() (getipl() == IPL_MAX) - -/* - * TB routines.. - */ -#define __tbi(nr,arg,arg1...) \ -({ \ - register unsigned long __r16 __asm__("$16") = (nr); \ - register unsigned long __r17 __asm__("$17"); arg; \ - __asm__ __volatile__( \ - "call_pal %3 #__tbi" \ - :"=r" (__r16),"=r" (__r17) \ - :"0" (__r16),"i" (PAL_tbi) ,##arg1 \ - :"$0", "$1", "$22", "$23", "$24", "$25"); \ -}) - -#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17)) -#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17)) -#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17)) -#define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) -#define tbiap() __tbi(-1, /* no second argument */) -#define tbia() __tbi(-2, /* no second argument */) - -/* - * Atomic exchange. - * Since it can be used to implement critical sections - * it must clobber "memory" (also for interrupts in UP). 
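 * (Aside, illustrative only -- not part of the original header.  A minimal
 * sketch of the sort of use that motivates the "memory" clobber: a crude
 * test-and-set lock built on the xchg() defined below, where the clobber
 * keeps the compiler from moving accesses to the protected data across the
 * exchange.  `busy' is a hypothetical lock word:
 *
 *     static unsigned long busy;
 *
 *     static void crude_lock(void)
 *     {
 *         while (xchg(&busy, 1) != 0)
 *             cpu_relax();                 // spin until we see the 0 -> 1 transition
 *     }
 *
 *     static void crude_unlock(void)
 *     {
 *         xchg(&busy, 0);                  // heavier than strictly needed
 *     }
 *
 * A real lock would of course use the spinlock primitives; this only
 * illustrates the ordering role of the clobber.)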
- */ - -static inline unsigned long -__xchg_u8(volatile char *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " insbl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extbl %2,%4,%0\n" - " mskbl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u16(volatile short *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " inswl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extwl %2,%4,%0\n" - " mskwl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u32(volatile int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " bis $31,%3,%1\n" - " stl_c %1,%2\n" - " beq %1,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -static inline unsigned long -__xchg_u64(volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldq_l %0,%4\n" - " bis $31,%3,%1\n" - " stq_c %1,%2\n" - " beq %1,2f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid xchg(). 
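 * (Aside, illustrative only: the function declared below is deliberately
 * never defined anywhere in the kernel.  The sizeof() switch in the __xchg()
 * macro further down is resolved at compile time, so for the supported
 * operand sizes (1, 2, 4 and 8 bytes) the call is dead code and disappears;
 * an unsupported size leaves the call standing and the final link fails,
 * turning a bad xchg() into a build-time error instead of a run-time one.)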
*/ -extern void __xchg_called_with_bad_pointer(void); - -#define __xchg(ptr, x, size) \ -({ \ - unsigned long __xchg__res; \ - volatile void *__xchg__ptr = (ptr); \ - switch (size) { \ - case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \ - case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \ - case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \ - case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \ - default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ - } \ - __xchg__res; \ -}) - -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -static inline unsigned long -__xchg_u8_local(volatile char *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " insbl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extbl %2,%4,%0\n" - " mskbl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u16_local(volatile short *m, unsigned long val) -{ - unsigned long ret, tmp, addr64; - - __asm__ __volatile__( - " andnot %4,7,%3\n" - " inswl %1,%4,%1\n" - "1: ldq_l %2,0(%3)\n" - " extwl %2,%4,%0\n" - " mskwl %2,%4,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%3)\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) - : "r" ((long)m), "1" (val) : "memory"); - - return ret; -} - -static inline unsigned long -__xchg_u32_local(volatile int *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldl_l %0,%4\n" - " bis $31,%3,%1\n" - " stl_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -static inline unsigned long -__xchg_u64_local(volatile long *m, unsigned long val) -{ - unsigned long dummy; - - __asm__ __volatile__( - "1: ldq_l %0,%4\n" - " bis $31,%3,%1\n" - " stq_c %1,%2\n" - " beq %1,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (val), "=&r" (dummy), "=m" (*m) - : "rI" (val), "m" (*m) : "memory"); - - return val; -} - -#define __xchg_local(ptr, x, size) \ -({ \ - unsigned long __xchg__res; \ - volatile void *__xchg__ptr = (ptr); \ - switch (size) { \ - case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \ - case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \ - case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \ - case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \ - default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ - } \ - __xchg__res; \ -}) - -#define xchg_local(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ - sizeof(*(ptr))); \ - }) - -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. - * - * The memory barrier should be placed in SMP only when we actually - * make the change. If we don't change anything (so if the returned - * prev is equal to old) then we aren't acquiring anything new and - * we don't need any memory barrier as far I can tell. 
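 * (Aside, illustrative only -- not part of the original header.  The usual
 * pattern built on the cmpxchg() defined below is a retry loop; here a
 * lock-free increment of a hypothetical `unsigned long counter':
 *
 *     unsigned long old, new;
 *
 *     do {
 *         old = counter;
 *         new = old + 1;
 *     } while (cmpxchg(&counter, old, new) != old);
 *
 * Success is detected exactly as described above: the returned previous
 * value equals the `old' that was passed in.)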
- */ - -#define __HAVE_ARCH_CMPXCHG 1 - -static inline unsigned long -__cmpxchg_u8(volatile char *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " insbl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extbl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskbl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u16(volatile short *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " inswl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extwl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskwl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u32(volatile int *m, int old, int new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldl_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldq_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,3f\n" -#ifdef CONFIG_SMP - " mb\n" -#endif - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). 
*/ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) -{ - switch (size) { - case 1: - return __cmpxchg_u8(ptr, old, new); - case 2: - return __cmpxchg_u16(ptr, old, new); - case 4: - return __cmpxchg_u32(ptr, old, new); - case 8: - return __cmpxchg_u64(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) -#define cmpxchg64(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg((ptr), (o), (n)); \ - }) - -static inline unsigned long -__cmpxchg_u8_local(volatile char *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " insbl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extbl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskbl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u16_local(volatile short *m, long old, long new) -{ - unsigned long prev, tmp, cmp, addr64; - - __asm__ __volatile__( - " andnot %5,7,%4\n" - " inswl %1,%5,%1\n" - "1: ldq_l %2,0(%4)\n" - " extwl %2,%5,%0\n" - " cmpeq %0,%6,%3\n" - " beq %3,2f\n" - " mskwl %2,%5,%2\n" - " or %1,%2,%2\n" - " stq_c %2,0(%4)\n" - " beq %2,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) - : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u32_local(volatile int *m, int old, int new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldl_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stl_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static inline unsigned long -__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new) -{ - unsigned long prev, cmp; - - __asm__ __volatile__( - "1: ldq_l %0,%5\n" - " cmpeq %0,%3,%1\n" - " beq %1,2f\n" - " mov %4,%1\n" - " stq_c %1,%2\n" - " beq %1,3f\n" - "2:\n" - ".subsection 2\n" - "3: br 1b\n" - ".previous" - : "=&r"(prev), "=&r"(cmp), "=m"(*m) - : "r"((long) old), "r"(new), "m"(*m) : "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, - int size) -{ - switch (size) { - case 1: - return __cmpxchg_u8_local(ptr, old, new); - case 2: - return __cmpxchg_u16_local(ptr, old, new); - case 4: - return __cmpxchg_u32_local(ptr, old, new); - case 8: - return __cmpxchg_u64_local(ptr, old, new); - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg_local(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), 
(o), (n)); \ - }) - - -#endif /* __ASSEMBLY__ */ - -#define arch_align_stack(x) (x) - -#endif diff --git a/include/asm-alpha/termbits.h b/include/asm-alpha/termbits.h deleted file mode 100644 index ad854a4a3af..00000000000 --- a/include/asm-alpha/termbits.h +++ /dev/null @@ -1,200 +0,0 @@ -#ifndef _ALPHA_TERMBITS_H -#define _ALPHA_TERMBITS_H - -#include - -typedef unsigned char cc_t; -typedef unsigned int speed_t; -typedef unsigned int tcflag_t; - -/* - * termios type and macro definitions. Be careful about adding stuff - * to this file since it's used in GNU libc and there are strict rules - * concerning namespace pollution. - */ - -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_cc[NCCS]; /* control characters */ - cc_t c_line; /* line discipline (== c_cc[19]) */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -/* Alpha has matching termios and ktermios */ - -struct ktermios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_cc[NCCS]; /* control characters */ - cc_t c_line; /* line discipline (== c_cc[19]) */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -/* c_cc characters */ -#define VEOF 0 -#define VEOL 1 -#define VEOL2 2 -#define VERASE 3 -#define VWERASE 4 -#define VKILL 5 -#define VREPRINT 6 -#define VSWTC 7 -#define VINTR 8 -#define VQUIT 9 -#define VSUSP 10 -#define VSTART 12 -#define VSTOP 13 -#define VLNEXT 14 -#define VDISCARD 15 -#define VMIN 16 -#define VTIME 17 - -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IXON 0001000 -#define IXOFF 0002000 -#define IXANY 0004000 -#define IUCLC 0010000 -#define IMAXBEL 0020000 -#define IUTF8 0040000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define ONLCR 0000002 -#define OLCUC 0000004 - -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 - -#define OFILL 00000100 -#define OFDEL 00000200 -#define NLDLY 00001400 -#define NL0 00000000 -#define NL1 00000400 -#define NL2 00001000 -#define NL3 00001400 -#define TABDLY 00006000 -#define TAB0 00000000 -#define TAB1 00002000 -#define TAB2 00004000 -#define TAB3 00006000 -#define CRDLY 00030000 -#define CR0 00000000 -#define CR1 00010000 -#define CR2 00020000 -#define CR3 00030000 -#define FFDLY 00040000 -#define FF0 00000000 -#define FF1 00040000 -#define BSDLY 00100000 -#define BS0 00000000 -#define BS1 00100000 -#define VTDLY 00200000 -#define VT0 00000000 -#define VT1 00200000 -#define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. 
*/ - -/* c_cflag bit meaning */ -#define CBAUD 0000037 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CBAUDEX 0000000 -#define B57600 00020 -#define B115200 00021 -#define B230400 00022 -#define B460800 00023 -#define B500000 00024 -#define B576000 00025 -#define B921600 00026 -#define B1000000 00027 -#define B1152000 00030 -#define B1500000 00031 -#define B2000000 00032 -#define B2500000 00033 -#define B3000000 00034 -#define B3500000 00035 -#define B4000000 00036 - -#define CSIZE 00001400 -#define CS5 00000000 -#define CS6 00000400 -#define CS7 00001000 -#define CS8 00001400 - -#define CSTOPB 00002000 -#define CREAD 00004000 -#define PARENB 00010000 -#define PARODD 00020000 -#define HUPCL 00040000 - -#define CLOCAL 00100000 -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -/* c_lflag bits */ -#define ISIG 0x00000080 -#define ICANON 0x00000100 -#define XCASE 0x00004000 -#define ECHO 0x00000008 -#define ECHOE 0x00000002 -#define ECHOK 0x00000004 -#define ECHONL 0x00000010 -#define NOFLSH 0x80000000 -#define TOSTOP 0x00400000 -#define ECHOCTL 0x00000040 -#define ECHOPRT 0x00000020 -#define ECHOKE 0x00000001 -#define FLUSHO 0x00800000 -#define PENDIN 0x20000000 -#define IEXTEN 0x00000400 - -/* Values for the ACTION argument to `tcflow'. */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* Values for the QUEUE_SELECTOR argument to `tcflush'. */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 - -#endif /* _ALPHA_TERMBITS_H */ diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h deleted file mode 100644 index fa13716a11c..00000000000 --- a/include/asm-alpha/termios.h +++ /dev/null @@ -1,146 +0,0 @@ -#ifndef _ALPHA_TERMIOS_H -#define _ALPHA_TERMIOS_H - -#include -#include - -struct sgttyb { - char sg_ispeed; - char sg_ospeed; - char sg_erase; - char sg_kill; - short sg_flags; -}; - -struct tchars { - char t_intrc; - char t_quitc; - char t_startc; - char t_stopc; - char t_eofc; - char t_brkc; -}; - -struct ltchars { - char t_suspc; - char t_dsuspc; - char t_rprntc; - char t_flushc; - char t_werasc; - char t_lnextc; -}; - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -/* - * c_cc characters in the termio structure. Oh, how I love being - * backwardly compatible. Notice that character 4 and 5 are - * interpreted differently depending on whether ICANON is set in - * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise - * as _VMIN and V_TIME. This is for compatibility with OSF/1 (which - * is compatible with sysV)... 
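 * (Aside, illustrative only -- not part of the original header.  In practice
 * the overloading means the same c_cc slot changes meaning with ICANON; for
 * a struct termio t, roughly:
 *
 *     if (t.c_lflag & ICANON)
 *         eof_char = t.c_cc[_VEOF];     // slot 4 holds the EOF character
 *     else
 *         min_bytes = t.c_cc[_VMIN];    // the same slot 4 is the raw-mode MIN count
 *
 * (eof_char and min_bytes are just illustrative names).  This is exactly the
 * `canon ?' selection performed by the termio/termios conversion macros
 * further down in this header.)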
- */ -#define _VINTR 0 -#define _VQUIT 1 -#define _VERASE 2 -#define _VKILL 3 -#define _VEOF 4 -#define _VMIN 4 -#define _VEOL 5 -#define _VTIME 5 -#define _VEOL2 6 -#define _VSWTC 7 - -#ifdef __KERNEL__ -/* eof=^D eol=\0 eol2=\0 erase=del - werase=^W kill=^U reprint=^R sxtc=\0 - intr=^C quit=^\ susp=^Z - start=^Q stop=^S lnext=^V discard=^U - vmin=\1 vtime=\0 -*/ -#define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000" - -/* - * Translate a "termio" structure into a "termios". Ugh. - */ - -#define user_termio_to_kernel_termios(a_termios, u_termio) \ -({ \ - struct ktermios *k_termios = (a_termios); \ - struct termio k_termio; \ - int canon, ret; \ - \ - ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio)); \ - if (!ret) { \ - /* Overwrite only the low bits. */ \ - *(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; \ - *(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; \ - *(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; \ - *(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; \ - canon = k_termio.c_lflag & ICANON; \ - \ - k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR]; \ - k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT]; \ - k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE]; \ - k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL]; \ - k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2]; \ - k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC]; \ - k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; \ - k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; \ - } \ - ret; \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - * - * Note the "fun" _VMIN overloading. - */ -#define kernel_termios_to_user_termio(u_termio, a_termios) \ -({ \ - struct ktermios *k_termios = (a_termios); \ - struct termio k_termio; \ - int canon; \ - \ - k_termio.c_iflag = k_termios->c_iflag; \ - k_termio.c_oflag = k_termios->c_oflag; \ - k_termio.c_cflag = k_termios->c_cflag; \ - canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON; \ - \ - k_termio.c_line = k_termios->c_line; \ - k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR]; \ - k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT]; \ - k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE]; \ - k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL]; \ - k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN]; \ - k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ? 
VEOL : VTIME]; \ - k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2]; \ - k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC]; \ - \ - copy_to_user(u_termio, &k_termio, sizeof(k_termio)); \ -}) - -#define user_termios_to_kernel_termios(k, u) \ - copy_from_user(k, u, sizeof(struct termios)) - -#define kernel_termios_to_user_termios(u, k) \ - copy_to_user(u, k, sizeof(struct termios)) - -#endif /* __KERNEL__ */ - -#endif /* _ALPHA_TERMIOS_H */ diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h deleted file mode 100644 index 15fda434442..00000000000 --- a/include/asm-alpha/thread_info.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef _ALPHA_THREAD_INFO_H -#define _ALPHA_THREAD_INFO_H - -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ -#include -#include -#include -#endif - -#ifndef __ASSEMBLY__ -struct thread_info { - struct pcb_struct pcb; /* palcode state */ - - struct task_struct *task; /* main task structure */ - unsigned int flags; /* low level flags */ - unsigned int ieee_state; /* see fpu.h */ - - struct exec_domain *exec_domain; /* execution domain */ - mm_segment_t addr_limit; /* thread address space */ - unsigned cpu; /* current CPU */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - - int bpt_nsaved; - unsigned long bpt_addr[2]; /* breakpoint handling */ - unsigned int bpt_insn[2]; - - struct restart_block restart_block; -}; - -/* - * Macros/functions for gaining access to the thread information structure. - */ -#define INIT_THREAD_INFO(tsk) \ -{ \ - .task = &tsk, \ - .exec_domain = &default_exec_domain, \ - .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ -} - -#define init_thread_info (init_thread_union.thread_info) -#define init_stack (init_thread_union.stack) - -/* How to get the thread information struct from C. */ -register struct thread_info *__current_thread_info __asm__("$8"); -#define current_thread_info() __current_thread_info - -/* Thread information allocation. */ -#define THREAD_SIZE_ORDER 1 -#define THREAD_SIZE (2*PAGE_SIZE) - -#endif /* __ASSEMBLY__ */ - -#define PREEMPT_ACTIVE 0x40000000 - -/* - * Thread information flags: - * - these are process state flags and used from assembly - * - pending work-to-be-done flags come first to fit in and immediate operand. - * - * TIF_SYSCALL_TRACE is known to be 0 via blbs. 
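
Bit 0 is reserved for TIF_SYSCALL_TRACE so the assembly entry path can test it with a single blbs (branch on low bit set), and the low-numbered "work" bits can be checked together with one immediate mask. A small standalone sketch of that convention (the mask name here is illustrative, not the kernel's):

    #include <stdio.h>

    enum { TIF_SYSCALL_TRACE = 0, TIF_SIGPENDING = 1, TIF_NEED_RESCHED = 2 };

    /* Low-numbered bits combine into a mask that still fits an
     * immediate operand, so one AND covers all pending-work checks. */
    #define WORK_MASK ((1u << TIF_SIGPENDING) | (1u << TIF_NEED_RESCHED))

    int main(void)
    {
        unsigned int flags = 1u << TIF_SIGPENDING;

        if (flags & WORK_MASK)
            puts("work pending before returning to user space");
        return 0;
    }
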
- */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_SIGPENDING 1 /* signal pending */ -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_POLLING_NRFLAG 3 /* poll_idle is polling NEED_RESCHED */ -#define TIF_DIE_IF_KERNEL 4 /* dik recursion lock */ -#define TIF_UAC_NOPRINT 5 /* see sysinfo.h */ -#define TIF_UAC_NOFIX 6 -#define TIF_UAC_SIGBUS 7 -#define TIF_MEMDIE 8 -#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ - -#define _TIF_SYSCALL_TRACE (1<flags = ((task_thread_info(task)->flags & \ - ~ALPHA_UAC_MASK) \ - | (((value) << ALPHA_UAC_SHIFT) & (1<flags & (1 << TIF_UAC_NOPRINT))\ - >> ALPHA_UAC_SHIFT \ - | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\ - >> (ALPHA_UAC_SHIFT + 1) \ - | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\ - >> (ALPHA_UAC_SHIFT - 1), \ - (int __user *)(value)); \ - }) - -#endif /* __KERNEL__ */ -#endif /* _ALPHA_THREAD_INFO_H */ diff --git a/include/asm-alpha/timex.h b/include/asm-alpha/timex.h deleted file mode 100644 index afa0c45e3e9..00000000000 --- a/include/asm-alpha/timex.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * linux/include/asm-alpha/timex.h - * - * ALPHA architecture timex specifications - */ -#ifndef _ASMALPHA_TIMEX_H -#define _ASMALPHA_TIMEX_H - -/* With only one or two oddballs, we use the RTC as the ticker, selecting - the 32.768kHz reference clock, which nicely divides down to our HZ. */ -#define CLOCK_TICK_RATE 32768 - -/* - * Standard way to access the cycle counter. - * Currently only used on SMP for scheduling. - * - * Only the low 32 bits are available as a continuously counting entity. - * But this only means we'll force a reschedule every 8 seconds or so, - * which isn't an evil thing. - */ - -typedef unsigned int cycles_t; - -static inline cycles_t get_cycles (void) -{ - cycles_t ret; - __asm__ __volatile__ ("rpcc %0" : "=r"(ret)); - return ret; -} - -#endif diff --git a/include/asm-alpha/tlb.h b/include/asm-alpha/tlb.h deleted file mode 100644 index c13636575fb..00000000000 --- a/include/asm-alpha/tlb.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _ALPHA_TLB_H -#define _ALPHA_TLB_H - -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) - -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - -#include - -#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) -#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) - -#endif diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h deleted file mode 100644 index 9d87aaa08c0..00000000000 --- a/include/asm-alpha/tlbflush.h +++ /dev/null @@ -1,151 +0,0 @@ -#ifndef _ALPHA_TLBFLUSH_H -#define _ALPHA_TLBFLUSH_H - -#include -#include -#include - -#ifndef __EXTERN_INLINE -#define __EXTERN_INLINE extern inline -#define __MMU_EXTERN_INLINE -#endif - -extern void __load_new_mm_context(struct mm_struct *); - - -/* Use a few helper functions to hide the ugly broken ASN - numbers on early Alphas (ev4 and ev45). */ - -__EXTERN_INLINE void -ev4_flush_tlb_current(struct mm_struct *mm) -{ - __load_new_mm_context(mm); - tbiap(); -} - -__EXTERN_INLINE void -ev5_flush_tlb_current(struct mm_struct *mm) -{ - __load_new_mm_context(mm); -} - -/* Flush just one page in the current TLB set. We need to be very - careful about the icache here, there is no way to invalidate a - specific icache page. 
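
The 32-bit rpcc counter read by get_cycles() in timex.h above wraps roughly every 8 seconds, but unsigned arithmetic keeps interval measurement simple as long as the interval itself stays under 2^32 cycles. A standalone sketch (hypothetical helper, plain C):

    #include <stdio.h>

    typedef unsigned int cycles_t;          /* matches the timex.h above */

    /* Subtraction is modulo 2^32 for unsigned 32-bit values, so the
     * delta is correct even if the counter wrapped between samples. */
    static cycles_t cycles_elapsed(cycles_t start, cycles_t end)
    {
        return end - start;
    }

    int main(void)
    {
        cycles_t start = 0xfffffff0u;       /* just before wraparound */
        cycles_t end   = 0x00000010u;       /* just after             */

        printf("elapsed = %u cycles\n", cycles_elapsed(start, end)); /* 32 */
        return 0;
    }
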
*/ - -__EXTERN_INLINE void -ev4_flush_tlb_current_page(struct mm_struct * mm, - struct vm_area_struct *vma, - unsigned long addr) -{ - int tbi_flag = 2; - if (vma->vm_flags & VM_EXEC) { - __load_new_mm_context(mm); - tbi_flag = 3; - } - tbi(tbi_flag, addr); -} - -__EXTERN_INLINE void -ev5_flush_tlb_current_page(struct mm_struct * mm, - struct vm_area_struct *vma, - unsigned long addr) -{ - if (vma->vm_flags & VM_EXEC) - __load_new_mm_context(mm); - else - tbi(2, addr); -} - - -#ifdef CONFIG_ALPHA_GENERIC -# define flush_tlb_current alpha_mv.mv_flush_tlb_current -# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page -#else -# ifdef CONFIG_ALPHA_EV4 -# define flush_tlb_current ev4_flush_tlb_current -# define flush_tlb_current_page ev4_flush_tlb_current_page -# else -# define flush_tlb_current ev5_flush_tlb_current -# define flush_tlb_current_page ev5_flush_tlb_current_page -# endif -#endif - -#ifdef __MMU_EXTERN_INLINE -#undef __EXTERN_INLINE -#undef __MMU_EXTERN_INLINE -#endif - -/* Flush current user mapping. */ -static inline void -flush_tlb(void) -{ - flush_tlb_current(current->active_mm); -} - -/* Flush someone else's user mapping. */ -static inline void -flush_tlb_other(struct mm_struct *mm) -{ - unsigned long *mmc = &mm->context[smp_processor_id()]; - /* Check it's not zero first to avoid cacheline ping pong - when possible. */ - if (*mmc) *mmc = 0; -} - -#ifndef CONFIG_SMP -/* Flush everything (kernel mapping may also have changed - due to vmalloc/vfree). */ -static inline void flush_tlb_all(void) -{ - tbia(); -} - -/* Flush a specified user mapping. */ -static inline void -flush_tlb_mm(struct mm_struct *mm) -{ - if (mm == current->active_mm) - flush_tlb_current(mm); - else - flush_tlb_other(mm); -} - -/* Page-granular tlb flush. */ -static inline void -flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) -{ - struct mm_struct *mm = vma->vm_mm; - - if (mm == current->active_mm) - flush_tlb_current_page(mm, vma, addr); - else - flush_tlb_other(mm); -} - -/* Flush a specified range of user mapping. On the Alpha we flush - the whole user tlb. 
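
flush_tlb_other() above relies on a small but deliberate idiom: probe the per-CPU context word read-only and store only when it is non-zero, so the common already-flushed case never pulls the cache line exclusive. A minimal sketch of the same pattern (illustrative names, not the kernel code):

    #include <stdio.h>

    /* Test before writing: the no-op case stays a shared read instead
     * of an exclusive store that would ping-pong the cache line. */
    static void clear_if_set(unsigned long *slot)
    {
        if (*slot)
            *slot = 0;
    }

    int main(void)
    {
        unsigned long ctx = 42;

        clear_if_set(&ctx);     /* store: context was still live     */
        clear_if_set(&ctx);     /* pure read: nothing left to clear  */
        printf("%lu\n", ctx);   /* 0 */
        return 0;
    }
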
*/ -static inline void -flush_tlb_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end) -{ - flush_tlb_mm(vma->vm_mm); -} - -#else /* CONFIG_SMP */ - -extern void flush_tlb_all(void); -extern void flush_tlb_mm(struct mm_struct *); -extern void flush_tlb_page(struct vm_area_struct *, unsigned long); -extern void flush_tlb_range(struct vm_area_struct *, unsigned long, - unsigned long); - -#endif /* CONFIG_SMP */ - -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - flush_tlb_all(); -} - -#endif /* _ALPHA_TLBFLUSH_H */ diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h deleted file mode 100644 index 149532e162c..00000000000 --- a/include/asm-alpha/topology.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef _ASM_ALPHA_TOPOLOGY_H -#define _ASM_ALPHA_TOPOLOGY_H - -#include -#include -#include - -#ifdef CONFIG_NUMA -static inline int cpu_to_node(int cpu) -{ - int node; - - if (!alpha_mv.cpuid_to_nid) - return 0; - - node = alpha_mv.cpuid_to_nid(cpu); - -#ifdef DEBUG_NUMA - BUG_ON(node < 0); -#endif - - return node; -} - -static inline cpumask_t node_to_cpumask(int node) -{ - cpumask_t node_cpu_mask = CPU_MASK_NONE; - int cpu; - - for_each_online_cpu(cpu) { - if (cpu_to_node(cpu) == node) - cpu_set(cpu, node_cpu_mask); - } - -#ifdef DEBUG_NUMA - printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask); -#endif - - return node_cpu_mask; -} - -#define pcibus_to_cpumask(bus) (cpu_online_map) - -#endif /* !CONFIG_NUMA */ -# include - -#endif /* _ASM_ALPHA_TOPOLOGY_H */ diff --git a/include/asm-alpha/types.h b/include/asm-alpha/types.h deleted file mode 100644 index c1541353cce..00000000000 --- a/include/asm-alpha/types.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _ALPHA_TYPES_H -#define _ALPHA_TYPES_H - -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ -#include - -#ifndef __ASSEMBLY__ - -typedef unsigned int umode_t; - -#endif /* __ASSEMBLY__ */ - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 64 - -#ifndef __ASSEMBLY__ - -typedef u64 dma_addr_t; -typedef u64 dma64_addr_t; - -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ -#endif /* _ALPHA_TYPES_H */ diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h deleted file mode 100644 index 22de3b434a2..00000000000 --- a/include/asm-alpha/uaccess.h +++ /dev/null @@ -1,511 +0,0 @@ -#ifndef __ALPHA_UACCESS_H -#define __ALPHA_UACCESS_H - -#include -#include - - -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * Or at least it did once upon a time. Nowadays it is a mask that - * defines which bits of the address space are off limits. This is a - * wee bit faster than the above. - * - * For historical reasons, these macros are grossly misnamed. 
- */ - -#define KERNEL_DS ((mm_segment_t) { 0UL }) -#define USER_DS ((mm_segment_t) { -0x40000000000UL }) - -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - -#define get_fs() (current_thread_info()->addr_limit) -#define get_ds() (KERNEL_DS) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define segment_eq(a,b) ((a).seg == (b).seg) - -/* - * Is a address valid? This does a straightforward calculation rather - * than tests. - * - * Address valid if: - * - "addr" doesn't have any high-bits set - * - AND "size" doesn't have any high-bits set - * - AND "addr+size" doesn't have any high-bits set - * - OR we are in kernel mode. - */ -#define __access_ok(addr,size,segment) \ - (((segment).seg & (addr | size | (addr+size))) == 0) - -#define access_ok(type,addr,size) \ -({ \ - __chk_user_ptr(addr); \ - __access_ok(((unsigned long)(addr)),(size),get_fs()); \ -}) - -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - * - * As the alpha uses the same address space for kernel and user - * data, we can just do these as direct assignments. (Of course, the - * exception handling means that it's no longer "just"...) - * - * Careful to not - * (a) re-use the arguments for side effects (sizeof/typeof is ok) - * (b) require any knowledge of processes at this stage - */ -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) - -/* - * The "__xxx" versions do not do address space checking, useful when - * doing multiple accesses to the same area (the programmer has to do the - * checks by hand with "access_ok()") - */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) - -/* - * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to - * encode the bits we need for resolving the exception. See the - * more extensive comments with fixup_inline_exception below for - * more information. 
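
The __access_ok() test above is a pure mask check: with USER_DS the segment value has every bit at and above the user address-space limit set, so any address, size, or wrapped sum reaching into that region fails, while KERNEL_DS (0) accepts everything. A standalone sketch using those values (assumes 64-bit arithmetic, as on Alpha):

    #include <stdint.h>
    #include <stdio.h>

    static int addr_ok(uint64_t addr, uint64_t size, uint64_t seg)
    {
        /* Any high bit in addr, size, or addr + size trips the mask. */
        return (seg & (addr | size | (addr + size))) == 0;
    }

    int main(void)
    {
        uint64_t user_ds   = -(uint64_t)0x40000000000;  /* 0xfffffc0000000000 */
        uint64_t kernel_ds = 0;                         /* no bits off limits */

        printf("%d\n", addr_ok(0x120000000, 100, user_ds));         /* 1 */
        printf("%d\n", addr_ok(0xfffffc0000000000u, 8, user_ds));   /* 0 */
        printf("%d\n", addr_ok(0xfffffc0000000000u, 8, kernel_ds)); /* 1 */
        return 0;
    }
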
- */ - -extern void __get_user_unknown(void); - -#define __get_user_nocheck(x,ptr,size) \ -({ \ - long __gu_err = 0; \ - unsigned long __gu_val; \ - __chk_user_ptr(ptr); \ - switch (size) { \ - case 1: __get_user_8(ptr); break; \ - case 2: __get_user_16(ptr); break; \ - case 4: __get_user_32(ptr); break; \ - case 8: __get_user_64(ptr); break; \ - default: __get_user_unknown(); break; \ - } \ - (x) = (__typeof__(*(ptr))) __gu_val; \ - __gu_err; \ -}) - -#define __get_user_check(x,ptr,size,segment) \ -({ \ - long __gu_err = -EFAULT; \ - unsigned long __gu_val = 0; \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ - __gu_err = 0; \ - switch (size) { \ - case 1: __get_user_8(__gu_addr); break; \ - case 2: __get_user_16(__gu_addr); break; \ - case 4: __get_user_32(__gu_addr); break; \ - case 8: __get_user_64(__gu_addr); break; \ - default: __get_user_unknown(); break; \ - } \ - } \ - (x) = (__typeof__(*(ptr))) __gu_val; \ - __gu_err; \ -}) - -struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(struct __large_struct __user *)(x)) - -#define __get_user_64(addr) \ - __asm__("1: ldq %0,%2\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ - : "=r"(__gu_val), "=r"(__gu_err) \ - : "m"(__m(addr)), "1"(__gu_err)) - -#define __get_user_32(addr) \ - __asm__("1: ldl %0,%2\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ - : "=r"(__gu_val), "=r"(__gu_err) \ - : "m"(__m(addr)), "1"(__gu_err)) - -#ifdef __alpha_bwx__ -/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ - -#define __get_user_16(addr) \ - __asm__("1: ldwu %0,%2\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ - : "=r"(__gu_val), "=r"(__gu_err) \ - : "m"(__m(addr)), "1"(__gu_err)) - -#define __get_user_8(addr) \ - __asm__("1: ldbu %0,%2\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ - : "=r"(__gu_val), "=r"(__gu_err) \ - : "m"(__m(addr)), "1"(__gu_err)) -#else -/* Unfortunately, we can't get an unaligned access trap for the sub-word - load, so we have to do a general unaligned operation. 
*/ - -#define __get_user_16(addr) \ -{ \ - long __gu_tmp; \ - __asm__("1: ldq_u %0,0(%3)\n" \ - "2: ldq_u %1,1(%3)\n" \ - " extwl %0,%3,%0\n" \ - " extwh %1,%3,%1\n" \ - " or %0,%1,%0\n" \ - "3:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 3b-1b(%2)\n" \ - " .long 2b - .\n" \ - " lda %0, 3b-2b(%2)\n" \ - ".previous" \ - : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \ - : "r"(addr), "2"(__gu_err)); \ -} - -#define __get_user_8(addr) \ - __asm__("1: ldq_u %0,0(%2)\n" \ - " extbl %0,%2,%0\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ - : "=&r"(__gu_val), "=r"(__gu_err) \ - : "r"(addr), "1"(__gu_err)) -#endif - -extern void __put_user_unknown(void); - -#define __put_user_nocheck(x,ptr,size) \ -({ \ - long __pu_err = 0; \ - __chk_user_ptr(ptr); \ - switch (size) { \ - case 1: __put_user_8(x,ptr); break; \ - case 2: __put_user_16(x,ptr); break; \ - case 4: __put_user_32(x,ptr); break; \ - case 8: __put_user_64(x,ptr); break; \ - default: __put_user_unknown(); break; \ - } \ - __pu_err; \ -}) - -#define __put_user_check(x,ptr,size,segment) \ -({ \ - long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ - __pu_err = 0; \ - switch (size) { \ - case 1: __put_user_8(x,__pu_addr); break; \ - case 2: __put_user_16(x,__pu_addr); break; \ - case 4: __put_user_32(x,__pu_addr); break; \ - case 8: __put_user_64(x,__pu_addr); break; \ - default: __put_user_unknown(); break; \ - } \ - } \ - __pu_err; \ -}) - -/* - * The "__put_user_xx()" macros tell gcc they read from memory - * instead of writing: this is because they do not write to - * any memory gcc knows about, so there are no aliasing issues - */ -#define __put_user_64(x,addr) \ -__asm__ __volatile__("1: stq %r2,%1\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ - : "=r"(__pu_err) \ - : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) - -#define __put_user_32(x,addr) \ -__asm__ __volatile__("1: stl %r2,%1\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ - : "=r"(__pu_err) \ - : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) - -#ifdef __alpha_bwx__ -/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ - -#define __put_user_16(x,addr) \ -__asm__ __volatile__("1: stw %r2,%1\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ - : "=r"(__pu_err) \ - : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) - -#define __put_user_8(x,addr) \ -__asm__ __volatile__("1: stb %r2,%1\n" \ - "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ - : "=r"(__pu_err) \ - : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) -#else -/* Unfortunately, we can't get an unaligned access trap for the sub-word - write, so we have to do a general unaligned operation. 
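
A plain-C rendering of the ldq_u/extwl/extwh sequence used by the non-BWX __get_user_16() above: two aligned quadword loads cover the unaligned halfword, and the pieces are shifted and merged (little-endian, as on Alpha; illustration only, not the kernel macro):

    #include <stdint.h>
    #include <string.h>

    static uint16_t load_u16_unaligned(const void *p)
    {
        uintptr_t a = (uintptr_t)p;
        unsigned shift = (unsigned)(a & 7) * 8;
        uint64_t w0, w1;

        /* Like the real ldq_u, this touches the whole enclosing quadword(s). */
        memcpy(&w0, (const void *)(a & ~(uintptr_t)7), 8);        /* ldq_u 0(p) */
        memcpy(&w1, (const void *)((a + 1) & ~(uintptr_t)7), 8);  /* ldq_u 1(p) */

        /* extwl-style low part from w0; extwh-style high byte from w1,
         * which only matters when the halfword straddles a quadword
         * boundary (byte offset 7). */
        return (uint16_t)((w0 >> shift) | (shift == 56 ? w1 << 8 : 0));
    }

    int main(void)
    {
        unsigned char buf[16] = { 0 };

        buf[7] = 0x34;      /* halfword at an odd, possibly straddling offset */
        buf[8] = 0x12;
        return load_u16_unaligned(buf + 7) == 0x1234 ? 0 : 1;
    }
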
*/ - -#define __put_user_16(x,addr) \ -{ \ - long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ - __asm__ __volatile__( \ - "1: ldq_u %2,1(%5)\n" \ - "2: ldq_u %1,0(%5)\n" \ - " inswh %6,%5,%4\n" \ - " inswl %6,%5,%3\n" \ - " mskwh %2,%5,%2\n" \ - " mskwl %1,%5,%1\n" \ - " or %2,%4,%2\n" \ - " or %1,%3,%1\n" \ - "3: stq_u %2,1(%5)\n" \ - "4: stq_u %1,0(%5)\n" \ - "5:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31, 5b-1b(%0)\n" \ - " .long 2b - .\n" \ - " lda $31, 5b-2b(%0)\n" \ - " .long 3b - .\n" \ - " lda $31, 5b-3b(%0)\n" \ - " .long 4b - .\n" \ - " lda $31, 5b-4b(%0)\n" \ - ".previous" \ - : "=r"(__pu_err), "=&r"(__pu_tmp1), \ - "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ - "=&r"(__pu_tmp4) \ - : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ -} - -#define __put_user_8(x,addr) \ -{ \ - long __pu_tmp1, __pu_tmp2; \ - __asm__ __volatile__( \ - "1: ldq_u %1,0(%4)\n" \ - " insbl %3,%4,%2\n" \ - " mskbl %1,%4,%1\n" \ - " or %1,%2,%1\n" \ - "2: stq_u %1,0(%4)\n" \ - "3:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31, 3b-1b(%0)\n" \ - " .long 2b - .\n" \ - " lda $31, 3b-2b(%0)\n" \ - ".previous" \ - : "=r"(__pu_err), \ - "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ - : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ -} -#endif - - -/* - * Complex access routines - */ - -/* This little bit of silliness is to get the GP loaded for a function - that ordinarily wouldn't. Otherwise we could have it done by the macro - directly, which can be optimized the linker. */ -#ifdef MODULE -#define __module_address(sym) "r"(sym), -#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym -#else -#define __module_address(sym) -#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" -#endif - -extern void __copy_user(void); - -extern inline long -__copy_tofrom_user_nocheck(void *to, const void *from, long len) -{ - register void * __cu_to __asm__("$6") = to; - register const void * __cu_from __asm__("$7") = from; - register long __cu_len __asm__("$0") = len; - - __asm__ __volatile__( - __module_call(28, 3, __copy_user) - : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) - : __module_address(__copy_user) - "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) - : "$1","$2","$3","$4","$5","$28","memory"); - - return __cu_len; -} - -extern inline long -__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) -{ - if (__access_ok((unsigned long)validate, len, get_fs())) - len = __copy_tofrom_user_nocheck(to, from, len); - return len; -} - -#define __copy_to_user(to,from,n) \ -({ \ - __chk_user_ptr(to); \ - __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ -}) -#define __copy_from_user(to,from,n) \ -({ \ - __chk_user_ptr(from); \ - __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ -}) - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - - -extern inline long -copy_to_user(void __user *to, const void *from, long n) -{ - return __copy_tofrom_user((__force void *)to, from, n, to); -} - -extern inline long -copy_from_user(void *to, const void __user *from, long n) -{ - return __copy_tofrom_user(to, (__force void *)from, n, from); -} - -extern void __do_clear_user(void); - -extern inline long -__clear_user(void __user *to, long len) -{ - register void __user * __cl_to __asm__("$6") = to; - register long __cl_len __asm__("$0") = len; - __asm__ __volatile__( - __module_call(28, 2, __do_clear_user) - : "=r"(__cl_len), "=r"(__cl_to) - : 
__module_address(__do_clear_user) - "0"(__cl_len), "1"(__cl_to) - : "$1","$2","$3","$4","$5","$28","memory"); - return __cl_len; -} - -extern inline long -clear_user(void __user *to, long len) -{ - if (__access_ok((unsigned long)to, len, get_fs())) - len = __clear_user(to, len); - return len; -} - -#undef __module_address -#undef __module_call - -/* Returns: -EFAULT if exception before terminator, N if the entire - buffer filled, else strlen. */ - -extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); - -extern inline long -strncpy_from_user(char *to, const char __user *from, long n) -{ - long ret = -EFAULT; - if (__access_ok((unsigned long)from, 0, get_fs())) - ret = __strncpy_from_user(to, from, n); - return ret; -} - -/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ -extern long __strlen_user(const char __user *); - -extern inline long strlen_user(const char __user *str) -{ - return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; -} - -/* Returns: 0 if exception before NUL or reaching the supplied limit (N), - * a value greater than N if the limit would be exceeded, else strlen. */ -extern long __strnlen_user(const char __user *, long); - -extern inline long strnlen_user(const char __user *str, long n) -{ - return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; -} - -/* - * About the exception table: - * - * - insn is a 32-bit pc-relative offset from the faulting insn. - * - nextinsn is a 16-bit offset off of the faulting instruction - * (not off of the *next* instruction as branches are). - * - errreg is the register in which to place -EFAULT. - * - valreg is the final target register for the load sequence - * and will be zeroed. - * - * Either errreg or valreg may be $31, in which case nothing happens. - * - * The exception fixup information "just so happens" to be arranged - * as in a MEM format instruction. 
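
For illustration, the 16/5/5 split described here can be reproduced with the same bit-fields as the struct exception_table_entry declared just below; with GCC's little-endian bit-field layout (as on Alpha) the packed word lines up with the displacement/Rb/Ra fields of a MEM-format instruction. A standalone sketch (bit-field ordering is compiler-dependent in general):

    #include <stdio.h>

    union fixup {
        unsigned int unit;
        struct {
            signed int   nextinsn : 16;   /* displacement-style offset   */
            unsigned int errreg   : 5;    /* register that gets -EFAULT  */
            unsigned int valreg   : 5;    /* register to zero            */
        } bits;
    };

    int main(void)
    {
        union fixup f = { .bits = { .nextinsn = 4, .errreg = 1, .valreg = 2 } };

        printf("raw=%#x next=%d err=%u val=%u\n",
               f.unit, f.bits.nextinsn, f.bits.errreg, f.bits.valreg);
        return 0;
    }
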
This lets us emit our three - * values like so: - * - * lda valreg, nextinsn(errreg) - * - */ - -struct exception_table_entry -{ - signed int insn; - union exception_fixup { - unsigned unit; - struct { - signed int nextinsn : 16; - unsigned int errreg : 5; - unsigned int valreg : 5; - } bits; - } fixup; -}; - -/* Returns the new pc */ -#define fixup_exception(map_reg, fixup, pc) \ -({ \ - if ((fixup)->fixup.bits.valreg != 31) \ - map_reg((fixup)->fixup.bits.valreg) = 0; \ - if ((fixup)->fixup.bits.errreg != 31) \ - map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \ - (pc) + (fixup)->fixup.bits.nextinsn; \ -}) - - -#endif /* __ALPHA_UACCESS_H */ diff --git a/include/asm-alpha/ucontext.h b/include/asm-alpha/ucontext.h deleted file mode 100644 index 47578ab4215..00000000000 --- a/include/asm-alpha/ucontext.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ASMAXP_UCONTEXT_H -#define _ASMAXP_UCONTEXT_H - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - old_sigset_t uc_osf_sigmask; - stack_t uc_stack; - struct sigcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif /* !_ASMAXP_UCONTEXT_H */ diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h deleted file mode 100644 index 3787c60aed3..00000000000 --- a/include/asm-alpha/unaligned.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ASM_ALPHA_UNALIGNED_H -#define _ASM_ALPHA_UNALIGNED_H - -#include -#include -#include - -#define get_unaligned __get_unaligned_le -#define put_unaligned __put_unaligned_le - -#endif /* _ASM_ALPHA_UNALIGNED_H */ diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h deleted file mode 100644 index 5b5c1748594..00000000000 --- a/include/asm-alpha/unistd.h +++ /dev/null @@ -1,464 +0,0 @@ -#ifndef _ALPHA_UNISTD_H -#define _ALPHA_UNISTD_H - -#define __NR_osf_syscall 0 /* not implemented */ -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_osf_old_open 5 /* not implemented */ -#define __NR_close 6 -#define __NR_osf_wait4 7 -#define __NR_osf_old_creat 8 /* not implemented */ -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_osf_execve 11 /* not implemented */ -#define __NR_chdir 12 -#define __NR_fchdir 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_chown 16 -#define __NR_brk 17 -#define __NR_osf_getfsstat 18 /* not implemented */ -#define __NR_lseek 19 -#define __NR_getxpid 20 -#define __NR_osf_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getxuid 24 -#define __NR_exec_with_loader 25 /* not implemented */ -#define __NR_ptrace 26 -#define __NR_osf_nrecvmsg 27 /* not implemented */ -#define __NR_osf_nsendmsg 28 /* not implemented */ -#define __NR_osf_nrecvfrom 29 /* not implemented */ -#define __NR_osf_naccept 30 /* not implemented */ -#define __NR_osf_ngetpeername 31 /* not implemented */ -#define __NR_osf_ngetsockname 32 /* not implemented */ -#define __NR_access 33 -#define __NR_osf_chflags 34 /* not implemented */ -#define __NR_osf_fchflags 35 /* not implemented */ -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_osf_old_stat 38 /* not implemented */ -#define __NR_setpgid 39 -#define __NR_osf_old_lstat 40 /* not implemented */ -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_osf_set_program_attributes 43 -#define __NR_osf_profil 44 /* not implemented */ -#define __NR_open 45 -#define __NR_osf_old_sigaction 46 /* not implemented */ -#define __NR_getxgid 47 -#define __NR_osf_sigprocmask 48 -#define __NR_osf_getlogin 49 /* not 
implemented */ -#define __NR_osf_setlogin 50 /* not implemented */ -#define __NR_acct 51 -#define __NR_sigpending 52 - -#define __NR_ioctl 54 -#define __NR_osf_reboot 55 /* not implemented */ -#define __NR_osf_revoke 56 /* not implemented */ -#define __NR_symlink 57 -#define __NR_readlink 58 -#define __NR_execve 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_osf_old_fstat 62 /* not implemented */ -#define __NR_getpgrp 63 -#define __NR_getpagesize 64 -#define __NR_osf_mremap 65 /* not implemented */ -#define __NR_vfork 66 -#define __NR_stat 67 -#define __NR_lstat 68 -#define __NR_osf_sbrk 69 /* not implemented */ -#define __NR_osf_sstk 70 /* not implemented */ -#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */ -#define __NR_osf_old_vadvise 72 /* not implemented */ -#define __NR_munmap 73 -#define __NR_mprotect 74 -#define __NR_madvise 75 -#define __NR_vhangup 76 -#define __NR_osf_kmodcall 77 /* not implemented */ -#define __NR_osf_mincore 78 /* not implemented */ -#define __NR_getgroups 79 -#define __NR_setgroups 80 -#define __NR_osf_old_getpgrp 81 /* not implemented */ -#define __NR_setpgrp 82 /* BSD alias for setpgid */ -#define __NR_osf_setitimer 83 -#define __NR_osf_old_wait 84 /* not implemented */ -#define __NR_osf_table 85 /* not implemented */ -#define __NR_osf_getitimer 86 -#define __NR_gethostname 87 -#define __NR_sethostname 88 -#define __NR_getdtablesize 89 -#define __NR_dup2 90 -#define __NR_fstat 91 -#define __NR_fcntl 92 -#define __NR_osf_select 93 -#define __NR_poll 94 -#define __NR_fsync 95 -#define __NR_setpriority 96 -#define __NR_socket 97 -#define __NR_connect 98 -#define __NR_accept 99 -#define __NR_getpriority 100 -#define __NR_send 101 -#define __NR_recv 102 -#define __NR_sigreturn 103 -#define __NR_bind 104 -#define __NR_setsockopt 105 -#define __NR_listen 106 -#define __NR_osf_plock 107 /* not implemented */ -#define __NR_osf_old_sigvec 108 /* not implemented */ -#define __NR_osf_old_sigblock 109 /* not implemented */ -#define __NR_osf_old_sigsetmask 110 /* not implemented */ -#define __NR_sigsuspend 111 -#define __NR_osf_sigstack 112 -#define __NR_recvmsg 113 -#define __NR_sendmsg 114 -#define __NR_osf_old_vtrace 115 /* not implemented */ -#define __NR_osf_gettimeofday 116 -#define __NR_osf_getrusage 117 -#define __NR_getsockopt 118 - -#define __NR_readv 120 -#define __NR_writev 121 -#define __NR_osf_settimeofday 122 -#define __NR_fchown 123 -#define __NR_fchmod 124 -#define __NR_recvfrom 125 -#define __NR_setreuid 126 -#define __NR_setregid 127 -#define __NR_rename 128 -#define __NR_truncate 129 -#define __NR_ftruncate 130 -#define __NR_flock 131 -#define __NR_setgid 132 -#define __NR_sendto 133 -#define __NR_shutdown 134 -#define __NR_socketpair 135 -#define __NR_mkdir 136 -#define __NR_rmdir 137 -#define __NR_osf_utimes 138 -#define __NR_osf_old_sigreturn 139 /* not implemented */ -#define __NR_osf_adjtime 140 /* not implemented */ -#define __NR_getpeername 141 -#define __NR_osf_gethostid 142 /* not implemented */ -#define __NR_osf_sethostid 143 /* not implemented */ -#define __NR_getrlimit 144 -#define __NR_setrlimit 145 -#define __NR_osf_old_killpg 146 /* not implemented */ -#define __NR_setsid 147 -#define __NR_quotactl 148 -#define __NR_osf_oldquota 149 /* not implemented */ -#define __NR_getsockname 150 - -#define __NR_osf_pid_block 153 /* not implemented */ -#define __NR_osf_pid_unblock 154 /* not implemented */ - -#define __NR_sigaction 156 -#define __NR_osf_sigwaitprim 157 /* not implemented */ -#define __NR_osf_nfssvc 158 /* 
not implemented */ -#define __NR_osf_getdirentries 159 -#define __NR_osf_statfs 160 -#define __NR_osf_fstatfs 161 - -#define __NR_osf_asynch_daemon 163 /* not implemented */ -#define __NR_osf_getfh 164 /* not implemented */ -#define __NR_osf_getdomainname 165 -#define __NR_setdomainname 166 - -#define __NR_osf_exportfs 169 /* not implemented */ - -#define __NR_osf_alt_plock 181 /* not implemented */ - -#define __NR_osf_getmnt 184 /* not implemented */ - -#define __NR_osf_alt_sigpending 187 /* not implemented */ -#define __NR_osf_alt_setsid 188 /* not implemented */ - -#define __NR_osf_swapon 199 -#define __NR_msgctl 200 -#define __NR_msgget 201 -#define __NR_msgrcv 202 -#define __NR_msgsnd 203 -#define __NR_semctl 204 -#define __NR_semget 205 -#define __NR_semop 206 -#define __NR_osf_utsname 207 -#define __NR_lchown 208 -#define __NR_osf_shmat 209 -#define __NR_shmctl 210 -#define __NR_shmdt 211 -#define __NR_shmget 212 -#define __NR_osf_mvalid 213 /* not implemented */ -#define __NR_osf_getaddressconf 214 /* not implemented */ -#define __NR_osf_msleep 215 /* not implemented */ -#define __NR_osf_mwakeup 216 /* not implemented */ -#define __NR_msync 217 -#define __NR_osf_signal 218 /* not implemented */ -#define __NR_osf_utc_gettime 219 /* not implemented */ -#define __NR_osf_utc_adjtime 220 /* not implemented */ - -#define __NR_osf_security 222 /* not implemented */ -#define __NR_osf_kloadcall 223 /* not implemented */ - -#define __NR_getpgid 233 -#define __NR_getsid 234 -#define __NR_sigaltstack 235 -#define __NR_osf_waitid 236 /* not implemented */ -#define __NR_osf_priocntlset 237 /* not implemented */ -#define __NR_osf_sigsendset 238 /* not implemented */ -#define __NR_osf_set_speculative 239 /* not implemented */ -#define __NR_osf_msfs_syscall 240 /* not implemented */ -#define __NR_osf_sysinfo 241 -#define __NR_osf_uadmin 242 /* not implemented */ -#define __NR_osf_fuser 243 /* not implemented */ -#define __NR_osf_proplist_syscall 244 -#define __NR_osf_ntp_adjtime 245 /* not implemented */ -#define __NR_osf_ntp_gettime 246 /* not implemented */ -#define __NR_osf_pathconf 247 /* not implemented */ -#define __NR_osf_fpathconf 248 /* not implemented */ - -#define __NR_osf_uswitch 250 /* not implemented */ -#define __NR_osf_usleep_thread 251 -#define __NR_osf_audcntl 252 /* not implemented */ -#define __NR_osf_audgen 253 /* not implemented */ -#define __NR_sysfs 254 -#define __NR_osf_subsys_info 255 /* not implemented */ -#define __NR_osf_getsysinfo 256 -#define __NR_osf_setsysinfo 257 -#define __NR_osf_afs_syscall 258 /* not implemented */ -#define __NR_osf_swapctl 259 /* not implemented */ -#define __NR_osf_memcntl 260 /* not implemented */ -#define __NR_osf_fdatasync 261 /* not implemented */ - -/* - * Ignore legacy syscalls that we don't use. 
- */ -#define __IGNORE_alarm -#define __IGNORE_creat -#define __IGNORE_getegid -#define __IGNORE_geteuid -#define __IGNORE_getgid -#define __IGNORE_getpid -#define __IGNORE_getppid -#define __IGNORE_getuid -#define __IGNORE_pause -#define __IGNORE_time -#define __IGNORE_utime - -/* - * Linux-specific system calls begin at 300 - */ -#define __NR_bdflush 300 -#define __NR_sethae 301 -#define __NR_mount 302 -#define __NR_old_adjtimex 303 -#define __NR_swapoff 304 -#define __NR_getdents 305 -#define __NR_create_module 306 -#define __NR_init_module 307 -#define __NR_delete_module 308 -#define __NR_get_kernel_syms 309 -#define __NR_syslog 310 -#define __NR_reboot 311 -#define __NR_clone 312 -#define __NR_uselib 313 -#define __NR_mlock 314 -#define __NR_munlock 315 -#define __NR_mlockall 316 -#define __NR_munlockall 317 -#define __NR_sysinfo 318 -#define __NR__sysctl 319 -/* 320 was sys_idle. */ -#define __NR_oldumount 321 -#define __NR_swapon 322 -#define __NR_times 323 -#define __NR_personality 324 -#define __NR_setfsuid 325 -#define __NR_setfsgid 326 -#define __NR_ustat 327 -#define __NR_statfs 328 -#define __NR_fstatfs 329 -#define __NR_sched_setparam 330 -#define __NR_sched_getparam 331 -#define __NR_sched_setscheduler 332 -#define __NR_sched_getscheduler 333 -#define __NR_sched_yield 334 -#define __NR_sched_get_priority_max 335 -#define __NR_sched_get_priority_min 336 -#define __NR_sched_rr_get_interval 337 -#define __NR_afs_syscall 338 -#define __NR_uname 339 -#define __NR_nanosleep 340 -#define __NR_mremap 341 -#define __NR_nfsservctl 342 -#define __NR_setresuid 343 -#define __NR_getresuid 344 -#define __NR_pciconfig_read 345 -#define __NR_pciconfig_write 346 -#define __NR_query_module 347 -#define __NR_prctl 348 -#define __NR_pread64 349 -#define __NR_pwrite64 350 -#define __NR_rt_sigreturn 351 -#define __NR_rt_sigaction 352 -#define __NR_rt_sigprocmask 353 -#define __NR_rt_sigpending 354 -#define __NR_rt_sigtimedwait 355 -#define __NR_rt_sigqueueinfo 356 -#define __NR_rt_sigsuspend 357 -#define __NR_select 358 -#define __NR_gettimeofday 359 -#define __NR_settimeofday 360 -#define __NR_getitimer 361 -#define __NR_setitimer 362 -#define __NR_utimes 363 -#define __NR_getrusage 364 -#define __NR_wait4 365 -#define __NR_adjtimex 366 -#define __NR_getcwd 367 -#define __NR_capget 368 -#define __NR_capset 369 -#define __NR_sendfile 370 -#define __NR_setresgid 371 -#define __NR_getresgid 372 -#define __NR_dipc 373 -#define __NR_pivot_root 374 -#define __NR_mincore 375 -#define __NR_pciconfig_iobase 376 -#define __NR_getdents64 377 -#define __NR_gettid 378 -#define __NR_readahead 379 -/* 380 is unused */ -#define __NR_tkill 381 -#define __NR_setxattr 382 -#define __NR_lsetxattr 383 -#define __NR_fsetxattr 384 -#define __NR_getxattr 385 -#define __NR_lgetxattr 386 -#define __NR_fgetxattr 387 -#define __NR_listxattr 388 -#define __NR_llistxattr 389 -#define __NR_flistxattr 390 -#define __NR_removexattr 391 -#define __NR_lremovexattr 392 -#define __NR_fremovexattr 393 -#define __NR_futex 394 -#define __NR_sched_setaffinity 395 -#define __NR_sched_getaffinity 396 -#define __NR_tuxcall 397 -#define __NR_io_setup 398 -#define __NR_io_destroy 399 -#define __NR_io_getevents 400 -#define __NR_io_submit 401 -#define __NR_io_cancel 402 -#define __NR_exit_group 405 -#define __NR_lookup_dcookie 406 -#define __NR_epoll_create 407 -#define __NR_epoll_ctl 408 -#define __NR_epoll_wait 409 -/* Feb 2007: These three sys_epoll defines shouldn't be here but culling - * them would break userspace apps ... 
we'll kill them off in 2010 :) */ -#define __NR_sys_epoll_create __NR_epoll_create -#define __NR_sys_epoll_ctl __NR_epoll_ctl -#define __NR_sys_epoll_wait __NR_epoll_wait -#define __NR_remap_file_pages 410 -#define __NR_set_tid_address 411 -#define __NR_restart_syscall 412 -#define __NR_fadvise64 413 -#define __NR_timer_create 414 -#define __NR_timer_settime 415 -#define __NR_timer_gettime 416 -#define __NR_timer_getoverrun 417 -#define __NR_timer_delete 418 -#define __NR_clock_settime 419 -#define __NR_clock_gettime 420 -#define __NR_clock_getres 421 -#define __NR_clock_nanosleep 422 -#define __NR_semtimedop 423 -#define __NR_tgkill 424 -#define __NR_stat64 425 -#define __NR_lstat64 426 -#define __NR_fstat64 427 -#define __NR_vserver 428 -#define __NR_mbind 429 -#define __NR_get_mempolicy 430 -#define __NR_set_mempolicy 431 -#define __NR_mq_open 432 -#define __NR_mq_unlink 433 -#define __NR_mq_timedsend 434 -#define __NR_mq_timedreceive 435 -#define __NR_mq_notify 436 -#define __NR_mq_getsetattr 437 -#define __NR_waitid 438 -#define __NR_add_key 439 -#define __NR_request_key 440 -#define __NR_keyctl 441 -#define __NR_ioprio_set 442 -#define __NR_ioprio_get 443 -#define __NR_inotify_init 444 -#define __NR_inotify_add_watch 445 -#define __NR_inotify_rm_watch 446 -#define __NR_fdatasync 447 -#define __NR_kexec_load 448 -#define __NR_migrate_pages 449 -#define __NR_openat 450 -#define __NR_mkdirat 451 -#define __NR_mknodat 452 -#define __NR_fchownat 453 -#define __NR_futimesat 454 -#define __NR_fstatat64 455 -#define __NR_unlinkat 456 -#define __NR_renameat 457 -#define __NR_linkat 458 -#define __NR_symlinkat 459 -#define __NR_readlinkat 460 -#define __NR_fchmodat 461 -#define __NR_faccessat 462 -#define __NR_pselect6 463 -#define __NR_ppoll 464 -#define __NR_unshare 465 -#define __NR_set_robust_list 466 -#define __NR_get_robust_list 467 -#define __NR_splice 468 -#define __NR_sync_file_range 469 -#define __NR_tee 470 -#define __NR_vmsplice 471 -#define __NR_move_pages 472 -#define __NR_getcpu 473 -#define __NR_epoll_pwait 474 -#define __NR_utimensat 475 -#define __NR_signalfd 476 -#define __NR_timerfd 477 -#define __NR_eventfd 478 - -#ifdef __KERNEL__ - -#define NR_SYSCALLS 479 - -#define __ARCH_WANT_IPC_PARSE_VERSION -#define __ARCH_WANT_OLD_READDIR -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_GETHOSTNAME -#define __ARCH_WANT_SYS_FADVISE64 -#define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_OLD_GETRLIMIT -#define __ARCH_WANT_SYS_OLDUMOUNT -#define __ARCH_WANT_SYS_SIGPENDING - -/* "Conditional" syscalls. What we want is - - __attribute__((weak,alias("sys_ni_syscall"))) - - but that raises the problem of what type to give the symbol. If we use - a prototype, it'll conflict with the definition given in this file and - others. If we use __typeof, we discover that not all symbols actually - have declarations. If we use no prototype, then we get warnings from - -Wstrict-prototypes. Ho hum. 
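
The cond_syscall() definition just below leans on an assembler-level weak alias precisely to sidestep the prototype headaches this comment lists. A standalone sketch of the same trick (ELF target and GCC top-level asm assumed; sys_example is a made-up name):

    #include <stdio.h>

    long sys_ni_syscall(void)
    {
        return -38;                         /* -ENOSYS */
    }

    /* Roughly what cond_syscall(sys_example) expands to: a weak symbol
     * that resolves to sys_ni_syscall unless a strong definition is
     * linked in from elsewhere. */
    asm(".weak sys_example\n\t" "sys_example = sys_ni_syscall");
    long sys_example(void);

    int main(void)
    {
        printf("sys_example() = %ld\n", sys_example());   /* -38 */
        return 0;
    }
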
*/ - -#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall") - -#endif /* __KERNEL__ */ -#endif /* _ALPHA_UNISTD_H */ diff --git a/include/asm-alpha/user.h b/include/asm-alpha/user.h deleted file mode 100644 index a4eb6a4ca8d..00000000000 --- a/include/asm-alpha/user.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef _ALPHA_USER_H -#define _ALPHA_USER_H - -#include -#include - -#include -#include - -/* - * Core file format: The core file is written in such a way that gdb - * can understand it and provide useful information to the user (under - * linux we use the `trad-core' bfd, NOT the osf-core). The file contents - * are as follows: - * - * upage: 1 page consisting of a user struct that tells gdb - * what is present in the file. Directly after this is a - * copy of the task_struct, which is currently not used by gdb, - * but it may come in handy at some point. All of the registers - * are stored as part of the upage. The upage should always be - * only one page long. - * data: The data segment follows next. We use current->end_text to - * current->brk to pick up all of the user variables, plus any memory - * that may have been sbrk'ed. No attempt is made to determine if a - * page is demand-zero or if a page is totally unused, we just cover - * the entire range. All of the addresses are rounded in such a way - * that an integral number of pages is written. - * stack: We need the stack information in order to get a meaningful - * backtrace. We need to write the data from usp to - * current->start_stack, so we round each of these in order to be able - * to write an integer number of pages. - */ -struct user { - unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */ - size_t u_tsize; /* text size (pages) */ - size_t u_dsize; /* data size (pages) */ - size_t u_ssize; /* stack size (pages) */ - unsigned long start_code; /* text starting address */ - unsigned long start_data; /* data starting address */ - unsigned long start_stack; /* stack starting address */ - long int signal; /* signal causing core dump */ - unsigned long u_ar0; /* help gdb find registers */ - unsigned long magic; /* identifies a core file */ - char u_comm[32]; /* user command name */ -}; - -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif /* _ALPHA_USER_H */ diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h deleted file mode 100644 index c00106bac52..00000000000 --- a/include/asm-alpha/vga.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Access to VGA videoram - * - * (c) 1998 Martin Mares - */ - -#ifndef _LINUX_ASM_VGA_H_ -#define _LINUX_ASM_VGA_H_ - -#include - -#define VT_BUF_HAVE_RW -#define VT_BUF_HAVE_MEMSETW -#define VT_BUF_HAVE_MEMCPYW - -static inline void scr_writew(u16 val, volatile u16 *addr) -{ - if (__is_ioaddr(addr)) - __raw_writew(val, (volatile u16 __iomem *) addr); - else - *addr = val; -} - -static inline u16 scr_readw(volatile const u16 *addr) -{ - if (__is_ioaddr(addr)) - return __raw_readw((volatile const u16 __iomem *) addr); - else - return *addr; -} - -static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) -{ - if (__is_ioaddr(s)) - memsetw_io((u16 __iomem *) s, c, count); - else - memsetw(s, c, count); -} - -/* Do not trust that the usage will be correct; analyze the arguments. */ -extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count); - -/* ??? These are currently only used for downloading character sets. 
As - such, they don't need memory barriers. Is this all they are intended - to be used for? */ -#define vga_readb(a) readb((u8 __iomem *)(a)) -#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a)) - -#ifdef CONFIG_VGA_HOSE -#include -#include - -extern struct pci_controller *pci_vga_hose; - -# define __is_port_vga(a) \ - (((a) >= 0x3b0) && ((a) < 0x3e0) && \ - ((a) != 0x3b3) && ((a) != 0x3d3)) - -# define __is_mem_vga(a) \ - (((a) >= 0xa0000) && ((a) <= 0xc0000)) - -# define FIXUP_IOADDR_VGA(a) do { \ - if (pci_vga_hose && __is_port_vga(a)) \ - (a) += pci_vga_hose->io_space->start; \ - } while(0) - -# define FIXUP_MEMADDR_VGA(a) do { \ - if (pci_vga_hose && __is_mem_vga(a)) \ - (a) += pci_vga_hose->mem_space->start; \ - } while(0) - -#else /* CONFIG_VGA_HOSE */ -# define pci_vga_hose 0 -# define __is_port_vga(a) 0 -# define __is_mem_vga(a) 0 -# define FIXUP_IOADDR_VGA(a) -# define FIXUP_MEMADDR_VGA(a) -#endif /* CONFIG_VGA_HOSE */ - -#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s)) - -#endif diff --git a/include/asm-alpha/xor.h b/include/asm-alpha/xor.h deleted file mode 100644 index 5ee1c2bc049..00000000000 --- a/include/asm-alpha/xor.h +++ /dev/null @@ -1,855 +0,0 @@ -/* - * include/asm-alpha/xor.h - * - * Optimized RAID-5 checksumming functions for alpha EV5 and EV6 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * You should have received a copy of the GNU General Public License - * (for example /usr/src/linux/COPYING); if not, write to the Free - * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *); -extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); - -extern void xor_alpha_prefetch_2(unsigned long, unsigned long *, - unsigned long *); -extern void xor_alpha_prefetch_3(unsigned long, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_alpha_prefetch_4(unsigned long, unsigned long *, - unsigned long *, unsigned long *, - unsigned long *); -extern void xor_alpha_prefetch_5(unsigned long, unsigned long *, - unsigned long *, unsigned long *, - unsigned long *, unsigned long *); - -asm(" \n\ - .text \n\ - .align 3 \n\ - .ent xor_alpha_2 \n\ -xor_alpha_2: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - .align 4 \n\ -2: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,8($17) \n\ - ldq $3,8($18) \n\ - \n\ - ldq $4,16($17) \n\ - ldq $5,16($18) \n\ - ldq $6,24($17) \n\ - ldq $7,24($18) \n\ - \n\ - ldq $19,32($17) \n\ - ldq $20,32($18) \n\ - ldq $21,40($17) \n\ - ldq $22,40($18) \n\ - \n\ - ldq $23,48($17) \n\ - ldq $24,48($18) \n\ - ldq $25,56($17) \n\ - xor $0,$1,$0 # 7 cycles from $1 load \n\ - \n\ - ldq $27,56($18) \n\ - xor $2,$3,$2 \n\ - stq $0,0($17) \n\ - xor $4,$5,$4 \n\ - \n\ - stq $2,8($17) \n\ - xor $6,$7,$6 \n\ - stq $4,16($17) \n\ - xor $19,$20,$19 \n\ - \n\ - stq $6,24($17) \n\ - xor $21,$22,$21 \n\ - stq $19,32($17) \n\ - xor $23,$24,$23 \n\ - \n\ - stq $21,40($17) \n\ - xor $25,$27,$25 \n\ - stq $23,48($17) \n\ - subq $16,1,$16 \n\ - \n\ - stq $25,56($17) \n\ - addq $17,64,$17 \n\ 
- addq $18,64,$18 \n\ - bgt $16,2b \n\ - \n\ - ret \n\ - .end xor_alpha_2 \n\ - \n\ - .align 3 \n\ - .ent xor_alpha_3 \n\ -xor_alpha_3: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - .align 4 \n\ -3: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,0($19) \n\ - ldq $3,8($17) \n\ - \n\ - ldq $4,8($18) \n\ - ldq $6,16($17) \n\ - ldq $7,16($18) \n\ - ldq $21,24($17) \n\ - \n\ - ldq $22,24($18) \n\ - ldq $24,32($17) \n\ - ldq $25,32($18) \n\ - ldq $5,8($19) \n\ - \n\ - ldq $20,16($19) \n\ - ldq $23,24($19) \n\ - ldq $27,32($19) \n\ - nop \n\ - \n\ - xor $0,$1,$1 # 8 cycles from $0 load \n\ - xor $3,$4,$4 # 6 cycles from $4 load \n\ - xor $6,$7,$7 # 6 cycles from $7 load \n\ - xor $21,$22,$22 # 5 cycles from $22 load \n\ - \n\ - xor $1,$2,$2 # 9 cycles from $2 load \n\ - xor $24,$25,$25 # 5 cycles from $25 load \n\ - stq $2,0($17) \n\ - xor $4,$5,$5 # 6 cycles from $5 load \n\ - \n\ - stq $5,8($17) \n\ - xor $7,$20,$20 # 7 cycles from $20 load \n\ - stq $20,16($17) \n\ - xor $22,$23,$23 # 7 cycles from $23 load \n\ - \n\ - stq $23,24($17) \n\ - xor $25,$27,$27 # 7 cycles from $27 load \n\ - stq $27,32($17) \n\ - nop \n\ - \n\ - ldq $0,40($17) \n\ - ldq $1,40($18) \n\ - ldq $3,48($17) \n\ - ldq $4,48($18) \n\ - \n\ - ldq $6,56($17) \n\ - ldq $7,56($18) \n\ - ldq $2,40($19) \n\ - ldq $5,48($19) \n\ - \n\ - ldq $20,56($19) \n\ - xor $0,$1,$1 # 4 cycles from $1 load \n\ - xor $3,$4,$4 # 5 cycles from $4 load \n\ - xor $6,$7,$7 # 5 cycles from $7 load \n\ - \n\ - xor $1,$2,$2 # 4 cycles from $2 load \n\ - xor $4,$5,$5 # 5 cycles from $5 load \n\ - stq $2,40($17) \n\ - xor $7,$20,$20 # 4 cycles from $20 load \n\ - \n\ - stq $5,48($17) \n\ - subq $16,1,$16 \n\ - stq $20,56($17) \n\ - addq $19,64,$19 \n\ - \n\ - addq $18,64,$18 \n\ - addq $17,64,$17 \n\ - bgt $16,3b \n\ - ret \n\ - .end xor_alpha_3 \n\ - \n\ - .align 3 \n\ - .ent xor_alpha_4 \n\ -xor_alpha_4: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - .align 4 \n\ -4: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,0($19) \n\ - ldq $3,0($20) \n\ - \n\ - ldq $4,8($17) \n\ - ldq $5,8($18) \n\ - ldq $6,8($19) \n\ - ldq $7,8($20) \n\ - \n\ - ldq $21,16($17) \n\ - ldq $22,16($18) \n\ - ldq $23,16($19) \n\ - ldq $24,16($20) \n\ - \n\ - ldq $25,24($17) \n\ - xor $0,$1,$1 # 6 cycles from $1 load \n\ - ldq $27,24($18) \n\ - xor $2,$3,$3 # 6 cycles from $3 load \n\ - \n\ - ldq $0,24($19) \n\ - xor $1,$3,$3 \n\ - ldq $1,24($20) \n\ - xor $4,$5,$5 # 7 cycles from $5 load \n\ - \n\ - stq $3,0($17) \n\ - xor $6,$7,$7 \n\ - xor $21,$22,$22 # 7 cycles from $22 load \n\ - xor $5,$7,$7 \n\ - \n\ - stq $7,8($17) \n\ - xor $23,$24,$24 # 7 cycles from $24 load \n\ - ldq $2,32($17) \n\ - xor $22,$24,$24 \n\ - \n\ - ldq $3,32($18) \n\ - ldq $4,32($19) \n\ - ldq $5,32($20) \n\ - xor $25,$27,$27 # 8 cycles from $27 load \n\ - \n\ - ldq $6,40($17) \n\ - ldq $7,40($18) \n\ - ldq $21,40($19) \n\ - ldq $22,40($20) \n\ - \n\ - stq $24,16($17) \n\ - xor $0,$1,$1 # 9 cycles from $1 load \n\ - xor $2,$3,$3 # 5 cycles from $3 load \n\ - xor $27,$1,$1 \n\ - \n\ - stq $1,24($17) \n\ - xor $4,$5,$5 # 5 cycles from $5 load \n\ - ldq $23,48($17) \n\ - ldq $24,48($18) \n\ - \n\ - ldq $25,48($19) \n\ - xor $3,$5,$5 \n\ - ldq $27,48($20) \n\ - ldq $0,56($17) \n\ - \n\ - ldq $1,56($18) \n\ - ldq $2,56($19) \n\ - xor $6,$7,$7 # 8 cycles from $6 load \n\ - ldq $3,56($20) \n\ - \n\ - stq $5,32($17) \n\ - xor $21,$22,$22 # 8 cycles from $22 load \n\ - xor $7,$22,$22 \n\ - xor $23,$24,$24 # 5 cycles from $24 load \n\ - \n\ - stq $22,40($17) \n\ - xor $25,$27,$27 # 5 cycles from $27 load 
\n\ - xor $24,$27,$27 \n\ - xor $0,$1,$1 # 5 cycles from $1 load \n\ - \n\ - stq $27,48($17) \n\ - xor $2,$3,$3 # 4 cycles from $3 load \n\ - xor $1,$3,$3 \n\ - subq $16,1,$16 \n\ - \n\ - stq $3,56($17) \n\ - addq $20,64,$20 \n\ - addq $19,64,$19 \n\ - addq $18,64,$18 \n\ - \n\ - addq $17,64,$17 \n\ - bgt $16,4b \n\ - ret \n\ - .end xor_alpha_4 \n\ - \n\ - .align 3 \n\ - .ent xor_alpha_5 \n\ -xor_alpha_5: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - .align 4 \n\ -5: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,0($19) \n\ - ldq $3,0($20) \n\ - \n\ - ldq $4,0($21) \n\ - ldq $5,8($17) \n\ - ldq $6,8($18) \n\ - ldq $7,8($19) \n\ - \n\ - ldq $22,8($20) \n\ - ldq $23,8($21) \n\ - ldq $24,16($17) \n\ - ldq $25,16($18) \n\ - \n\ - ldq $27,16($19) \n\ - xor $0,$1,$1 # 6 cycles from $1 load \n\ - ldq $28,16($20) \n\ - xor $2,$3,$3 # 6 cycles from $3 load \n\ - \n\ - ldq $0,16($21) \n\ - xor $1,$3,$3 \n\ - ldq $1,24($17) \n\ - xor $3,$4,$4 # 7 cycles from $4 load \n\ - \n\ - stq $4,0($17) \n\ - xor $5,$6,$6 # 7 cycles from $6 load \n\ - xor $7,$22,$22 # 7 cycles from $22 load \n\ - xor $6,$23,$23 # 7 cycles from $23 load \n\ - \n\ - ldq $2,24($18) \n\ - xor $22,$23,$23 \n\ - ldq $3,24($19) \n\ - xor $24,$25,$25 # 8 cycles from $25 load \n\ - \n\ - stq $23,8($17) \n\ - xor $25,$27,$27 # 8 cycles from $27 load \n\ - ldq $4,24($20) \n\ - xor $28,$0,$0 # 7 cycles from $0 load \n\ - \n\ - ldq $5,24($21) \n\ - xor $27,$0,$0 \n\ - ldq $6,32($17) \n\ - ldq $7,32($18) \n\ - \n\ - stq $0,16($17) \n\ - xor $1,$2,$2 # 6 cycles from $2 load \n\ - ldq $22,32($19) \n\ - xor $3,$4,$4 # 4 cycles from $4 load \n\ - \n\ - ldq $23,32($20) \n\ - xor $2,$4,$4 \n\ - ldq $24,32($21) \n\ - ldq $25,40($17) \n\ - \n\ - ldq $27,40($18) \n\ - ldq $28,40($19) \n\ - ldq $0,40($20) \n\ - xor $4,$5,$5 # 7 cycles from $5 load \n\ - \n\ - stq $5,24($17) \n\ - xor $6,$7,$7 # 7 cycles from $7 load \n\ - ldq $1,40($21) \n\ - ldq $2,48($17) \n\ - \n\ - ldq $3,48($18) \n\ - xor $7,$22,$22 # 7 cycles from $22 load \n\ - ldq $4,48($19) \n\ - xor $23,$24,$24 # 6 cycles from $24 load \n\ - \n\ - ldq $5,48($20) \n\ - xor $22,$24,$24 \n\ - ldq $6,48($21) \n\ - xor $25,$27,$27 # 7 cycles from $27 load \n\ - \n\ - stq $24,32($17) \n\ - xor $27,$28,$28 # 8 cycles from $28 load \n\ - ldq $7,56($17) \n\ - xor $0,$1,$1 # 6 cycles from $1 load \n\ - \n\ - ldq $22,56($18) \n\ - ldq $23,56($19) \n\ - ldq $24,56($20) \n\ - ldq $25,56($21) \n\ - \n\ - xor $28,$1,$1 \n\ - xor $2,$3,$3 # 9 cycles from $3 load \n\ - xor $3,$4,$4 # 9 cycles from $4 load \n\ - xor $5,$6,$6 # 8 cycles from $6 load \n\ - \n\ - stq $1,40($17) \n\ - xor $4,$6,$6 \n\ - xor $7,$22,$22 # 7 cycles from $22 load \n\ - xor $23,$24,$24 # 6 cycles from $24 load \n\ - \n\ - stq $6,48($17) \n\ - xor $22,$24,$24 \n\ - subq $16,1,$16 \n\ - xor $24,$25,$25 # 8 cycles from $25 load \n\ - \n\ - stq $25,56($17) \n\ - addq $21,64,$21 \n\ - addq $20,64,$20 \n\ - addq $19,64,$19 \n\ - \n\ - addq $18,64,$18 \n\ - addq $17,64,$17 \n\ - bgt $16,5b \n\ - ret \n\ - .end xor_alpha_5 \n\ - \n\ - .align 3 \n\ - .ent xor_alpha_prefetch_2 \n\ -xor_alpha_prefetch_2: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - \n\ - ldq $31, 0($17) \n\ - ldq $31, 0($18) \n\ - \n\ - ldq $31, 64($17) \n\ - ldq $31, 64($18) \n\ - \n\ - ldq $31, 128($17) \n\ - ldq $31, 128($18) \n\ - \n\ - ldq $31, 192($17) \n\ - ldq $31, 192($18) \n\ - .align 4 \n\ -2: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,8($17) \n\ - ldq $3,8($18) \n\ - \n\ - ldq $4,16($17) \n\ - ldq $5,16($18) \n\ - ldq $6,24($17) \n\ - ldq $7,24($18) 
\n\ - \n\ - ldq $19,32($17) \n\ - ldq $20,32($18) \n\ - ldq $21,40($17) \n\ - ldq $22,40($18) \n\ - \n\ - ldq $23,48($17) \n\ - ldq $24,48($18) \n\ - ldq $25,56($17) \n\ - ldq $27,56($18) \n\ - \n\ - ldq $31,256($17) \n\ - xor $0,$1,$0 # 8 cycles from $1 load \n\ - ldq $31,256($18) \n\ - xor $2,$3,$2 \n\ - \n\ - stq $0,0($17) \n\ - xor $4,$5,$4 \n\ - stq $2,8($17) \n\ - xor $6,$7,$6 \n\ - \n\ - stq $4,16($17) \n\ - xor $19,$20,$19 \n\ - stq $6,24($17) \n\ - xor $21,$22,$21 \n\ - \n\ - stq $19,32($17) \n\ - xor $23,$24,$23 \n\ - stq $21,40($17) \n\ - xor $25,$27,$25 \n\ - \n\ - stq $23,48($17) \n\ - subq $16,1,$16 \n\ - stq $25,56($17) \n\ - addq $17,64,$17 \n\ - \n\ - addq $18,64,$18 \n\ - bgt $16,2b \n\ - ret \n\ - .end xor_alpha_prefetch_2 \n\ - \n\ - .align 3 \n\ - .ent xor_alpha_prefetch_3 \n\ -xor_alpha_prefetch_3: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - \n\ - ldq $31, 0($17) \n\ - ldq $31, 0($18) \n\ - ldq $31, 0($19) \n\ - \n\ - ldq $31, 64($17) \n\ - ldq $31, 64($18) \n\ - ldq $31, 64($19) \n\ - \n\ - ldq $31, 128($17) \n\ - ldq $31, 128($18) \n\ - ldq $31, 128($19) \n\ - \n\ - ldq $31, 192($17) \n\ - ldq $31, 192($18) \n\ - ldq $31, 192($19) \n\ - .align 4 \n\ -3: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,0($19) \n\ - ldq $3,8($17) \n\ - \n\ - ldq $4,8($18) \n\ - ldq $6,16($17) \n\ - ldq $7,16($18) \n\ - ldq $21,24($17) \n\ - \n\ - ldq $22,24($18) \n\ - ldq $24,32($17) \n\ - ldq $25,32($18) \n\ - ldq $5,8($19) \n\ - \n\ - ldq $20,16($19) \n\ - ldq $23,24($19) \n\ - ldq $27,32($19) \n\ - nop \n\ - \n\ - xor $0,$1,$1 # 8 cycles from $0 load \n\ - xor $3,$4,$4 # 7 cycles from $4 load \n\ - xor $6,$7,$7 # 6 cycles from $7 load \n\ - xor $21,$22,$22 # 5 cycles from $22 load \n\ - \n\ - xor $1,$2,$2 # 9 cycles from $2 load \n\ - xor $24,$25,$25 # 5 cycles from $25 load \n\ - stq $2,0($17) \n\ - xor $4,$5,$5 # 6 cycles from $5 load \n\ - \n\ - stq $5,8($17) \n\ - xor $7,$20,$20 # 7 cycles from $20 load \n\ - stq $20,16($17) \n\ - xor $22,$23,$23 # 7 cycles from $23 load \n\ - \n\ - stq $23,24($17) \n\ - xor $25,$27,$27 # 7 cycles from $27 load \n\ - stq $27,32($17) \n\ - nop \n\ - \n\ - ldq $0,40($17) \n\ - ldq $1,40($18) \n\ - ldq $3,48($17) \n\ - ldq $4,48($18) \n\ - \n\ - ldq $6,56($17) \n\ - ldq $7,56($18) \n\ - ldq $2,40($19) \n\ - ldq $5,48($19) \n\ - \n\ - ldq $20,56($19) \n\ - ldq $31,256($17) \n\ - ldq $31,256($18) \n\ - ldq $31,256($19) \n\ - \n\ - xor $0,$1,$1 # 6 cycles from $1 load \n\ - xor $3,$4,$4 # 5 cycles from $4 load \n\ - xor $6,$7,$7 # 5 cycles from $7 load \n\ - xor $1,$2,$2 # 4 cycles from $2 load \n\ - \n\ - xor $4,$5,$5 # 5 cycles from $5 load \n\ - xor $7,$20,$20 # 4 cycles from $20 load \n\ - stq $2,40($17) \n\ - subq $16,1,$16 \n\ - \n\ - stq $5,48($17) \n\ - addq $19,64,$19 \n\ - stq $20,56($17) \n\ - addq $18,64,$18 \n\ - \n\ - addq $17,64,$17 \n\ - bgt $16,3b \n\ - ret \n\ - .end xor_alpha_prefetch_3 \n\ - \n\ - .align 3 \n\ - .ent xor_alpha_prefetch_4 \n\ -xor_alpha_prefetch_4: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - \n\ - ldq $31, 0($17) \n\ - ldq $31, 0($18) \n\ - ldq $31, 0($19) \n\ - ldq $31, 0($20) \n\ - \n\ - ldq $31, 64($17) \n\ - ldq $31, 64($18) \n\ - ldq $31, 64($19) \n\ - ldq $31, 64($20) \n\ - \n\ - ldq $31, 128($17) \n\ - ldq $31, 128($18) \n\ - ldq $31, 128($19) \n\ - ldq $31, 128($20) \n\ - \n\ - ldq $31, 192($17) \n\ - ldq $31, 192($18) \n\ - ldq $31, 192($19) \n\ - ldq $31, 192($20) \n\ - .align 4 \n\ -4: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,0($19) \n\ - ldq $3,0($20) \n\ - \n\ - ldq $4,8($17) \n\ - 
ldq $5,8($18) \n\ - ldq $6,8($19) \n\ - ldq $7,8($20) \n\ - \n\ - ldq $21,16($17) \n\ - ldq $22,16($18) \n\ - ldq $23,16($19) \n\ - ldq $24,16($20) \n\ - \n\ - ldq $25,24($17) \n\ - xor $0,$1,$1 # 6 cycles from $1 load \n\ - ldq $27,24($18) \n\ - xor $2,$3,$3 # 6 cycles from $3 load \n\ - \n\ - ldq $0,24($19) \n\ - xor $1,$3,$3 \n\ - ldq $1,24($20) \n\ - xor $4,$5,$5 # 7 cycles from $5 load \n\ - \n\ - stq $3,0($17) \n\ - xor $6,$7,$7 \n\ - xor $21,$22,$22 # 7 cycles from $22 load \n\ - xor $5,$7,$7 \n\ - \n\ - stq $7,8($17) \n\ - xor $23,$24,$24 # 7 cycles from $24 load \n\ - ldq $2,32($17) \n\ - xor $22,$24,$24 \n\ - \n\ - ldq $3,32($18) \n\ - ldq $4,32($19) \n\ - ldq $5,32($20) \n\ - xor $25,$27,$27 # 8 cycles from $27 load \n\ - \n\ - ldq $6,40($17) \n\ - ldq $7,40($18) \n\ - ldq $21,40($19) \n\ - ldq $22,40($20) \n\ - \n\ - stq $24,16($17) \n\ - xor $0,$1,$1 # 9 cycles from $1 load \n\ - xor $2,$3,$3 # 5 cycles from $3 load \n\ - xor $27,$1,$1 \n\ - \n\ - stq $1,24($17) \n\ - xor $4,$5,$5 # 5 cycles from $5 load \n\ - ldq $23,48($17) \n\ - xor $3,$5,$5 \n\ - \n\ - ldq $24,48($18) \n\ - ldq $25,48($19) \n\ - ldq $27,48($20) \n\ - ldq $0,56($17) \n\ - \n\ - ldq $1,56($18) \n\ - ldq $2,56($19) \n\ - ldq $3,56($20) \n\ - xor $6,$7,$7 # 8 cycles from $6 load \n\ - \n\ - ldq $31,256($17) \n\ - xor $21,$22,$22 # 8 cycles from $22 load \n\ - ldq $31,256($18) \n\ - xor $7,$22,$22 \n\ - \n\ - ldq $31,256($19) \n\ - xor $23,$24,$24 # 6 cycles from $24 load \n\ - ldq $31,256($20) \n\ - xor $25,$27,$27 # 6 cycles from $27 load \n\ - \n\ - stq $5,32($17) \n\ - xor $24,$27,$27 \n\ - xor $0,$1,$1 # 7 cycles from $1 load \n\ - xor $2,$3,$3 # 6 cycles from $3 load \n\ - \n\ - stq $22,40($17) \n\ - xor $1,$3,$3 \n\ - stq $27,48($17) \n\ - subq $16,1,$16 \n\ - \n\ - stq $3,56($17) \n\ - addq $20,64,$20 \n\ - addq $19,64,$19 \n\ - addq $18,64,$18 \n\ - \n\ - addq $17,64,$17 \n\ - bgt $16,4b \n\ - ret \n\ - .end xor_alpha_prefetch_4 \n\ - \n\ - .align 3 \n\ - .ent xor_alpha_prefetch_5 \n\ -xor_alpha_prefetch_5: \n\ - .prologue 0 \n\ - srl $16, 6, $16 \n\ - \n\ - ldq $31, 0($17) \n\ - ldq $31, 0($18) \n\ - ldq $31, 0($19) \n\ - ldq $31, 0($20) \n\ - ldq $31, 0($21) \n\ - \n\ - ldq $31, 64($17) \n\ - ldq $31, 64($18) \n\ - ldq $31, 64($19) \n\ - ldq $31, 64($20) \n\ - ldq $31, 64($21) \n\ - \n\ - ldq $31, 128($17) \n\ - ldq $31, 128($18) \n\ - ldq $31, 128($19) \n\ - ldq $31, 128($20) \n\ - ldq $31, 128($21) \n\ - \n\ - ldq $31, 192($17) \n\ - ldq $31, 192($18) \n\ - ldq $31, 192($19) \n\ - ldq $31, 192($20) \n\ - ldq $31, 192($21) \n\ - .align 4 \n\ -5: \n\ - ldq $0,0($17) \n\ - ldq $1,0($18) \n\ - ldq $2,0($19) \n\ - ldq $3,0($20) \n\ - \n\ - ldq $4,0($21) \n\ - ldq $5,8($17) \n\ - ldq $6,8($18) \n\ - ldq $7,8($19) \n\ - \n\ - ldq $22,8($20) \n\ - ldq $23,8($21) \n\ - ldq $24,16($17) \n\ - ldq $25,16($18) \n\ - \n\ - ldq $27,16($19) \n\ - xor $0,$1,$1 # 6 cycles from $1 load \n\ - ldq $28,16($20) \n\ - xor $2,$3,$3 # 6 cycles from $3 load \n\ - \n\ - ldq $0,16($21) \n\ - xor $1,$3,$3 \n\ - ldq $1,24($17) \n\ - xor $3,$4,$4 # 7 cycles from $4 load \n\ - \n\ - stq $4,0($17) \n\ - xor $5,$6,$6 # 7 cycles from $6 load \n\ - xor $7,$22,$22 # 7 cycles from $22 load \n\ - xor $6,$23,$23 # 7 cycles from $23 load \n\ - \n\ - ldq $2,24($18) \n\ - xor $22,$23,$23 \n\ - ldq $3,24($19) \n\ - xor $24,$25,$25 # 8 cycles from $25 load \n\ - \n\ - stq $23,8($17) \n\ - xor $25,$27,$27 # 8 cycles from $27 load \n\ - ldq $4,24($20) \n\ - xor $28,$0,$0 # 7 cycles from $0 load \n\ - \n\ - ldq $5,24($21) \n\ - xor $27,$0,$0 \n\ 
- ldq $6,32($17) \n\ - ldq $7,32($18) \n\ - \n\ - stq $0,16($17) \n\ - xor $1,$2,$2 # 6 cycles from $2 load \n\ - ldq $22,32($19) \n\ - xor $3,$4,$4 # 4 cycles from $4 load \n\ - \n\ - ldq $23,32($20) \n\ - xor $2,$4,$4 \n\ - ldq $24,32($21) \n\ - ldq $25,40($17) \n\ - \n\ - ldq $27,40($18) \n\ - ldq $28,40($19) \n\ - ldq $0,40($20) \n\ - xor $4,$5,$5 # 7 cycles from $5 load \n\ - \n\ - stq $5,24($17) \n\ - xor $6,$7,$7 # 7 cycles from $7 load \n\ - ldq $1,40($21) \n\ - ldq $2,48($17) \n\ - \n\ - ldq $3,48($18) \n\ - xor $7,$22,$22 # 7 cycles from $22 load \n\ - ldq $4,48($19) \n\ - xor $23,$24,$24 # 6 cycles from $24 load \n\ - \n\ - ldq $5,48($20) \n\ - xor $22,$24,$24 \n\ - ldq $6,48($21) \n\ - xor $25,$27,$27 # 7 cycles from $27 load \n\ - \n\ - stq $24,32($17) \n\ - xor $27,$28,$28 # 8 cycles from $28 load \n\ - ldq $7,56($17) \n\ - xor $0,$1,$1 # 6 cycles from $1 load \n\ - \n\ - ldq $22,56($18) \n\ - ldq $23,56($19) \n\ - ldq $24,56($20) \n\ - ldq $25,56($21) \n\ - \n\ - ldq $31,256($17) \n\ - xor $28,$1,$1 \n\ - ldq $31,256($18) \n\ - xor $2,$3,$3 # 9 cycles from $3 load \n\ - \n\ - ldq $31,256($19) \n\ - xor $3,$4,$4 # 9 cycles from $4 load \n\ - ldq $31,256($20) \n\ - xor $5,$6,$6 # 8 cycles from $6 load \n\ - \n\ - stq $1,40($17) \n\ - xor $4,$6,$6 \n\ - xor $7,$22,$22 # 7 cycles from $22 load \n\ - xor $23,$24,$24 # 6 cycles from $24 load \n\ - \n\ - stq $6,48($17) \n\ - xor $22,$24,$24 \n\ - ldq $31,256($21) \n\ - xor $24,$25,$25 # 8 cycles from $25 load \n\ - \n\ - stq $25,56($17) \n\ - subq $16,1,$16 \n\ - addq $21,64,$21 \n\ - addq $20,64,$20 \n\ - \n\ - addq $19,64,$19 \n\ - addq $18,64,$18 \n\ - addq $17,64,$17 \n\ - bgt $16,5b \n\ - \n\ - ret \n\ - .end xor_alpha_prefetch_5 \n\ -"); - -static struct xor_block_template xor_block_alpha = { - .name = "alpha", - .do_2 = xor_alpha_2, - .do_3 = xor_alpha_3, - .do_4 = xor_alpha_4, - .do_5 = xor_alpha_5, -}; - -static struct xor_block_template xor_block_alpha_prefetch = { - .name = "alpha prefetch", - .do_2 = xor_alpha_prefetch_2, - .do_3 = xor_alpha_prefetch_3, - .do_4 = xor_alpha_prefetch_4, - .do_5 = xor_alpha_prefetch_5, -}; - -/* For grins, also test the generic routines. */ -#include - -#undef XOR_TRY_TEMPLATES -#define XOR_TRY_TEMPLATES \ - do { \ - xor_speed(&xor_block_8regs); \ - xor_speed(&xor_block_32regs); \ - xor_speed(&xor_block_alpha); \ - xor_speed(&xor_block_alpha_prefetch); \ - } while (0) - -/* Force the use of alpha_prefetch if EV6, as it is significantly - faster in the cold cache case. */ -#define XOR_SELECT_TEMPLATE(FASTEST) \ - (implver() == IMPLVER_EV6 ? &xor_block_alpha_prefetch : FASTEST) -- cgit v1.2.3-70-g09d2 From 2fdc86901d2ab30a12402b46238951d2a7891590 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 13 Aug 2008 18:02:18 +0200 Subject: x86: add MAP_STACK mmap flag as per this discussion: http://lkml.org/lkml/2008/8/12/423 Pardo reported that 64-bit threaded apps, if their stacks exceed the combined size of ~4GB, slow down drastically in pthread_create() - because glibc uses MAP_32BIT to allocate the stacks. The use of MAP_32BIT is a legacy hack - to speed up context switching on certain early model 64-bit P4 CPUs. So introduce a new flag to be used by glibc instead, to not constrain 64-bit apps like this. glibc can switch to this new flag straight away - it will be ignored by the kernel. If those old CPUs ever matter to anyone, support for it can be implemented. 
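As a minimal userspace sketch (not part of this patch; the allocation size and error handling are illustrative), a thread-stack mapping using the new flag could look like the code below, with the bit simply ignored by kernels that do not act on it:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

#ifndef MAP_STACK
#define MAP_STACK 0x20000        /* value introduced for x86 by this patch */
#endif

int main(void)
{
        size_t len = 8 * 1024 * 1024;        /* e.g. an 8 MiB thread stack */
        void *stack;

        /* Anonymous, private mapping hinted as a stack; a kernel without
           MAP_STACK handling treats it as a plain anonymous mapping. */
        stack = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
        if (stack == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        munmap(stack, len);
        return 0;
}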
Signed-off-by: Ingo Molnar Acked-by: Ulrich Drepper --- include/asm-x86/mman.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h index c1682b542da..90bc4108a4f 100644 --- a/include/asm-x86/mman.h +++ b/include/asm-x86/mman.h @@ -12,6 +12,7 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -- cgit v1.2.3-70-g09d2 From 8c9a9dd0fa3a269d380eaae2dc1bee39e865fae1 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 15 Aug 2008 10:39:38 +0100 Subject: tty: remove resize window special case This moves it to being a tty operation. That removes special cases and now also means that resize can be picked up by um and other non vt consoles which may have a resize operation. Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- drivers/char/tty_io.c | 72 ++++++++++++++++++++++------------------ drivers/char/vt.c | 82 +++++++++++++++++++++++++++++++++++----------- drivers/char/vt_ioctl.c | 4 ++- include/linux/tty.h | 2 ++ include/linux/tty_driver.h | 14 ++++++++ include/linux/vt_kern.h | 1 - 6 files changed, 121 insertions(+), 54 deletions(-) (limited to 'include') diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 0e6866fe0f9..a27160ba21d 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -2496,45 +2496,25 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg) } /** - * tiocswinsz - implement window size set ioctl - * @tty; tty - * @arg: user buffer for result + * tty_do_resize - resize event + * @tty: tty being resized + * @real_tty: real tty (if using a pty/tty pair) + * @rows: rows (character) + * @cols: cols (character) * - * Copies the user idea of the window size to the kernel. Traditionally - * this is just advisory information but for the Linux console it - * actually has driver level meaning and triggers a VC resize. - * - * Locking: - * Called function use the console_sem is used to ensure we do - * not try and resize the console twice at once. - * The tty->termios_mutex is used to ensure we don't double - * resize and get confused. 
Lock order - tty->termios_mutex before - * console sem + * Update the termios variables and send the neccessary signals to + * peform a terminal resize correctly */ -static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, - struct winsize __user *arg) +int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, + struct winsize *ws) { - struct winsize tmp_ws; struct pid *pgrp, *rpgrp; unsigned long flags; - if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) - return -EFAULT; - mutex_lock(&tty->termios_mutex); - if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg))) + if (!memcmp(ws, &tty->winsize, sizeof(*ws))) goto done; - -#ifdef CONFIG_VT - if (tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) { - if (vc_lock_resize(tty->driver_data, tmp_ws.ws_col, - tmp_ws.ws_row)) { - mutex_unlock(&tty->termios_mutex); - return -ENXIO; - } - } -#endif /* Get the PID values and reference them so we can avoid holding the tty ctrl lock while sending signals */ spin_lock_irqsave(&tty->ctrl_lock, flags); @@ -2550,13 +2530,41 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, put_pid(pgrp); put_pid(rpgrp); - tty->winsize = tmp_ws; - real_tty->winsize = tmp_ws; + tty->winsize = *ws; + real_tty->winsize = *ws; done: mutex_unlock(&tty->termios_mutex); return 0; } +/** + * tiocswinsz - implement window size set ioctl + * @tty; tty + * @arg: user buffer for result + * + * Copies the user idea of the window size to the kernel. Traditionally + * this is just advisory information but for the Linux console it + * actually has driver level meaning and triggers a VC resize. + * + * Locking: + * Driver dependant. The default do_resize method takes the + * tty termios mutex and ctrl_lock. The console takes its own lock + * then calls into the default method. + */ + +static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, + struct winsize __user *arg) +{ + struct winsize tmp_ws; + if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) + return -EFAULT; + + if (tty->ops->resize) + return tty->ops->resize(tty, real_tty, &tmp_ws); + else + return tty_do_resize(tty, real_tty, &tmp_ws); +} + /** * tioccons - allow admin to move logical console * @file: the file to become console diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 1bc00c9d860..60359c36091 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -803,7 +803,25 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, */ #define VC_RESIZE_MAXCOL (32767) #define VC_RESIZE_MAXROW (32767) -int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) + +/** + * vc_do_resize - resizing method for the tty + * @tty: tty being resized + * @real_tty: real tty (different to tty if a pty/tty pair) + * @vc: virtual console private data + * @cols: columns + * @lines: lines + * + * Resize a virtual console, clipping according to the actual constraints. + * If the caller passes a tty structure then update the termios winsize + * information and perform any neccessary signal handling. + * + * Caller must hold the console semaphore. Takes the termios mutex and + * ctrl_lock of the tty IFF a tty is passed. 
+ */ + +static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, + struct vc_data *vc, unsigned int cols, unsigned int lines) { unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; unsigned int old_cols, old_rows, old_row_size, old_screen_size; @@ -907,24 +925,15 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) gotoxy(vc, vc->vc_x, vc->vc_y); save_cur(vc); - if (vc->vc_tty) { - struct winsize ws, *cws = &vc->vc_tty->winsize; - struct pid *pgrp = NULL; - + if (tty) { + /* Rewrite the requested winsize data with the actual + resulting sizes */ + struct winsize ws; memset(&ws, 0, sizeof(ws)); ws.ws_row = vc->vc_rows; ws.ws_col = vc->vc_cols; ws.ws_ypixel = vc->vc_scan_lines; - - spin_lock_irq(&vc->vc_tty->ctrl_lock); - if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col)) - pgrp = get_pid(vc->vc_tty->pgrp); - spin_unlock_irq(&vc->vc_tty->ctrl_lock); - if (pgrp) { - kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1); - put_pid(pgrp); - } - *cws = ws; + tty_do_resize(tty, real_tty, &ws); } if (CON_IS_VISIBLE(vc)) @@ -932,14 +941,47 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) return err; } -int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) +/** + * vc_resize - resize a VT + * @vc: virtual console + * @cols: columns + * @rows: rows + * + * Resize a virtual console as seen from the console end of things. We + * use the common vc_do_resize methods to update the structures. The + * caller must hold the console sem to protect console internals and + * vc->vc_tty + */ + +int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows) +{ + return vc_do_resize(vc->vc_tty, vc->vc_tty, vc, cols, rows); +} + +/** + * vt_resize - resize a VT + * @tty: tty to resize + * @real_tty: tty if a pty/tty pair + * @ws: winsize attributes + * + * Resize a virtual terminal. This is called by the tty layer as we + * register our own handler for resizing. The mutual helper does all + * the actual work. + * + * Takes the console sem and the called methods then take the tty + * termios_mutex and the tty ctrl_lock in that order. 
+ */ + +int vt_resize(struct tty_struct *tty, struct tty_struct *real_tty, + struct winsize *ws) { - int rc; + struct vc_data *vc = tty->driver_data; + int ret; acquire_console_sem(); - rc = vc_resize(vc, cols, lines); + ret = vc_do_resize(tty, real_tty, vc, ws->ws_col, ws->ws_row); release_console_sem(); - return rc; + return ret; } void vc_deallocate(unsigned int currcons) @@ -2907,6 +2949,7 @@ static const struct tty_operations con_ops = { .start = con_start, .throttle = con_throttle, .unthrottle = con_unthrottle, + .resize = vt_resize, }; int __init vty_init(void) @@ -4061,7 +4104,6 @@ EXPORT_SYMBOL(default_blu); EXPORT_SYMBOL(update_region); EXPORT_SYMBOL(redraw_screen); EXPORT_SYMBOL(vc_resize); -EXPORT_SYMBOL(vc_lock_resize); EXPORT_SYMBOL(fg_console); EXPORT_SYMBOL(console_blank_hook); EXPORT_SYMBOL(console_blanked); diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 3211afd9d57..c904e9ad4a7 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -947,14 +947,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, get_user(cc, &vtsizes->v_cols)) ret = -EFAULT; else { + acquire_console_sem(); for (i = 0; i < MAX_NR_CONSOLES; i++) { vc = vc_cons[i].d; if (vc) { vc->vc_resize_user = 1; - vc_lock_resize(vc_cons[i].d, cc, ll); + vc_resize(vc_cons[i].d, cc, ll); } } + release_console_sem(); } break; } diff --git a/include/linux/tty.h b/include/linux/tty.h index e3579cb086e..0cbec74ec08 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -331,6 +331,8 @@ extern int tty_write_room(struct tty_struct *tty); extern void tty_driver_flush_buffer(struct tty_struct *tty); extern void tty_throttle(struct tty_struct *tty); extern void tty_unthrottle(struct tty_struct *tty); +extern int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, + struct winsize *ws); extern int is_current_pgrp_orphaned(void); extern struct pid *tty_get_pgrp(struct tty_struct *tty); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index e1065ac0d92..16d27944c32 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -168,6 +168,18 @@ * * Optional: If not provided then the write method is called under * the atomic write lock to keep it serialized with the ldisc. + * + * int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, + * unsigned int rows, unsigned int cols); + * + * Called when a termios request is issued which changes the + * requested terminal geometry. + * + * Optional: the default action is to update the termios structure + * without error. This is usually the correct behaviour. Drivers should + * not force errors here if they are not resizable objects (eg a serial + * line). See tty_do_resize() if you need to wrap the standard method + * in your own logic - the usual case. 
*/ #include @@ -206,6 +218,8 @@ struct tty_operations { int (*tiocmget)(struct tty_struct *tty, struct file *file); int (*tiocmset)(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); + int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, + struct winsize *ws); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); int (*poll_get_char)(struct tty_driver *driver, int line); diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 1c78d56c57e..1cbd0a7db4e 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -35,7 +35,6 @@ extern int fg_console, last_console, want_console; int vc_allocate(unsigned int console); int vc_cons_allocated(unsigned int console); int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); -int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); void vc_deallocate(unsigned int console); void reset_palette(struct vc_data *vc); void do_blank_screen(int entering_gfx); -- cgit v1.2.3-70-g09d2 From cd98a04a59e2f94fa64d5bf1e26498d27427d5e7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 13 Aug 2008 18:02:18 +0200 Subject: x86: add MAP_STACK mmap flag as per this discussion: http://lkml.org/lkml/2008/8/12/423 Pardo reported that 64-bit threaded apps, if their stacks exceed the combined size of ~4GB, slow down drastically in pthread_create() - because glibc uses MAP_32BIT to allocate the stacks. The use of MAP_32BIT is a legacy hack - to speed up context switching on certain early model 64-bit P4 CPUs. So introduce a new flag to be used by glibc instead, to not constrain 64-bit apps like this. glibc can switch to this new flag straight away - it will be ignored by the kernel. If those old CPUs ever matter to anyone, support for it can be implemented. Signed-off-by: Ingo Molnar Acked-by: Ulrich Drepper Signed-off-by: Linus Torvalds --- include/asm-x86/mman.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h index c1682b542da..90bc4108a4f 100644 --- a/include/asm-x86/mman.h +++ b/include/asm-x86/mman.h @@ -12,6 +12,7 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ #define MAP_NONBLOCK 0x10000 /* do not block on IO */ +#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ #define MCL_CURRENT 1 /* lock all current mappings */ #define MCL_FUTURE 2 /* lock all future mappings */ -- cgit v1.2.3-70-g09d2 From 66bfa2f03191aec2e2958414b1dfb80a56637133 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sun, 10 Aug 2008 15:25:55 +0100 Subject: [ARM] 5191/1: ARM: remove CVS keywords This patch removes CVS keywords that weren't updated for a long time. 
Signed-off-by: Adrian Bunk Signed-off-by: Russell King --- arch/arm/include/asm/mtd-xip.h | 2 -- arch/arm/mach-integrator/cpu.c | 2 -- arch/arm/mach-integrator/include/mach/platform.h | 2 -- arch/arm/mach-lh7a40x/include/mach/ssp.h | 1 - arch/arm/mach-lh7a40x/lcd-panel.h | 1 - arch/arm/mach-pxa/include/mach/mtd-xip.h | 2 -- arch/arm/mach-s3c2410/include/mach/regs-clock.h | 2 +- arch/arm/mach-s3c2410/include/mach/regs-gpio.h | 2 +- arch/arm/mach-s3c2410/include/mach/regs-irq.h | 2 +- arch/arm/mach-s3c2410/include/mach/regs-lcd.h | 2 +- arch/arm/mach-s3c2410/include/mach/regs-mem.h | 2 +- arch/arm/mach-s3c2410/mach-smdk2410.c | 1 - arch/arm/mach-sa1100/cpu-sa1110.c | 2 -- arch/arm/mach-sa1100/include/mach/mtd-xip.h | 2 -- include/asm-arm/plat-s3c/regs-nand.h | 2 +- include/asm-arm/plat-s3c/regs-timer.h | 2 +- include/asm-arm/plat-s3c/regs-watchdog.h | 2 +- 17 files changed, 8 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/arm/include/asm/mtd-xip.h b/arch/arm/include/asm/mtd-xip.h index 4225372a26f..d8fbe2d9b8b 100644 --- a/arch/arm/include/asm/mtd-xip.h +++ b/arch/arm/include/asm/mtd-xip.h @@ -10,8 +10,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $ */ #ifndef __ARM_MTD_XIP_H__ diff --git a/arch/arm/mach-integrator/cpu.c b/arch/arm/mach-integrator/cpu.c index ce5ea7c2667..7c49d55e6b2 100644 --- a/arch/arm/mach-integrator/cpu.c +++ b/arch/arm/mach-integrator/cpu.c @@ -3,8 +3,6 @@ * * Copyright (C) 2001-2002 Deep Blue Solutions Ltd. * - * $Id: cpu.c,v 1.6 2002/07/18 13:58:51 rmk Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h index 83c4c1ceb41..028b87839c0 100644 --- a/arch/arm/mach-integrator/include/mach/platform.h +++ b/arch/arm/mach-integrator/include/mach/platform.h @@ -26,8 +26,6 @@ * NOTE: This is a multi-hosted header file for use with uHAL and * supported debuggers. * - * $Id: platform.s,v 1.32 2000/02/18 10:51:39 asims Exp $ - * * ***********************************************************************/ #ifndef __address_h diff --git a/arch/arm/mach-lh7a40x/include/mach/ssp.h b/arch/arm/mach-lh7a40x/include/mach/ssp.h index 132b1c4d5ce..509916182e3 100644 --- a/arch/arm/mach-lh7a40x/include/mach/ssp.h +++ b/arch/arm/mach-lh7a40x/include/mach/ssp.h @@ -1,5 +1,4 @@ /* ssp.h - $Id$ written by Marc Singer 6 Dec 2004 diff --git a/arch/arm/mach-lh7a40x/lcd-panel.h b/arch/arm/mach-lh7a40x/lcd-panel.h index df6e38ed425..a7f5027b2f7 100644 --- a/arch/arm/mach-lh7a40x/lcd-panel.h +++ b/arch/arm/mach-lh7a40x/lcd-panel.h @@ -1,5 +1,4 @@ /* lcd-panel.h - $Id$ written by Marc Singer 18 Jul 2005 diff --git a/arch/arm/mach-pxa/include/mach/mtd-xip.h b/arch/arm/mach-pxa/include/mach/mtd-xip.h index 351f32f13ce..4d452fcb150 100644 --- a/arch/arm/mach-pxa/include/mach/mtd-xip.h +++ b/arch/arm/mach-pxa/include/mach/mtd-xip.h @@ -10,8 +10,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
- * - * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $ */ #ifndef __ARCH_PXA_MTD_XIP_H__ diff --git a/arch/arm/mach-s3c2410/include/mach/regs-clock.h b/arch/arm/mach-s3c2410/include/mach/regs-clock.h index d583688458a..b3f90aa7807 100644 --- a/arch/arm/mach-s3c2410/include/mach/regs-clock.h +++ b/arch/arm/mach-s3c2410/include/mach/regs-clock.h @@ -11,7 +11,7 @@ */ #ifndef __ASM_ARM_REGS_CLOCK -#define __ASM_ARM_REGS_CLOCK "$Id: clock.h,v 1.4 2003/04/30 14:50:51 ben Exp $" +#define __ASM_ARM_REGS_CLOCK #define S3C2410_CLKREG(x) ((x) + S3C24XX_VA_CLKPWR) diff --git a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h index 30bec027f5f..528080ceac4 100644 --- a/arch/arm/mach-s3c2410/include/mach/regs-gpio.h +++ b/arch/arm/mach-s3c2410/include/mach/regs-gpio.h @@ -12,7 +12,7 @@ #ifndef __ASM_ARCH_REGS_GPIO_H -#define __ASM_ARCH_REGS_GPIO_H "$Id: gpio.h,v 1.5 2003/05/19 12:51:08 ben Exp $" +#define __ASM_ARCH_REGS_GPIO_H #define S3C2410_GPIONO(bank,offset) ((bank) + (offset)) diff --git a/arch/arm/mach-s3c2410/include/mach/regs-irq.h b/arch/arm/mach-s3c2410/include/mach/regs-irq.h index b057c06d167..de86ee8812b 100644 --- a/arch/arm/mach-s3c2410/include/mach/regs-irq.h +++ b/arch/arm/mach-s3c2410/include/mach/regs-irq.h @@ -10,7 +10,7 @@ #ifndef ___ASM_ARCH_REGS_IRQ_H -#define ___ASM_ARCH_REGS_IRQ_H "$Id: irq.h,v 1.3 2003/03/25 21:29:06 ben Exp $" +#define ___ASM_ARCH_REGS_IRQ_H /* interrupt controller */ diff --git a/arch/arm/mach-s3c2410/include/mach/regs-lcd.h b/arch/arm/mach-s3c2410/include/mach/regs-lcd.h index 893b8742f95..ee8f040aff5 100644 --- a/arch/arm/mach-s3c2410/include/mach/regs-lcd.h +++ b/arch/arm/mach-s3c2410/include/mach/regs-lcd.h @@ -10,7 +10,7 @@ #ifndef ___ASM_ARCH_REGS_LCD_H -#define ___ASM_ARCH_REGS_LCD_H "$Id: lcd.h,v 1.3 2003/06/26 13:25:06 ben Exp $" +#define ___ASM_ARCH_REGS_LCD_H #define S3C2410_LCDREG(x) (x) diff --git a/arch/arm/mach-s3c2410/include/mach/regs-mem.h b/arch/arm/mach-s3c2410/include/mach/regs-mem.h index f9926abd5cd..57759804e2f 100644 --- a/arch/arm/mach-s3c2410/include/mach/regs-mem.h +++ b/arch/arm/mach-s3c2410/include/mach/regs-mem.h @@ -11,7 +11,7 @@ */ #ifndef __ASM_ARM_MEMREGS_H -#define __ASM_ARM_MEMREGS_H "$Id: regs.h,v 1.8 2003/05/01 15:55:41 ben Exp $" +#define __ASM_ARM_MEMREGS_H #ifndef S3C2410_MEMREG #define S3C2410_MEMREG(x) (S3C24XX_VA_MEMCTRL + (x)) diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c index c9040080727..b88939d7228 100644 --- a/arch/arm/mach-s3c2410/mach-smdk2410.c +++ b/arch/arm/mach-s3c2410/mach-smdk2410.c @@ -5,7 +5,6 @@ * Copyright (C) 2004 by FS Forth-Systeme GmbH * All rights reserved. * - * $Id: mach-smdk2410.c,v 1.1 2004/05/11 14:15:38 mpietrek Exp $ * @Author: Jonas Dietsche * * This program is free software; you can redistribute it and/or diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c index 39d38c80173..029dbfbbafc 100644 --- a/arch/arm/mach-sa1100/cpu-sa1110.c +++ b/arch/arm/mach-sa1100/cpu-sa1110.c @@ -3,8 +3,6 @@ * * Copyright (C) 2001 Russell King * - * $Id: cpu-sa1110.c,v 1.9 2002/07/06 16:53:18 rmk Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
diff --git a/arch/arm/mach-sa1100/include/mach/mtd-xip.h b/arch/arm/mach-sa1100/include/mach/mtd-xip.h index 80cfdac2b94..eaa09e86ad1 100644 --- a/arch/arm/mach-sa1100/include/mach/mtd-xip.h +++ b/arch/arm/mach-sa1100/include/mach/mtd-xip.h @@ -10,8 +10,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * $Id: xip.h,v 1.2 2004/12/01 15:49:10 nico Exp $ */ #ifndef __ARCH_SA1100_MTD_XIP_H__ diff --git a/include/asm-arm/plat-s3c/regs-nand.h b/include/asm-arm/plat-s3c/regs-nand.h index 09f0b5503f5..b2caa4bca27 100644 --- a/include/asm-arm/plat-s3c/regs-nand.h +++ b/include/asm-arm/plat-s3c/regs-nand.h @@ -11,7 +11,7 @@ */ #ifndef __ASM_ARM_REGS_NAND -#define __ASM_ARM_REGS_NAND "$Id: nand.h,v 1.3 2003/12/09 11:36:29 ben Exp $" +#define __ASM_ARM_REGS_NAND #define S3C2410_NFREG(x) (x) diff --git a/include/asm-arm/plat-s3c/regs-timer.h b/include/asm-arm/plat-s3c/regs-timer.h index b4366ea3967..cc0eedd53e3 100644 --- a/include/asm-arm/plat-s3c/regs-timer.h +++ b/include/asm-arm/plat-s3c/regs-timer.h @@ -12,7 +12,7 @@ #ifndef __ASM_ARCH_REGS_TIMER_H -#define __ASM_ARCH_REGS_TIMER_H "$Id: timer.h,v 1.4 2003/05/06 19:30:50 ben Exp $" +#define __ASM_ARCH_REGS_TIMER_H #define S3C_TIMERREG(x) (S3C_VA_TIMER + (x)) #define S3C_TIMERREG2(tmr,reg) S3C_TIMERREG((reg)+0x0c+((tmr)*0x0c)) diff --git a/include/asm-arm/plat-s3c/regs-watchdog.h b/include/asm-arm/plat-s3c/regs-watchdog.h index 1229f076c0a..4938492470f 100644 --- a/include/asm-arm/plat-s3c/regs-watchdog.h +++ b/include/asm-arm/plat-s3c/regs-watchdog.h @@ -12,7 +12,7 @@ #ifndef __ASM_ARCH_REGS_WATCHDOG_H -#define __ASM_ARCH_REGS_WATCHDOG_H "$Id: watchdog.h,v 1.2 2003/04/29 13:31:09 ben Exp $" +#define __ASM_ARCH_REGS_WATCHDOG_H #define S3C_WDOGREG(x) ((x) + S3C_VA_WATCHDOG) -- cgit v1.2.3-70-g09d2 From 605d9288b3e8a3d15e6f36185c2fc737b6979572 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Sat, 16 Aug 2008 11:07:21 +0100 Subject: mm: VM_flags comment fixes Try to comment away a little of the confusion between mm's vm_area_struct vm_flags and vmalloc's vm_struct flags: based on an idea by Ulrich Drepper. Signed-off-by: Hugh Dickins Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 +- include/linux/mm_types.h | 2 +- include/linux/vmalloc.h | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index fa651609b65..72a15dc26bb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -73,7 +73,7 @@ extern unsigned int kobjsize(const void *objp); #endif /* - * vm_flags.. + * vm_flags in vm_area_struct, see mm_types.h. */ #define VM_READ 0x00000001 /* currently active flags */ #define VM_WRITE 0x00000002 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 386edbe2cb4..bf334138c7c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -113,7 +113,7 @@ struct vm_area_struct { struct vm_area_struct *vm_next; pgprot_t vm_page_prot; /* Access permissions of this VMA. */ - unsigned long vm_flags; /* Flags, listed below. */ + unsigned long vm_flags; /* Flags, see mm.h. 
*/ struct rb_node vm_rb; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 364789aae9f..328eb402272 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -4,9 +4,9 @@ #include #include /* pgprot_t */ -struct vm_area_struct; +struct vm_area_struct; /* vma defining user mapping in mm_types.h */ -/* bits in vm_struct->flags */ +/* bits in flags of vmalloc's vm_struct below */ #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ -- cgit v1.2.3-70-g09d2 From 5e6b83ed8c00f2e2ae5b2413c5907bed735b600d Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Sat, 16 Aug 2008 11:55:04 +0100 Subject: Fix header export of videodev2.h, ivtv.h, ivtvfb.h The exported copy of videodev2.h contains this line: #define #include This is because for some reason it defines __user for itself -- despite the fact that we remove all instances of __user when exporting headers. _All_ pointers in userspace are user pointers. Fix it by removing the unnecessary '#define __user' from the file. The new headers ivtv.h and ivtvfb.h would have the same problem... if whoever put them there had actually remembered to add them to the Kbuild file while he was at it. Fix those too, and export them as was presumably intended. Note that includes of are also stripped by the header export process, so those don't need to be conditional. Signed-off-by: David Woodhouse Signed-off-by: Mauro Carvalho Chehab Acked-by: Hans Verkuil Signed-off-by: Linus Torvalds --- include/linux/Kbuild | 2 ++ include/linux/ivtv.h | 6 +----- include/linux/ivtvfb.h | 6 +----- include/linux/videodev2.h | 4 ++-- 4 files changed, 6 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 327f60658d9..7d970678f94 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -250,6 +250,8 @@ unifdef-y += isdn.h unifdef-y += isdnif.h unifdef-y += isdn_divertif.h unifdef-y += isdn_ppp.h +unifdef-y += ivtv.h +unifdef-y += ivtvfb.h unifdef-y += joystick.h unifdef-y += kdev_t.h unifdef-y += kd.h diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h index 794b8daa937..17ca64b5a66 100644 --- a/include/linux/ivtv.h +++ b/include/linux/ivtv.h @@ -21,11 +21,7 @@ #ifndef __LINUX_IVTV_H__ #define __LINUX_IVTV_H__ -#ifdef __KERNEL__ -#include /* need __user */ -#else -#define __user -#endif +#include #include /* ivtv knows several distinct output modes: MPEG streaming, diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h index e980ba62ddc..e20af47b59a 100644 --- a/include/linux/ivtvfb.h +++ b/include/linux/ivtvfb.h @@ -21,11 +21,7 @@ #ifndef __LINUX_IVTVFB_H__ #define __LINUX_IVTVFB_H__ -#ifdef __KERNEL__ -#include /* need __user */ -#else -#define __user -#endif +#include #include /* Framebuffer external API */ diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index e466bd54a50..e65a6bed4e3 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -55,13 +55,13 @@ */ #ifndef __LINUX_VIDEODEV2_H #define __LINUX_VIDEODEV2_H + #ifdef __KERNEL__ #include /* need struct timeval */ -#include /* need __user */ #else -#define __user #include #endif +#include #include #include -- cgit v1.2.3-70-g09d2 From 5e186b57e7ede86aeb9db30e66315bde4e8b1815 Mon Sep 17 00:00:00 2001 From: Alexander Beregalov Date: Sun, 17 Aug 2008 05:34:20 +0400 Subject: security.h: fix build failure security.h: fix build failure include/linux/security.h: In function 
'security_ptrace_traceme': include/linux/security.h:1760: error: 'parent' undeclared (first use in this function) Signed-off-by: Alexander Beregalov Tested-by: Ingo Molnar Signed-off-by: James Morris --- include/linux/security.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/security.h b/include/linux/security.h index 2ee5ecfb239..80c4d002864 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1755,7 +1755,7 @@ static inline int security_ptrace_may_access(struct task_struct *child, return cap_ptrace_may_access(child, mode); } -static inline int security_ptrace_traceme(struct task_struct *child) +static inline int security_ptrace_traceme(struct task_struct *parent) { return cap_ptrace_traceme(parent); } -- cgit v1.2.3-70-g09d2 From 3c3b5c3b0bf798316a410e27e3d7e6f015663602 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Sat, 16 Aug 2008 03:39:26 -0400 Subject: x86: correct register constraints for 64-bit atomic operations x86_64 add/sub atomic ops do not seem to accept integer values bigger than 32 bits as immediates. Intel's add/sub documentation specifies they have to be passed in registers. The only operation in the x86-64 architecture which accepts arbitrary 64-bit immediates is "movq" to any register; similarly, the only operation which accepts an arbitrary 64-bit displacement is "movabs" to or from al/ax/eax/rax. http://gcc.gnu.org/onlinedocs/gcc-4.3.0/gcc/Machine-Constraints.html states: e 32-bit signed integer constant, or a symbolic reference known to fit that range (for immediate operands in sign-extending x86-64 instructions). Z 32-bit unsigned integer constant, or a symbolic reference known to fit that range (for immediate operands in zero-extending x86-64 instructions). Since add/sub does sign extension, using the "e" constraint seems appropriate. It applies to 2.6.27-rc, 2.6.26, 2.6.25... Signed-off-by: Mathieu Desnoyers Signed-off-by: H.
Peter Anvin Signed-off-by: Ingo Molnar --- include/asm-x86/atomic_64.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h index a0095191c02..91c7d03e65b 100644 --- a/include/asm-x86/atomic_64.h +++ b/include/asm-x86/atomic_64.h @@ -228,7 +228,7 @@ static inline void atomic64_add(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) - : "ir" (i), "m" (v->counter)); + : "er" (i), "m" (v->counter)); } /** @@ -242,7 +242,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "subq %1,%0" : "=m" (v->counter) - : "ir" (i), "m" (v->counter)); + : "er" (i), "m" (v->counter)); } /** @@ -260,7 +260,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" : "=m" (v->counter), "=qm" (c) - : "ir" (i), "m" (v->counter) : "memory"); + : "er" (i), "m" (v->counter) : "memory"); return c; } @@ -341,7 +341,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" : "=m" (v->counter), "=qm" (c) - : "ir" (i), "m" (v->counter) : "memory"); + : "er" (i), "m" (v->counter) : "memory"); return c; } -- cgit v1.2.3-70-g09d2 From c72a5efec1193faa2ef34c0bd48d7251a70ec934 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Mon, 11 Aug 2008 00:11:13 +0200 Subject: x86: mmconf: fix section mismatch warning WARNING: arch/x86/kernel/built-in.o(.cpuinit.text+0x1591): Section mismatch in reference from the function init_amd() to the function .init.text:check_enable_amd_mmconf_dmi() The function __cpuinit init_amd() references a function __init check_enable_amd_mmconf_dmi(). If check_enable_amd_mmconf_dmi is only used by init_amd then annotate check_enable_amd_mmconf_dmi with a matching annotation. check_enable_amd_mmconf_dmi is only called from init_amd which is __cpuinit Signed-off-by: Marcin Slusarz Signed-off-by: H. 
Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/kernel/mmconf-fam10h_64.c | 2 +- include/asm-x86/mmconfig.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index fdfdc550b36..efc2f361fe8 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c @@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata mmconf_dmi_table[] = { {} }; -void __init check_enable_amd_mmconf_dmi(void) +void __cpuinit check_enable_amd_mmconf_dmi(void) { dmi_check_system(mmconf_dmi_table); } diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h index 95beda07c6f..e293ab81e85 100644 --- a/include/asm-x86/mmconfig.h +++ b/include/asm-x86/mmconfig.h @@ -3,7 +3,7 @@ #ifdef CONFIG_PCI_MMCONFIG extern void __cpuinit fam10h_check_enable_mmcfg(void); -extern void __init check_enable_amd_mmconf_dmi(void); +extern void __cpuinit check_enable_amd_mmconf_dmi(void); #else static inline void fam10h_check_enable_mmcfg(void) { } static inline void check_enable_amd_mmconf_dmi(void) { } -- cgit v1.2.3-70-g09d2 From c6a92a2501b35880d2e357dbd7f2cbc9a06f1058 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Sun, 17 Aug 2008 17:50:50 +0200 Subject: x86, percpu: silence section mismatch warnings related to EARLY_PER_CPU variables Quoting Mike Travis in "x86: cleanup early per cpu variables/accesses v4" (23ca4bba3e20c6c3cb11c1bb0ab4770b724d39ac): The DEFINE macro defines the per_cpu variable as well as the early map and pointer. It also initializes the per_cpu variable and map elements to "_initvalue". The early_* macros provide access to the initial map (usually setup during system init) and the early pointer. This pointer is initialized to point to the early map but is then NULL'ed when the actual per_cpu areas are setup. After that the per_cpu variable is the correct access to the variable. As these variables are NULL'ed before __init sections are dropped (in setup_per_cpu_maps), they can be safely annotated as __ref. 
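A hedged sketch of the pattern these annotations cover (kernel build context and the usual x86 headers assumed; the helper name is made up, while DEFINE_EARLY_PER_CPU() and early_per_cpu() follow the x86 macro family touched by this patch):

/* boot-time map plus early pointer; the early pointer is NULL'ed in
   setup_per_cpu_maps() once the real per-cpu areas exist */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);

static u16 example_cpu_apicid(int cpu)        /* hypothetical helper */
{
        /* reads the __initdata map through the early pointer while it
           is still set, and the per-cpu variable afterwards */
        return early_per_cpu(x86_cpu_to_apicid, cpu);
}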
This change silences following section mismatch warnings: WARNING: vmlinux.o(.data+0x46c0): Section mismatch in reference from the variable x86_cpu_to_apicid_early_ptr to the variable .init.data:x86_cpu_to_apicid_early_map The variable x86_cpu_to_apicid_early_ptr references the variable __initdata x86_cpu_to_apicid_early_map If the reference is valid then annotate the variable with __init* (see linux/init.h) or name the variable: *driver, *_template, *_timer, *_sht, *_ops, *_probe, *_probe_one, *_console, WARNING: vmlinux.o(.data+0x46c8): Section mismatch in reference from the variable x86_bios_cpu_apicid_early_ptr to the variable .init.data:x86_bios_cpu_apicid_early_map The variable x86_bios_cpu_apicid_early_ptr references the variable __initdata x86_bios_cpu_apicid_early_map If the reference is valid then annotate the variable with __init* (see linux/init.h) or name the variable: *driver, *_template, *_timer, *_sht, *_ops, *_probe, *_probe_one, *_console, WARNING: vmlinux.o(.data+0x46d0): Section mismatch in reference from the variable x86_cpu_to_node_map_early_ptr to the variable .init.data:x86_cpu_to_node_map_early_map The variable x86_cpu_to_node_map_early_ptr references the variable __initdata x86_cpu_to_node_map_early_map If the reference is valid then annotate the variable with __init* (see linux/init.h) or name the variable: *driver, *_template, *_timer, *_sht, *_ops, *_probe, *_probe_one, *_console, Signed-off-by: Marcin Slusarz Cc: Mike Travis Signed-off-by: Ingo Molnar --- include/asm-x86/percpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h index 4e91ee1e37a..f643a3a92da 100644 --- a/include/asm-x86/percpu.h +++ b/include/asm-x86/percpu.h @@ -182,7 +182,7 @@ do { \ DEFINE_PER_CPU(_type, _name) = _initvalue; \ __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ { [0 ... NR_CPUS-1] = _initvalue }; \ - __typeof__(_type) *_name##_early_ptr = _name##_early_map + __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map #define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ EXPORT_PER_CPU_SYMBOL(_name) -- cgit v1.2.3-70-g09d2 From 37014c64079748c47fd109ef2d91ecd785a8c764 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 18 Aug 2008 21:40:05 +0200 Subject: ata: add missing ATA_ID_* defines (take 2) Add missing ATA_ID_* defines and update {ata,atapi}_*() inlines accordingly. The currently unused defines are needed for the forthcoming drivers/ide/ changes. v2: Add ATA_ID_SPG. 
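As an illustrative aside (the helpers below are hypothetical, not part of the patch), the named word indices let IDENTIFY data be decoded without bare numbers:

#include <linux/ata.h>

/* same test as ata_id_has_lba(), written against the named index
   rather than a literal 49 */
static inline int example_id_has_lba(const u16 *id)
{
        return (id[ATA_ID_CAPABILITY] & (1 << 9)) != 0;
}

/* 28-bit LBA sector count taken from the named capacity word */
static inline u32 example_id_lba_sectors(const u16 *id)
{
        return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
}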
Acked-by: Jeff Garzik Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/ata.h | 122 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 76 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/include/linux/ata.h b/include/linux/ata.h index 1c622e2b050..03fff6239b3 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -46,18 +46,48 @@ enum { ATA_MAX_SECTORS_TAPE = 65535, ATA_ID_WORDS = 256, + ATA_ID_CONFIG = 0, + ATA_ID_CYLS = 1, + ATA_ID_HEADS = 3, + ATA_ID_SECTORS = 6, ATA_ID_SERNO = 10, + ATA_ID_BUF_SIZE = 21, ATA_ID_FW_REV = 23, ATA_ID_PROD = 27, + ATA_ID_MAX_MULTSECT = 47, + ATA_ID_DWORD_IO = 48, + ATA_ID_CAPABILITY = 49, ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_OLD_DMA_MODES = 52, ATA_ID_FIELD_VALID = 53, + ATA_ID_CUR_CYLS = 54, + ATA_ID_CUR_HEADS = 55, + ATA_ID_CUR_SECTORS = 56, + ATA_ID_MULTSECT = 59, + ATA_ID_LBA_CAPACITY = 60, + ATA_ID_SWDMA_MODES = 62, ATA_ID_MWDMA_MODES = 63, ATA_ID_PIO_MODES = 64, ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_DMA_TIME = 66, ATA_ID_EIDE_PIO = 67, ATA_ID_EIDE_PIO_IORDY = 68, - ATA_ID_UDMA_MODES = 88, + ATA_ID_QUEUE_DEPTH = 75, ATA_ID_MAJOR_VER = 80, + ATA_ID_COMMAND_SET_1 = 82, + ATA_ID_COMMAND_SET_2 = 83, + ATA_ID_CFSSE = 84, + ATA_ID_CFS_ENABLE_1 = 85, + ATA_ID_CFS_ENABLE_2 = 86, + ATA_ID_CSF_DEFAULT = 87, + ATA_ID_UDMA_MODES = 88, + ATA_ID_HW_CONFIG = 93, + ATA_ID_SPG = 98, + ATA_ID_LBA_CAPACITY_2 = 100, + ATA_ID_LAST_LUN = 126, + ATA_ID_DLF = 128, + ATA_ID_CSFO = 129, + ATA_ID_CFA_POWER = 160, ATA_ID_PIO4 = (1 << 1), ATA_ID_SERNO_LEN = 20, @@ -438,17 +468,17 @@ static inline int ata_is_data(u8 prot) /* * id tests */ -#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0) -#define ata_id_has_lba(id) ((id)[49] & (1 << 9)) -#define ata_id_has_dma(id) ((id)[49] & (1 << 8)) +#define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0) +#define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9)) +#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8)) #define ata_id_has_ncq(id) ((id)[76] & (1 << 8)) -#define ata_id_queue_depth(id) (((id)[75] & 0x1f) + 1) -#define ata_id_removeable(id) ((id)[0] & (1 << 7)) +#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1) +#define ata_id_removeable(id) ((id)[ATA_ID_CONFIG] & (1 << 7)) #define ata_id_has_atapi_AN(id) \ ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ ((id)[78] & (1 << 5)) ) -#define ata_id_iordy_disable(id) ((id)[49] & (1 << 10)) -#define ata_id_has_iordy(id) ((id)[49] & (1 << 11)) +#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) +#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) #define ata_id_u32(id,n) \ (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)])) #define ata_id_u64(id,n) \ @@ -457,7 +487,7 @@ static inline int ata_is_data(u8 prot) ((u64) (id)[(n) + 1] << 16) | \ ((u64) (id)[(n) + 0]) ) -#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20) +#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) static inline bool ata_id_has_hipm(const u16 *id) { @@ -482,75 +512,75 @@ static inline bool ata_id_has_dipm(const u16 *id) static inline int ata_id_has_fua(const u16 *id) { - if ((id[84] & 0xC000) != 0x4000) + if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000) return 0; - return id[84] & (1 << 6); + return id[ATA_ID_CFSSE] & (1 << 6); } static inline int ata_id_has_flush(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - return id[83] & (1 << 12); + return id[ATA_ID_COMMAND_SET_2] & (1 << 12); } static inline 
int ata_id_has_flush_ext(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - return id[83] & (1 << 13); + return id[ATA_ID_COMMAND_SET_2] & (1 << 13); } static inline int ata_id_has_lba48(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - if (!ata_id_u64(id, 100)) + if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2)) return 0; - return id[83] & (1 << 10); + return id[ATA_ID_COMMAND_SET_2] & (1 << 10); } static inline int ata_id_hpa_enabled(const u16 *id) { /* Yes children, word 83 valid bits cover word 82 data */ - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; /* And 87 covers 85-87 */ - if ((id[87] & 0xC000) != 0x4000) + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return 0; /* Check command sets enabled as well as supported */ - if ((id[85] & ( 1 << 10)) == 0) + if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0) return 0; - return id[82] & (1 << 10); + return id[ATA_ID_COMMAND_SET_1] & (1 << 10); } static inline int ata_id_has_wcache(const u16 *id) { /* Yes children, word 83 valid bits cover word 82 data */ - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - return id[82] & (1 << 5); + return id[ATA_ID_COMMAND_SET_1] & (1 << 5); } static inline int ata_id_has_pm(const u16 *id) { - if ((id[83] & 0xC000) != 0x4000) + if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000) return 0; - return id[82] & (1 << 3); + return id[ATA_ID_COMMAND_SET_1] & (1 << 3); } static inline int ata_id_rahead_enabled(const u16 *id) { - if ((id[87] & 0xC000) != 0x4000) + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return 0; - return id[85] & (1 << 6); + return id[ATA_ID_CFS_ENABLE_1] & (1 << 6); } static inline int ata_id_wcache_enabled(const u16 *id) { - if ((id[87] & 0xC000) != 0x4000) + if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000) return 0; - return id[85] & (1 << 5); + return id[ATA_ID_CFS_ENABLE_1] & (1 << 5); } /** @@ -581,7 +611,7 @@ static inline unsigned int ata_id_major_version(const u16 *id) static inline int ata_id_is_sata(const u16 *id) { - return ata_id_major_version(id) >= 5 && id[93] == 0; + return ata_id_major_version(id) >= 5 && id[ATA_ID_HW_CONFIG] == 0; } static inline int ata_id_has_tpm(const u16 *id) @@ -599,7 +629,7 @@ static inline int ata_id_has_dword_io(const u16 *id) /* ATA 8 reuses this flag for "trusted" computing */ if (ata_id_major_version(id) > 7) return 0; - if (id[48] & (1 << 0)) + if (id[ATA_ID_DWORD_IO] & (1 << 0)) return 1; return 0; } @@ -608,22 +638,22 @@ static inline int ata_id_current_chs_valid(const u16 *id) { /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command has not been issued to the device then the values of - id[54] to id[56] are vendor specific. */ - return (id[53] & 0x01) && /* Current translation valid */ - id[54] && /* cylinders in current translation */ - id[55] && /* heads in current translation */ - id[55] <= 16 && - id[56]; /* sectors in current translation */ + id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. 
*/ + return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */ + id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */ + id[ATA_ID_CUR_HEADS] && /* heads in current translation */ + id[ATA_ID_CUR_HEADS] <= 16 && + id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */ } static inline int ata_id_is_cfa(const u16 *id) { - u16 v = id[0]; - if (v == 0x848A) /* Standard CF */ + if (id[ATA_ID_CONFIG] == 0x848A) /* Standard CF */ return 1; /* Could be CF hiding as standard ATA */ - if (ata_id_major_version(id) >= 3 && id[82] != 0xFFFF && - (id[82] & ( 1 << 2))) + if (ata_id_major_version(id) >= 3 && + id[ATA_ID_COMMAND_SET_1] != 0xFFFF && + (id[ATA_ID_COMMAND_SET_1] & (1 << 2))) return 1; return 0; } @@ -632,21 +662,21 @@ static inline int ata_drive_40wire(const u16 *dev_id) { if (ata_id_is_sata(dev_id)) return 0; /* SATA */ - if ((dev_id[93] & 0xE000) == 0x6000) + if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000) return 0; /* 80 wire */ return 1; } static inline int ata_drive_40wire_relaxed(const u16 *dev_id) { - if ((dev_id[93] & 0x2000) == 0x2000) + if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000) return 0; /* 80 wire */ return 1; } static inline int atapi_cdb_len(const u16 *dev_id) { - u16 tmp = dev_id[0] & 0x3; + u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3; switch (tmp) { case 0: return 12; case 1: return 16; @@ -656,7 +686,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) static inline int atapi_command_packet_set(const u16 *dev_id) { - return (dev_id[0] >> 8) & 0x1f; + return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; } static inline int atapi_id_dmadir(const u16 *dev_id) -- cgit v1.2.3-70-g09d2 From 476d9894dde2da2c2b326d70b5bce5eccc593c8b Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 18 Aug 2008 21:40:05 +0200 Subject: ata: add missing ATA_CMD_* defines Add missing ATA_CMD_* defines to . Also add ATA_EXABYTE_ENABLE_NEST, SETFEATURES_AAM_* and ATA_SMART_* defines while at it. Partially based on earlier work by Chris Wedgwood. 
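For illustration only (the register-image struct below is hypothetical, not a kernel type), a SMART READ DATA command would be assembled from these defines roughly as follows:

#include <linux/ata.h>

struct example_taskfile {                        /* made-up container */
        u8 feature, nsect, lbal, lbam, lbah, command;
};

static void example_setup_smart_read(struct example_taskfile *tf)
{
        tf->feature = ATA_SMART_READ_VALUES;     /* SMART subcommand */
        tf->nsect   = 1;                         /* one 512-byte data block */
        tf->lbal    = 0;
        tf->lbam    = ATA_SMART_LBAM_PASS;       /* 0x4F */
        tf->lbah    = ATA_SMART_LBAH_PASS;       /* 0xC2 */
        tf->command = ATA_CMD_SMART;             /* 0xB0 */
}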
Acked-by: Chris Wedgwood Acked-by: Jeff Garzik Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/ata.h | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'include') diff --git a/include/linux/ata.h b/include/linux/ata.h index 03fff6239b3..cf4ef6d915a 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -222,6 +222,13 @@ enum { ATA_CMD_PMP_WRITE = 0xE8, ATA_CMD_CONF_OVERLAY = 0xB1, ATA_CMD_SEC_FREEZE_LOCK = 0xF5, + ATA_CMD_SMART = 0xB0, + ATA_CMD_MEDIA_LOCK = 0xDE, + ATA_CMD_MEDIA_UNLOCK = 0xDF, + /* marked obsolete in the ATA/ATAPI-7 spec */ + ATA_CMD_RESTORE = 0x10, + /* EXABYTE specific */ + ATA_EXABYTE_ENABLE_NEST = 0xF0, /* READ_LOG_EXT pages */ ATA_LOG_SATA_NCQ = 0x10, @@ -262,6 +269,10 @@ enum { SETFEATURES_WC_ON = 0x02, /* Enable write cache */ SETFEATURES_WC_OFF = 0x82, /* Disable write cache */ + /* Enable/Disable Automatic Acoustic Management */ + SETFEATURES_AAM_ON = 0x42, + SETFEATURES_AAM_OFF = 0xC2, + SETFEATURES_SPINUP = 0x07, /* Spin-up drive */ SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ @@ -284,6 +295,15 @@ enum { ATA_DCO_IDENTIFY = 0xC2, ATA_DCO_SET = 0xC3, + /* feature values for SMART */ + ATA_SMART_ENABLE = 0xD8, + ATA_SMART_READ_VALUES = 0xD0, + ATA_SMART_READ_THRESHOLDS = 0xD1, + + /* password used in LBA Mid / LBA High for executing SMART commands */ + ATA_SMART_LBAM_PASS = 0x4F, + ATA_SMART_LBAH_PASS = 0xC2, + /* ATAPI stuff */ ATAPI_PKT_DMA = (1 << 0), ATAPI_DMADIR = (1 << 2), /* ATAPI data dir: -- cgit v1.2.3-70-g09d2 From b59116205c54c89df9cc80721b59e1e8d14488f1 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Mon, 18 Aug 2008 21:40:05 +0200 Subject: ata: add missing ATA_* defines Add missing ATA_* defines to . Also add ATAPI_{LFS,EOM,ILI,IO,CODE} defines while at it. Cc: Jeff Garzik Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/ata.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include') diff --git a/include/linux/ata.h b/include/linux/ata.h index cf4ef6d915a..1ce19c1ef0e 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -153,13 +153,26 @@ enum { ATA_BUSY = (1 << 7), /* BSY status bit */ ATA_DRDY = (1 << 6), /* device ready */ ATA_DF = (1 << 5), /* device fault */ + ATA_DSC = (1 << 4), /* drive seek complete */ ATA_DRQ = (1 << 3), /* data request i/o */ + ATA_CORR = (1 << 2), /* corrected data error */ + ATA_IDX = (1 << 1), /* index */ ATA_ERR = (1 << 0), /* have an error */ ATA_SRST = (1 << 2), /* software reset */ ATA_ICRC = (1 << 7), /* interface CRC error */ + ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */ ATA_UNC = (1 << 6), /* uncorrectable media error */ + ATA_MC = (1 << 5), /* media changed */ ATA_IDNF = (1 << 4), /* ID not found */ + ATA_MCR = (1 << 3), /* media change requested */ ATA_ABORTED = (1 << 2), /* command aborted */ + ATA_TRK0NF = (1 << 1), /* track 0 not found */ + ATA_AMNF = (1 << 0), /* address mark not found */ + ATAPI_LFS = 0xF0, /* last failed sense */ + ATAPI_EOM = ATA_TRK0NF, /* end of media */ + ATAPI_ILI = ATA_AMNF, /* illegal length indication */ + ATAPI_IO = (1 << 1), + ATAPI_COD = (1 << 0), /* ATA command block registers */ ATA_REG_DATA = 0x00, -- cgit v1.2.3-70-g09d2
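Finally, a short hedged sketch of how the newly named status/error bits might be consumed when logging a failed command (the function and message format are illustrative, not existing kernel code):

#include <linux/kernel.h>
#include <linux/ata.h>

static void example_report_taskfile_error(u8 status, u8 error)
{
        if (status & ATA_BUSY)
                return;        /* other status bits are invalid while BSY is set */
        if (!(status & ATA_ERR))
                return;        /* nothing to report */

        pr_info("ata: error=0x%02x%s%s%s\n", error,
                (error & ATA_ICRC)    ? " ICRC" : "",
                (error & ATA_UNC)     ? " UNC"  : "",
                (error & ATA_ABORTED) ? " ABRT" : "");
}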