path: root/arch/x86/xen/xen-asm.S
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is written for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e. vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>

#include "xen-asm.h"

/*
 * Enable events.  This clears the event mask and then tests the
 * pending event status with a single and operation.  If there are
 * pending events, then enter the hypervisor to get them handled.
 */
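/*
 * A simplified C sketch of the same logic, cf. the out-of-line C
 * version xen_irq_enable() (which also copes with the indirect vcpu
 * case that we don't bother with here):
 *
 *	vcpu->evtchn_upcall_mask = 0;
 *	barrier();
 *	if (unlikely(vcpu->evtchn_upcall_pending))
 *		xen_force_evtchn_callback();
 */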
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
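/*
 * The C equivalent, xen_irq_disable(), is essentially just:
 *
 *	vcpu->evtchn_upcall_mask = 1;
 *
 * No pending check is needed when masking.
 */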
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
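/*
 * A rough C equivalent, cf. the out-of-line xen_save_fl():
 *
 *	flags = !vcpu->evtchn_upcall_mask;	(toggle: mask vs enable)
 *	return (-flags) & X86_EFLAGS_IF;	(either 0 or X86_EFLAGS_IF)
 */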
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah		/* %ah = 1 if events were enabled (mask == 0) */
	addb %ah,%ah		/* 1 -> 2: X86_EFLAGS_IF is bit 9, i.e. bit 1 of %ah */
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
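/*
 * Roughly, in C (cf. the out-of-line xen_restore_fl()):
 *
 *	vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *	if (vcpu->evtchn_upcall_mask == 0) {
 *		barrier();
 *		if (vcpu->evtchn_upcall_pending)
 *			xen_force_evtchn_callback();
 *	}
 *
 * The flags argument arrives in %rdi on 64-bit and %eax on 32-bit,
 * which is why the 32-bit variant tests %ah: X86_EFLAGS_IF is bit 9,
 * i.e. bit 1 of the second byte.
 */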
ENTRY(xen_restore_fl_direct)
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask	/* mask = 1 iff IF was clear */
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending: the 16-bit compare reads the
	 * pending byte (low) and the mask byte (high) in one go, so the
	 * word equals 0x0001 exactly when an event is pending and events
	 * are unmasked.  Only in that case do we need the hypercall.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.  xen_force_evtchn_callback is an ordinary C
 * function, so everything the C ABI allows it to clobber must be
 * saved here: callers of the direct ops above expect their registers
 * to be left untouched (32-bit: eax/ecx/edx; 64-bit: all the
 * caller-saved registers).
 */
check_events:
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	ret