Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/include/asm/futex.h      9
-rw-r--r--  arch/s390/Kconfig                  1
-rw-r--r--  arch/s390/defconfig               37
-rw-r--r--  arch/s390/include/asm/facility.h   3
-rw-r--r--  arch/s390/include/asm/pgalloc.h    3
-rw-r--r--  arch/s390/include/asm/swab.h       2
-rw-r--r--  arch/s390/include/asm/tlb.h       22
-rw-r--r--  arch/s390/kernel/head.S            2
-rw-r--r--  arch/s390/kernel/irq.c             9
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c    4
-rw-r--r--  arch/s390/mm/maccess.c            27
-rw-r--r--  arch/s390/mm/pgtable.c            63
-rw-r--r--  arch/x86/kvm/pmu.c                18
-rw-r--r--  arch/x86/kvm/vmx.c                 5
-rw-r--r--  arch/x86/lib/insn.c               53
15 files changed, 162 insertions(+), 96 deletions(-)
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 0ab82cc2dc8..d2bf1fd5e44 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -106,15 +106,16 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
{
- register unsigned long r8 __asm ("r8") = 0;
+ register unsigned long r8 __asm ("r8");
unsigned long prev;
__asm__ __volatile__(
" mf;; \n"
- " mov ar.ccv=%3;; \n"
- "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
+ " mov %0=r0 \n"
+ " mov ar.ccv=%4;; \n"
+ "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]"
- : "=r" (prev)
+ : "=r" (r8), "=r" (prev)
: "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval)
: "memory");
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2b7c0fbe578..9015060919a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -90,7 +90,6 @@ config S390
select HAVE_KERNEL_XZ
select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
- select HAVE_RCU_TABLE_FREE if SMP
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 6cf8e26b313..1957a9dd256 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,8 +1,12 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
-CONFIG_RCU_TRACE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -14,16 +18,22 @@ CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
+CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_XZ=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -34,18 +44,15 @@ CONFIG_KSM=y
CONFIG_BINFMT_MISC=m
CONFIG_CMM=m
CONFIG_HZ_100=y
-CONFIG_KEXEC=y
-CONFIG_PM=y
+CONFIG_CRASH_DUMP=y
CONFIG_HIBERNATION=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_NET_KEY=y
-CONFIG_AFIUCV=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6=y
-CONFIG_NET_SCTPPROBE=m
CONFIG_L2TP=m
CONFIG_L2TP_DEBUGFS=m
CONFIG_VLAN_8021Q=y
@@ -84,15 +91,14 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ZFCP=y
-CONFIG_ZFCP_DIF=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
CONFIG_VIRTIO_NET=y
CONFIG_RAW_DRIVER=m
+CONFIG_VIRTIO_BALLOON=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -103,27 +109,21 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_IBM_PARTITION=y
-CONFIG_DLM=m
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_TIMER_STATS=y
CONFIG_PROVE_LOCKING=y
CONFIG_PROVE_RCU=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_NOTIFIERS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_RCU_TRACE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_DEBUG_PAGEALLOC=y
-# CONFIG_FTRACE is not set
+CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_STRICT_DEVMEM is not set
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
@@ -173,4 +173,3 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRC7=m
-CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 1e5b27edc0c..2ee66a65f2d 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -38,12 +38,11 @@ static inline void stfle(u64 *stfle_fac_list, int size)
unsigned long nr;
preempt_disable();
- S390_lowcore.stfl_fac_list = 0;
asm volatile(
" .insn s,0xb2b10000,0(0)\n" /* stfl */
"0:\n"
EX_TABLE(0b, 0b)
- : "=m" (S390_lowcore.stfl_fac_list));
+ : "+m" (S390_lowcore.stfl_fac_list));
nr = 4; /* bytes stored by stfl */
memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
if (S390_lowcore.stfl_fac_list & 0x01000000) {
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 8eef9b5b3cf..78e3041919d 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,10 +22,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
void page_table_free(struct mm_struct *, unsigned long *);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
void page_table_free_rcu(struct mmu_gather *, unsigned long *);
-void __tlb_remove_table(void *_table);
-#endif
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index 6bdee21c077..a3e4ebb3209 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -77,7 +77,7 @@ static inline __u16 __arch_swab16p(const __u16 *x)
asm volatile(
#ifndef __s390x__
- " icm %0,2,%O+1(%R1)\n"
+ " icm %0,2,%O1+1(%R1)\n"
" ic %0,%1\n"
: "=&d" (result) : "Q" (*x) : "cc");
#else /* __s390x__ */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index c687a2c8346..775a5eea8f9 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -30,14 +30,10 @@
struct mmu_gather {
struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch;
-#endif
unsigned int fullmm;
- unsigned int need_flush;
};
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch {
struct rcu_head rcu;
unsigned int nr;
@@ -49,7 +45,6 @@ struct mmu_table_batch {
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-#endif
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm,
@@ -57,29 +52,20 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
{
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
- tlb->need_flush = 0;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb->batch = NULL;
-#endif
if (tlb->fullmm)
__tlb_flush_mm(mm);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
- if (!tlb->need_flush)
- return;
- tlb->need_flush = 0;
- __tlb_flush_mm(tlb->mm);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
-#endif
}
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end)
{
- tlb_flush_mmu(tlb);
+ tlb_table_flush(tlb);
}
/*
@@ -105,10 +91,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long address)
{
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
if (!tlb->fullmm)
return page_table_free_rcu(tlb, (unsigned long *) pte);
-#endif
page_table_free(tlb->mm, (unsigned long *) pte);
}
@@ -125,10 +109,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
if (!tlb->fullmm)
return tlb_remove_table(tlb, pmd);
-#endif
crst_table_free(tlb->mm, (unsigned long *) pmd);
#endif
}
@@ -146,10 +128,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
#ifdef __s390x__
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
if (!tlb->fullmm)
return tlb_remove_table(tlb, pud);
-#endif
crst_table_free(tlb->mm, (unsigned long *) pud);
#endif
}
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index c27a0727f93..adccd908ebc 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -474,9 +474,9 @@ ENTRY(startup_kdump)
stck __LC_LAST_UPDATE_CLOCK
spt 5f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13)
+ xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
- xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
.insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1c2cdd59ccd..8a22c27219d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -118,9 +118,10 @@ asmlinkage void do_softirq(void)
"a" (__do_softirq)
: "0", "1", "2", "3", "4", "5", "14",
"cc", "memory" );
- } else
+ } else {
/* We are already on the async stack. */
__do_softirq();
+ }
}
local_irq_restore(flags);
@@ -192,11 +193,12 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
int index = ext_hash(code);
spin_lock_irqsave(&ext_int_hash_lock, flags);
- list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
+ list_for_each_entry_rcu(p, &ext_int_hash[index], entry) {
if (p->code == code && p->handler == handler) {
list_del_rcu(&p->entry);
kfree_rcu(p, rcu);
}
+ }
spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
@@ -211,9 +213,10 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
old_regs = set_irq_regs(regs);
irq_enter();
- if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+ if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
/* Serve timer interrupts first. */
clock_comparator_work();
+ }
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
if (ext_code.code != 0x1004)
__get_cpu_var(s390_idle).nohz_delay = 1;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 46405086479..cb019f429e8 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -178,7 +178,7 @@ static void cpumf_pmu_enable(struct pmu *pmu)
err = lcctl(cpuhw->state);
if (err) {
pr_err("Enabling the performance measuring unit "
- "failed with rc=%lx\n", err);
+ "failed with rc=%x\n", err);
return;
}
@@ -203,7 +203,7 @@ static void cpumf_pmu_disable(struct pmu *pmu)
err = lcctl(inactive);
if (err) {
pr_err("Disabling the performance measuring unit "
- "failed with rc=%lx\n", err);
+ "failed with rc=%x\n", err);
return;
}
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 7bb15fcca75..e1335dc2b1b 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -61,21 +61,14 @@ long probe_kernel_write(void *dst, const void *src, size_t size)
return copied < 0 ? -EFAULT : 0;
}
-/*
- * Copy memory in real mode (kernel to kernel)
- */
-int memcpy_real(void *dest, void *src, size_t count)
+static int __memcpy_real(void *dest, void *src, size_t count)
{
register unsigned long _dest asm("2") = (unsigned long) dest;
register unsigned long _len1 asm("3") = (unsigned long) count;
register unsigned long _src asm("4") = (unsigned long) src;
register unsigned long _len2 asm("5") = (unsigned long) count;
- unsigned long flags;
int rc = -EFAULT;
- if (!count)
- return 0;
- flags = __arch_local_irq_stnsm(0xf8UL);
asm volatile (
"0: mvcle %1,%2,0x0\n"
"1: jo 0b\n"
@@ -86,7 +79,23 @@ int memcpy_real(void *dest, void *src, size_t count)
"+d" (_len2), "=m" (*((long *) dest))
: "m" (*((long *) src))
: "cc", "memory");
- arch_local_irq_restore(flags);
+ return rc;
+}
+
+/*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+ unsigned long flags;
+ int rc;
+
+ if (!count)
+ return 0;
+ local_irq_save(flags);
+ __arch_local_irq_stnsm(0xfbUL);
+ rc = __memcpy_real(dest, src, count);
+ local_irq_restore(flags);
return rc;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 373adf69b01..6e765bf0067 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -678,8 +678,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
}
}
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
static void __page_table_free_rcu(void *table, unsigned bit)
{
struct page *page;
@@ -733,7 +731,66 @@ void __tlb_remove_table(void *_table)
free_pages((unsigned long) table, ALLOC_ORDER);
}
-#endif
+static void tlb_remove_table_smp_sync(void *arg)
+{
+ /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+ /*
+ * This isn't an RCU grace period and hence the page-tables cannot be
+ * assumed to be actually RCU-freed.
+ *
+ * It is however sufficient for software page-table walkers that rely
+ * on IRQ disabling. See the comment near struct mmu_table_batch.
+ */
+ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+ struct mmu_table_batch *batch;
+ int i;
+
+ batch = container_of(head, struct mmu_table_batch, rcu);
+
+ for (i = 0; i < batch->nr; i++)
+ __tlb_remove_table(batch->tables[i]);
+
+ free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch) {
+ __tlb_flush_mm(tlb->mm);
+ call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ *batch = NULL;
+ }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch == NULL) {
+ *batch = (struct mmu_table_batch *)
+ __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ if (*batch == NULL) {
+ __tlb_flush_mm(tlb->mm);
+ tlb_remove_table_one(table);
+ return;
+ }
+ (*batch)->nr = 0;
+ }
+ (*batch)->tables[(*batch)->nr++] = table;
+ if ((*batch)->nr == MAX_TABLE_BATCH)
+ tlb_table_flush(tlb);
+}
/*
* switch on pgstes for its userspace process (for kvm)
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 173df38dbda..2e88438ffd8 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -459,17 +459,17 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);
if (pmu->version == 1) {
- pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
- return;
+ pmu->nr_arch_fixed_counters = 0;
+ } else {
+ pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
+ X86_PMC_MAX_FIXED);
+ pmu->counter_bitmask[KVM_PMC_FIXED] =
+ ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
}
- pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
- X86_PMC_MAX_FIXED);
- pmu->counter_bitmask[KVM_PMC_FIXED] =
- ((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
- pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
- | (((1ull << pmu->nr_arch_fixed_counters) - 1)
- << X86_PMC_IDX_FIXED));
+ pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+ (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+ pmu->global_ctrl_mask = ~pmu->global_ctrl;
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ad85adfef84..4ff0ab9bc3c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2210,9 +2210,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
msr = find_msr_entry(vmx, msr_index);
if (msr) {
msr->data = data;
- if (msr - vmx->guest_msrs < vmx->save_nmsrs)
+ if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ preempt_disable();
kvm_set_shared_msr(msr->index, msr->data,
msr->mask);
+ preempt_enable();
+ }
break;
}
ret = kvm_set_msr_common(vcpu, msr_index, data);
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 25feb1ae71c..b1e6c4b2e8e 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -379,8 +379,8 @@ err_out:
return;
}
-/* Decode moffset16/32/64 */
-static void __get_moffset(struct insn *insn)
+/* Decode moffset16/32/64. Return 0 if failed */
+static int __get_moffset(struct insn *insn)
{
switch (insn->addr_bytes) {
case 2:
@@ -397,15 +397,19 @@ static void __get_moffset(struct insn *insn)
insn->moffset2.value = get_next(int, insn);
insn->moffset2.nbytes = 4;
break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
insn->moffset1.got = insn->moffset2.got = 1;
+ return 1;
+
err_out:
- return;
+ return 0;
}
-/* Decode imm v32(Iz) */
-static void __get_immv32(struct insn *insn)
+/* Decode imm v32(Iz). Return 0 if failed */
+static int __get_immv32(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
@@ -417,14 +421,18 @@ static void __get_immv32(struct insn *insn)
insn->immediate.value = get_next(int, insn);
insn->immediate.nbytes = 4;
break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
+ return 1;
+
err_out:
- return;
+ return 0;
}
-/* Decode imm v64(Iv/Ov) */
-static void __get_immv(struct insn *insn)
+/* Decode imm v64(Iv/Ov), Return 0 if failed */
+static int __get_immv(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
@@ -441,15 +449,18 @@ static void __get_immv(struct insn *insn)
insn->immediate2.value = get_next(int, insn);
insn->immediate2.nbytes = 4;
break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
insn->immediate1.got = insn->immediate2.got = 1;
+ return 1;
err_out:
- return;
+ return 0;
}
/* Decode ptr16:16/32(Ap) */
-static void __get_immptr(struct insn *insn)
+static int __get_immptr(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
@@ -462,14 +473,17 @@ static void __get_immptr(struct insn *insn)
break;
case 8:
/* ptr16:64 is not exist (no segment) */
- return;
+ return 0;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
}
insn->immediate2.value = get_next(unsigned short, insn);
insn->immediate2.nbytes = 2;
insn->immediate1.got = insn->immediate2.got = 1;
+ return 1;
err_out:
- return;
+ return 0;
}
/**
@@ -489,7 +503,8 @@ void insn_get_immediate(struct insn *insn)
insn_get_displacement(insn);
if (inat_has_moffset(insn->attr)) {
- __get_moffset(insn);
+ if (!__get_moffset(insn))
+ goto err_out;
goto done;
}
@@ -517,16 +532,20 @@ void insn_get_immediate(struct insn *insn)
insn->immediate2.nbytes = 4;
break;
case INAT_IMM_PTR:
- __get_immptr(insn);
+ if (!__get_immptr(insn))
+ goto err_out;
break;
case INAT_IMM_VWORD32:
- __get_immv32(insn);
+ if (!__get_immv32(insn))
+ goto err_out;
break;
case INAT_IMM_VWORD:
- __get_immv(insn);
+ if (!__get_immv(insn))
+ goto err_out;
break;
default:
- break;
+ /* Here, insn must have an immediate, but failed */
+ goto err_out;
}
if (inat_has_second_immediate(insn->attr)) {
insn->immediate2.value = get_next(char, insn);