author     Liu Yu <yu.liu@freescale.com>              2011-12-20 14:42:56 +0000
committer  Avi Kivity <avi@redhat.com>                2012-03-05 14:52:40 +0200
commit     d37b1a037cae725e69e5bf96f58544b69d7c93a6 (patch)
tree       73db07a65bd9ebbeae0e982cbb5291a047dddb38
parent     82ed36164c8a8ee685ea3fb3c4f741214ac070ca (diff)
KVM: PPC: booke: Add booke206 TLB trace
The existing kvm_stlb_write/kvm_gtlb_write were a poor match for
the e500/book3e MMU -- mas1 was passed as "tid", mas2 was limited
to "unsigned int" which will be a problem on 64-bit, mas3/7 got
split up rather than treated as a single 64-bit word, etc.
Signed-off-by: Liu Yu <yu.liu@freescale.com>
[scottwood@freescale.com: made mas2 64-bit, and added mas8 init]
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
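For context on the mas7_3 handling the message refers to: the e500/book3e code keeps MAS7 (the upper physical-address bits) and MAS3 (the lower bits plus permission flags) together in one 64-bit word, and only splits them where they are written to the two 32-bit SPRs, as the mtspr pair in the diff below shows; likewise, MAS2 is kept at full width rather than truncated to unsigned int. A minimal standalone sketch of that packing, using hypothetical helper names (not kernel API):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers illustrating the MAS7:MAS3 packing behind the
 * 64-bit mas7_3 field; the names are illustrative, not from the kernel. */
static uint64_t pack_mas7_3(uint32_t mas7, uint32_t mas3)
{
	/* MAS7 occupies the upper 32 bits, MAS3 the lower 32 bits. */
	return ((uint64_t)mas7 << 32) | mas3;
}

static void split_mas7_3(uint64_t mas7_3, uint32_t *mas7, uint32_t *mas3)
{
	/* Mirrors the diff's mtspr(SPRN_MAS3, (u32)stlbe->mas7_3) and
	 * mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)) split. */
	*mas3 = (uint32_t)mas7_3;
	*mas7 = (uint32_t)(mas7_3 >> 32);
}

int main(void)
{
	/* Made-up register values, purely for illustration. */
	uint64_t mas7_3 = pack_mas7_3(0x0000000f, 0xfe000015);
	uint32_t mas7, mas3;

	split_mas7_3(mas7_3, &mas7, &mas3);
	printf("mas7_3=%llx mas7=%x mas3=%x\n",
	       (unsigned long long)mas7_3, mas7, mas3);
	return 0;
}
```

Tracing the packed word as a single value is what lets the new tracepoints log the full physical address on 64-bit without the lossy (u32) casts the old kvm_stlb_write/kvm_gtlb_write calls needed.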
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c | 10
-rw-r--r--  arch/powerpc/kvm/trace.h    | 57
2 files changed, 63 insertions, 4 deletions
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 1746e677bf3..6e53e4164de 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -294,6 +294,9 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
 	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
 	asm volatile("isync; tlbwe" : : : "memory");
 	local_irq_restore(flags);
+
+	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
+	                              stlbe->mas2, stlbe->mas7_3);
 }
 
 /*
@@ -332,8 +335,6 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 				  MAS0_TLBSEL(1) |
 				  MAS0_ESEL(to_htlb1_esel(sesel)));
 	}
-	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
-			     (u32)stlbe->mas7_3, (u32)(stlbe->mas7_3 >> 32));
 }
 
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
@@ -355,6 +356,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
 	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
 		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas8 = 0;
 
 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
 	preempt_enable();
@@ -954,8 +956,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	gtlbe->mas2 = vcpu->arch.shared->mas2;
 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
 
-	trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
-			     (u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
+	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
+	                              gtlbe->mas2, gtlbe->mas7_3);
 
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 609d8bfb54e..877186b7b1c 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -340,6 +340,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
 
 #endif /* CONFIG_PPC_BOOK3S */
 
+
+/*************************************************************************
+ *                          Book3E trace points                          *
+ *************************************************************************/
+
+#ifdef CONFIG_BOOKE
+
+TRACE_EVENT(kvm_booke206_stlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0	)
+		__field(	__u32,	mas8	)
+		__field(	__u32,	mas1	)
+		__field(	__u64,	mas2	)
+		__field(	__u64,	mas7_3	)
+	),
+
+	TP_fast_assign(
+		__entry->mas0 = mas0;
+		__entry->mas8 = mas8;
+		__entry->mas1 = mas1;
+		__entry->mas2 = mas2;
+		__entry->mas7_3 = mas7_3;
+	),
+
+	TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas8, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+TRACE_EVENT(kvm_booke206_gtlb_write,
+	TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
+	TP_ARGS(mas0, mas1, mas2, mas7_3),
+
+	TP_STRUCT__entry(
+		__field(	__u32,	mas0	)
+		__field(	__u32,	mas1	)
+		__field(	__u64,	mas2	)
+		__field(	__u64,	mas7_3	)
+	),
+
+	TP_fast_assign(
+		__entry->mas0 = mas0;
+		__entry->mas1 = mas1;
+		__entry->mas2 = mas2;
+		__entry->mas7_3 = mas7_3;
+	),
+
+	TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
+		__entry->mas0, __entry->mas1,
+		__entry->mas2, __entry->mas7_3)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
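As a consumption note: when these events are enabled, each record is rendered with the TP_printk format shown above. A tiny userspace sketch of what the formatted payload of a kvm_booke206_stlb_write record looks like, using made-up register values purely for illustration:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up MAS values, for illustration only; the format string is
	 * the one given in TP_printk for kvm_booke206_stlb_write above. */
	uint32_t mas0 = 0x10030000, mas8 = 0, mas1 = 0xc0000500;
	uint64_t mas2 = 0xfffe7000, mas7_3 = 0xffe10015ULL;

	printf("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx\n",
	       mas0, mas8, mas1,
	       (unsigned long long)mas2, (unsigned long long)mas7_3);
	return 0;
}
```

Because mas2 and mas7_3 are traced as __u64 and printed with %llx, the full 64-bit effective and physical addresses survive in the trace, which is the point of replacing the old 32-bit kvm_stlb_write/kvm_gtlb_write events for booke206.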