>
> $ perf record -a -f -g
> $ perf report -g
here we go:
- 49.72% _raw_spin_lock
   - 32.32% kvm_mmu_pte_write
      - 98.02% emulator_write_phys
           emulator_write_emulated_onepage
           emulator_write_emulated
           x86_emulate_insn
           emulate_instruction
           kvm_mmu_page_fault
           handle_exception
           vmx_handle_exit
           kvm_arch_vcpu_ioctl_run
           kvm_vcpu_ioctl
           vfs_ioctl
           do_vfs_ioctl
           sys_ioctl
           system_call_fastpath
           __GI_ioctl
      - 1.98% paging64_invlpg
           kvm_mmu_invlpg
           handle_invlpg
           vmx_handle_exit
           kvm_arch_vcpu_ioctl_run
           kvm_vcpu_ioctl
           vfs_ioctl
           do_vfs_ioctl
           sys_ioctl
           system_call_fastpath
           __GI_ioctl
   - 23.66% task_rq_lock
      - try_to_wake_up
         - 94.76% wake_up_process
              cpu_stop_queue_work
              __stop_cpus
              try_stop_cpus
              synchronize_sched_expedited
              __synchronize_srcu
              synchronize_srcu_expedited
              __kvm_set_memory_region
              kvm_set_memory_region
              kvm_vm_ioctl_set_memory_region
              kvm_vm_ioctl
              vfs_ioctl
              do_vfs_ioctl
              sys_ioctl
              system_call_fastpath
              __GI_ioctl
         - 4.44% default_wake_function
            - 53.81% autoremove_wake_function
                 __wake_up_common
                 __wake_up
                 kvm_vcpu_kick
                 __apic_accept_irq
                 kvm_apic_set_irq
                 - kvm_irq_delivery_to_apic
                    - 58.41% apic_reg_write
                         apic_mmio_write
                         emulator_write_emulated_onepage
                         emulator_write_emulated
                         x86_emulate_insn
                         emulate_instruction
                         kvm_mmu_page_fault
                         handle_exception
                         vmx_handle_exit
                         kvm_arch_vcpu_ioctl_run
                         kvm_vcpu_ioctl
                         vfs_ioctl
                         do_vfs_ioctl
                         sys_ioctl
                         system_call_fastpath
                         __GI_ioctl
                    - 29.60% ioapic_service
                         kvm_ioapic_set_irq
                         kvm_set_ioapic_irq
                         kvm_set_irq
                         kvm_arch_vm_ioctl
                         kvm_vm_ioctl
                         vfs_ioctl
                         do_vfs_ioctl
                         sys_ioctl
                         system_call_fastpath
                         __GI_ioctl
                    - 11.99% kvm_set_msi
                         kvm_set_irq
                         kvm_arch_vm_ioctl
                         kvm_vm_ioctl
                         vfs_ioctl
                         do_vfs_ioctl
                         sys_ioctl
                         system_call_fastpath
                         __GI_ioctl
            - 46.19% pollwake
                 __wake_up_common
                 - __wake_up
                    - 77.30% __send_signal
                         send_signal
                         - do_send_sig_info
                            - 68.83% do_send_specific
                                 do_tkill
                                 sys_tgkill
                                 system_call_fastpath
                                 __pthread_kill
                            - 31.17% group_send_sig_info
                                 kill_pid_info
                                 sys_kill
                                 system_call_fastpath
                                 __kill
                    - 22.70% send_sigqueue
                         posix_timer_event
                         posix_timer_fn
                         __run_hrtimer
                         hrtimer_interrupt
                         smp_apic_timer_interrupt
                         apic_timer_interrupt
                         kvm_arch_commit_memory_region
                         __kvm_set_memory_region
                         kvm_set_memory_region
                         kvm_vm_ioctl_set_memory_region
                         kvm_vm_ioctl
                         vfs_ioctl
                         do_vfs_ioctl
                         sys_ioctl
                         system_call_fastpath
                         __GI_ioctl
         - 0.80% wake_up_state
            - wake_futex
               - 68.01% do_futex
                    sys_futex
                    system_call_fastpath
                    __pthread_cond_signal
               - 31.99% futex_wake
                    do_futex
                    sys_futex
                    system_call_fastpath
                    __lll_unlock_wake
   - 11.51% mmu_free_roots
      - 96.89% kvm_mmu_unload
           kvm_arch_vcpu_ioctl_run
           kvm_vcpu_ioctl
           vfs_ioctl
           do_vfs_ioctl
           sys_ioctl
           system_call_fastpath
           __GI_ioctl
      - 3.11% paging_new_cr3
           kvm_set_cr3
           handle_cr
           vmx_handle_exit
           kvm_arch_vcpu_ioctl_run
           kvm_vcpu_ioctl
           vfs_ioctl
           do_vfs_ioctl
           sys_ioctl
           system_call_fastpath
           __GI_ioctl
   - 6.78% paging64_page_fault
        kvm_mmu_page_fault
        handle_exception
        vmx_handle_exit
        kvm_arch_vcpu_ioctl_run
        kvm_vcpu_ioctl
        vfs_ioctl
        do_vfs_ioctl
        sys_ioctl
        system_call_fastpath
        __GI_ioctl
   - 5.72% make_all_cpus_request
      - 62.13% kvm_reload_remote_mmus
         - kvm_mmu_prepare_zap_page
            - 96.56% kvm_mmu_zap_all
                 kvm_arch_flush_shadow
                 __kvm_set_memory_region
                 kvm_set_memory_region
                 kvm_vm_ioctl_set_memory_region
                 kvm_vm_ioctl
                 vfs_ioctl
                 do_vfs_ioctl
                 sys_ioctl
                 system_call_fastpath
                 __GI_ioctl
            - 3.44% kvm_mmu_pte_write
                 emulator_write_phys
                 emulator_write_emulated_onepage
                 emulator_write_emulated
                 x86_emulate_insn
                 emulate_instruction
                 kvm_mmu_page_fault
                 handle_exception
                 vmx_handle_exit
                 kvm_arch_vcpu_ioctl_run
                 kvm_vcpu_ioctl
                 vfs_ioctl
                 do_vfs_ioctl
                 sys_ioctl
                 system_call_fastpath
                 __GI_ioctl
.
.
that's not all of it; is this enough, or can I simply export the whole tree somehow?
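(For reference, assuming this perf build supports the non-interactive --stdio output mode, the whole expanded call graph can be dumped to a file instead of being read from the TUI, and the -S symbol filter, where available, restricts the output to the spinlock's callers; the file names below are just placeholders:)

$ perf report -g --stdio > callgraph.txt
$ perf report -g --stdio -S _raw_spin_lock > spinlock-callers.txt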
>
> will show who calls do_raw_spin_lock.
>
>> 235.00 4.9% send_mono_rect /usr/bin/qemu-kvm
>> 215.00 4.5% rb_next [kernel.kallsyms]
>> 166.00 3.5% schedule [kernel.kallsyms]
>
> What's the context switch rate? 'vmstat 1'
procs -----------memory---------- ---swap-- -----io---- --system-- -----cpu------
 r  b   swpd   free    buff    cache   si   so    bi    bo    in    cs us sy id wa st
 3  0      0 168920 5841008 4614064    0    0    28    21    32    28 15 22 63  0  0
 2  0      0 168820 5841008 4614088    0    0     0     0 13489 76739 15 16 69  0  0
 2  0      0 167656 5841008 4614088    0    0     0   104  6089 33390 16 13 71  0  0
 3  0      0 169184 5841008 4614088    0    0     0     0 12489 71263 17 15 69  0  0
 2  0      0 169200 5841012 4614092    0    0     0     8  7034 33908 17 12 72  0  0
 2  0      0 169432 5841024 4614080    0    0     0    16 10924 67008 16 12 72  0  0
 2  0      0 168084 5841028 4614088    0    0     0     4  8955 47767 17 13 71  0  0
 2  0      0 168936 5841032 4614088    0    0     0    80  9528 50119 16 13 71  0  0
.
.
> >> 141.00 3.0% add_preempt_count [kernel.kallsyms]
> >> 137.00 2.9% gen_rotc_rm_T1 /usr/bin/qemu-kvm
>
> Do you have a guest running with kvm disabled?!

nope, all seem to be using KVM

--
-------------------------------------
Ing. Nikola CIPRICH
LinuxBox.cz, s.r.o.
28. rijna 168, 709 01 Ostrava

tel.:   +420 596 603 142
fax:    +420 596 621 273
mobil:  +420 777 093 799
www.linuxbox.cz

mobil servis: +420 737 238 656
email servis: [email protected]
-------------------------------------
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html
