On Thu, Aug 27, 2009 at 11:25:17AM +0300, Avi Kivity wrote:
> On 08/27/2009 04:20 AM, Marcelo Tosatti wrote:
>
>> +}
>> +
>> +void kvm_vcpu_ipi(struct kvm_vcpu *vcpu)
>> +{
>> + int me;
>> + int cpu = vcpu->cpu;
>>
>> me = get_cpu();
>> - if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
>> - if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
>> - smp_send_reschedule(cpu);
>> + if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
>> + if (test_bit(KVM_VCPU_GUEST_MODE, &vcpu->vcpu_state)) {
>> + if (!test_and_set_bit(KVM_VCPU_KICKED,
>> + &vcpu->vcpu_state))
>> + smp_send_reschedule(cpu);
>> + }
>> + }
>> put_cpu();
>> }
>>
>> @@ -168,6 +176,30 @@ static bool make_all_cpus_request(struct
>> return called;
>> }
>>
>> +static int kvm_req_wait(void *unused)
>> +{
>> + cpu_relax();
>> + return 0;
>> +}
>> +
>> +static void kvm_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req)
>> +{
>> + set_bit(req, &vcpu->requests);
>> + barrier();
>> + kvm_vcpu_ipi(vcpu);
>> + wait_on_bit(&vcpu->vcpu_state, KVM_VCPU_GUEST_MODE, kvm_req_wait,
>> + TASK_UNINTERRUPTIBLE);
>> +}
>> +
>> +static void kvm_vcpus_request(struct kvm *kvm, unsigned int req)
>> +{
>> + int i;
>> + struct kvm_vcpu *vcpu;
>> +
>> + kvm_for_each_vcpu(i, vcpu, kvm)
>> + kvm_vcpu_request(vcpu, req);
>> +}
>>
>
> Gleb notes there are two problems here: instead of using a multicast
> IPI, you're sending multiple unicast IPIs. Second, you're serializing
> the waiting. It would be better to batch-send the IPIs, then batch-wait
> for results.
Right. I've been playing with multiple variants of batched send/wait, but
so far I haven't been able to see significant improvements for
REQ_FLUSH/REQ_RELOAD.
Batched send will probably be more visible in guest IPI emulation.
Note however that even with multiple unicast IPIs this change collapses
kvm_vcpu_kick with the remote requests, so you decrease the number of
IPIs.
I was hoping to include these changes incrementally?
void kvm_vcpus_request(struct kvm *kvm, unsigned int req)
{
- int i;
+ int i, me, cpu;
struct kvm_vcpu *vcpu;
+ cpumask_var_t wait_cpus, kick_cpus;
+
+ if (alloc_cpumask_var(&wait_cpus, GFP_ATOMIC))
+ cpumask_clear(wait_cpus);
+
+ if (alloc_cpumask_var(&kick_cpus, GFP_ATOMIC))
+ cpumask_clear(kick_cpus);
+
+ me = get_cpu();
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ set_bit(req, &vcpu->requests);
+ barrier();
+ cpu = vcpu->cpu;
+ if (test_bit(KVM_VCPU_GUEST_MODE, &vcpu->vcpu_state)) {
+ if (cpu != -1 && cpu != me) {
+ if (wait_cpus != NULL)
+ cpumask_set_cpu(cpu, wait_cpus);
+ if (kick_cpus != NULL)
+ if (!test_and_set_bit(KVM_VCPU_KICKED,
+ &vcpu->vcpu_state))
+ cpumask_set_cpu(cpu, kick_cpus);
+ }
+ }
+ }
+ if (unlikely(kick_cpus == NULL))
+ smp_call_function_many(cpu_online_mask, ack_flush,
+ NULL, 1);
+ else if (!cpumask_empty(kick_cpus))
+ smp_send_reschedule_many(kick_cpus);
kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vcpu_request(vcpu, req);
+ if (cpumask_test_cpu(vcpu->cpu, wait_cpus))
+ if (test_bit(req, &vcpu->requests))
+ wait_on_bit(&vcpu->vcpu_state, KVM_VCPU_GUEST_MODE,
+ kvm_req_wait, TASK_UNINTERRUPTIBLE);
+ put_cpu();
+
+ free_cpumask_var(wait_cpus);
+ free_cpumask_var(kick_cpus);
}
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html