* Avi Kivity <[EMAIL PROTECTED]> wrote:

> >>>+struct kvm_cr3_cache {
> >>>+  struct kvm_cr3_cache_entry entry[KVM_CR3_CACHE_SIZE];
> >>>+  u32 max_idx;
> >>>+};
> >>> 
> >>>      
> >>This will require an api version bump whenever KVM_CR3_CACHE_SIZE 
> >>changes.
> >>
> >>Better to advertise the gpa of the cache, so it can be unlimited.
> >>    
> >
> >the gpa of the cache, and its guest-side size, right?
> 
> Yes (can use max_idx, no?).

agreed, done. the cr3 registration now passes in the guest-side max size 
of the cr3 cache, which the host-side updates (trims if necessary).

> BTW, max_idx is ambiguous: is it the last valid entry or one past the 
> end?  entry_count is more explicit IMO.

agreed, done.

> >it should already be padded - i layed it out that way. (if it's not 
> >then let me know where it's not padded)
> 
> Right, I was confused by the cr3 cache, but it's the last field.

i moved it to the first place and added a padding word - that way all 
structure sizes are properly aligned and we cannot combine them in an 
unaligned way by accident.

> >+
> >+    if (kvm_arch_ops->patch_hypercall) {
> >  
> 
> It's safe to assume that the arch op exists.

ok, i changed this and i added the SVM patch function too. AMD's VMMCALL 
is 0x0f, 0x01, 0xd9, correct?

> >+EXPORT_SYMBOL_GPL(gpa_to_hpa);
> 
> Is this needed now?  If so, it needs a kvm_ prefix.

not needed anymore because usage is now in kvm_main.c - i removed the 
export.

> >+    .patch_hypercall = vmx_patch_hypercall,
> > };
> >  
> Where is the vmcall exit handler?

in my tree, have not sent the patch yet - first want to combine it with 
the cr3 feature to have it tested.

> Please add the svm code too.  I can test it if you lack amd hardware.

done.

updated patches attached.

        Ingo
Subject: [patch] KVM: add MSR based hypercall API
From: Ingo Molnar <[EMAIL PROTECTED]>

This adds a special MSR-based hypercall API to KVM. It is to be
used by paravirtual kernels and virtual drivers.

VMX-only at the moment.

Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
---
 drivers/kvm/kvm.h        |    6 +++
 drivers/kvm/kvm_main.c   |   74 ++++++++++++++++++++++++++++++++++++++++++++++
 drivers/kvm/svm.c        |   12 +++++++
 drivers/kvm/vmx.c        |   16 ++++++++--
 include/linux/kvm_para.h |   75 +++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 181 insertions(+), 2 deletions(-)

Index: linux/drivers/kvm/kvm.h
===================================================================
--- linux.orig/drivers/kvm/kvm.h
+++ linux/drivers/kvm/kvm.h
@@ -14,6 +14,7 @@
 
 #include "vmx.h"
 #include <linux/kvm.h>
+#include <linux/kvm_para.h>
 
 #define CR0_PE_MASK (1ULL << 0)
 #define CR0_TS_MASK (1ULL << 3)
@@ -237,6 +238,9 @@ struct kvm_vcpu {
        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
+       gpa_t para_state_gpa;
+       struct page *para_state_page;
+       gpa_t hypercall_gpa;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
@@ -379,6 +383,8 @@ struct kvm_arch_ops {
        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*vcpu_setup)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+       void (*patch_hypercall)(struct kvm_vcpu *vcpu,
+                               unsigned char *hypercall_addr);
 };
 
 extern struct kvm_stat kvm_stat;
Index: linux/drivers/kvm/kvm_main.c
===================================================================
--- linux.orig/drivers/kvm/kvm_main.c
+++ linux/drivers/kvm/kvm_main.c
@@ -1204,6 +1204,74 @@ void realmode_set_cr(struct kvm_vcpu *vc
        }
 }
 
+
+/*
+ * Register the para guest with the host:
+ */
+static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
+{
+       struct kvm_vcpu_para_state *para_state;
+       hpa_t para_state_hpa, hypercall_hpa;
+       struct page *para_state_page;
+       unsigned char *hypercall;
+       gpa_t hypercall_gpa;
+
+       printk("KVM: guest trying to enter paravirtual mode\n");
+       printk(".... para_state_gpa: %08Lx\n", para_state_gpa);
+
+       /*
+        * Needs to be page aligned:
+        */
+       if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
+               goto err_gp;
+
+       para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
+       printk(".... para_state_hpa: %08Lx\n", para_state_hpa);
+       if (is_error_hpa(para_state_hpa))
+               goto err_gp;
+
+       para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
+       para_state = kmap_atomic(para_state_page, KM_USER0);
+
+       printk("....  guest version: %d\n", para_state->guest_version);
+       printk("....           size: %d\n", para_state->size);
+
+       para_state->host_version = KVM_PARA_API_VERSION;
+       /*
+        * We cannot support guests that try to register themselves
+        * with a newer API version than the host supports:
+        */
+       if (para_state->guest_version > KVM_PARA_API_VERSION) {
+               para_state->ret = -KVM_EINVAL;
+               goto err_kunmap_skip;
+       }
+
+       hypercall_gpa = para_state->hypercall_gpa;
+       hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
+       printk(".... hypercall_hpa: %08Lx\n", hypercall_hpa);
+       if (is_error_hpa(hypercall_hpa)) {
+               para_state->ret = -KVM_EINVAL;
+               goto err_kunmap_skip;
+       }
+
+       printk("KVM: para guest successfully registered.\n");
+       vcpu->para_state_page = para_state_page;
+       vcpu->para_state_gpa = para_state_gpa;
+       vcpu->hypercall_gpa = hypercall_gpa;
+
+       hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
+                               KM_USER1);
+       kvm_arch_ops->patch_hypercall(vcpu, hypercall);
+       kunmap_atomic(hypercall, KM_USER1);
+
+       para_state->ret = 0;
+err_kunmap_skip:
+       kunmap_atomic(para_state, KM_USER0);
+       return 0;
+err_gp:
+       return 1;
+}
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        u64 data;
@@ -1306,6 +1374,12 @@ int kvm_set_msr_common(struct kvm_vcpu *
        case MSR_IA32_APICBASE:
                vcpu->apic_base = data;
                break;
+       /*
+        * This is the 'probe whether the host is KVM' logic:
+        */
+       case MSR_KVM_API_MAGIC:
+               return vcpu_register_para(vcpu, data);
+
        default:
                printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
                return 1;
Index: linux/drivers/kvm/svm.c
===================================================================
--- linux.orig/drivers/kvm/svm.c
+++ linux/drivers/kvm/svm.c
@@ -1642,6 +1642,17 @@ static int is_disabled(void)
        return 0;
 }
 
+static void
+svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+{
+       /*
+        * Patch in the VMMCALL instruction:
+        */
+       hypercall[0] = 0x0f;
+       hypercall[1] = 0x01;
+       hypercall[2] = 0xd9;
+}
+
 static struct kvm_arch_ops svm_arch_ops = {
        .cpu_has_kvm_support = has_svm,
        .disabled_by_bios = is_disabled,
@@ -1689,6 +1700,7 @@ static struct kvm_arch_ops svm_arch_ops 
        .run = svm_vcpu_run,
        .skip_emulated_instruction = skip_emulated_instruction,
        .vcpu_setup = svm_vcpu_setup,
+       .patch_hypercall = svm_patch_hypercall,
 };
 
 static int __init svm_init(void)
Index: linux/drivers/kvm/vmx.c
===================================================================
--- linux.orig/drivers/kvm/vmx.c
+++ linux/drivers/kvm/vmx.c
@@ -406,10 +406,10 @@ static int vmx_set_msr(struct kvm_vcpu *
        case MSR_IA32_SYSENTER_ESP:
                vmcs_write32(GUEST_SYSENTER_ESP, data);
                break;
-       case MSR_IA32_TIME_STAMP_COUNTER: {
+       case MSR_IA32_TIME_STAMP_COUNTER:
                guest_write_tsc(data);
                break;
-       }
+
        default:
                msr = find_msr_entry(vcpu, msr_index);
                if (msr) {
@@ -1448,6 +1448,17 @@ static int handle_io(struct kvm_vcpu *vc
        return 0;
 }
 
+static void
+vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+{
+       /*
+        * Patch in the VMCALL instruction:
+        */
+       hypercall[0] = 0x0f;
+       hypercall[1] = 0x01;
+       hypercall[2] = 0xc1;
+}
+
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        u64 exit_qualification;
@@ -2042,6 +2053,7 @@ static struct kvm_arch_ops vmx_arch_ops 
        .run = vmx_vcpu_run,
        .skip_emulated_instruction = skip_emulated_instruction,
        .vcpu_setup = vmx_vcpu_setup,
+       .patch_hypercall = vmx_patch_hypercall,
 };
 
 static int __init vmx_init(void)
Index: linux/include/linux/kvm_para.h
===================================================================
--- /dev/null
+++ linux/include/linux/kvm_para.h
@@ -0,0 +1,75 @@
+#ifndef __LINUX_KVM_PARA_H
+#define __LINUX_KVM_PARA_H
+
+#include <linux/errno.h>
+
+/*
+ * Guest OS interface for KVM paravirtualization
+ *
+ * Note: this interface is considered experimental and may change without
+ *       notice.
+ */
+
+#define KVM_CR3_CACHE_SIZE     4
+
+struct kvm_cr3_cache_entry {
+       u64 guest_cr3;
+       u64 host_cr3;
+};
+
+struct kvm_cr3_cache {
+       u32 entry_count;
+       u32 __pad;
+       struct kvm_cr3_cache_entry entry[KVM_CR3_CACHE_SIZE];
+};
+
+/*
+ * Per-VCPU descriptor area shared between guest and host. Writable by
+ * both guest and host. Registered with the host by the guest when
+ * a guest acknowledges paravirtual mode.
+ *
+ * NOTE: all addresses are guest-physical addresses (gpa), to make it
+ * easier for the hypervisor to map between the various addresses.
+ */
+struct kvm_vcpu_para_state {
+       /*
+        * API version information for compatibility. If there's any support
+        * mismatch (too old host trying to execute too new guest) then
+        * the host will deny entry into paravirtual mode. Any other
+        * combination (new host + old guest and new host + new guest)
+        * is supposed to work - new host versions will support all old
+        * guest API versions.
+        */
+       u32 guest_version;
+       u32 host_version;
+       u32 size;
+       u32 ret;
+
+       /*
+        * The address of the vm exit instruction (VMCALL or VMMCALL),
+        * which the host will patch according to the CPU model the
+        * VM runs on:
+        */
+       u64 hypercall_gpa;
+
+       /*
+        * Pointer to the struct kvm_cr3_cache CR3 cache:
+        */
+       u64 cr3_cache_gpa;
+
+} __attribute__ ((aligned(PAGE_SIZE)));
+
+#define KVM_PARA_API_VERSION 1
+
+/*
+ * This is used for an RDMSR's ECX parameter to probe for a KVM host.
+ * Hopefully no CPU vendor will use up this number. This is placed well
+ * out of the way of the typical space occupied by CPU vendors' MSR indices,
+ * and we think (or at least hope) it won't be occupied in the future
+ * either.
+ */
+#define MSR_KVM_API_MAGIC 0x87655678
+
+#define KVM_EINVAL EINVAL
+
+#endif
Subject: [patch] KVM: paravirtual guest support
From: Ingo Molnar <[EMAIL PROTECTED]>

this enables a CONFIG_PARAVIRT Linux guest kernel to establish a
hypercall API to a KVM host. If successfully registered, then the Linux
guest will optimize a few things like its interrupt controller, io-delay
and it also registers its cr3-cache structures with the host. (but the
host will not touch those, just yet)

(this is fully backwards compatible - if the WRMSR fails then the Linux
guest continues to execute as a native kernel.)

Signed-off-by: Ingo Molnar <[EMAIL PROTECTED]>
---
 arch/i386/kernel/paravirt.c  |  238 +++++++++++++++++++++++++++++++++++++++++++
 drivers/serial/8250.c        |    3 
 include/asm-i386/processor.h |    1 
 include/linux/paravirt.h     |   12 ++
 init/main.c                  |    6 -
 5 files changed, 257 insertions(+), 3 deletions(-)

Index: linux/arch/i386/kernel/paravirt.c
===================================================================
--- linux.orig/arch/i386/kernel/paravirt.c
+++ linux/arch/i386/kernel/paravirt.c
@@ -20,6 +20,7 @@
 #include <linux/efi.h>
 #include <linux/bcd.h>
 #include <linux/start_kernel.h>
+#include <linux/kvm_para.h>
 
 #include <asm/bug.h>
 #include <asm/paravirt.h>
@@ -33,6 +34,9 @@
 #include <asm/apic.h>
 #include <asm/tlbflush.h>
 
+#include <asm/i8259.h>
+#include <io_ports.h>
+
 /* nop stub */
 static void native_nop(void)
 {
@@ -683,3 +687,237 @@ struct paravirt_ops paravirt_ops = {
        .irq_enable_sysexit = native_irq_enable_sysexit,
        .iret = native_iret,
 };
+
+/*
+ * KVM paravirtualization optimizations:
+ */
+int kvm_paravirt;
+
+/*
+ * No need for any "IO delay" on KVM:
+ */
+static void kvm_io_delay(void)
+{
+}
+
+static DEFINE_PER_CPU(struct kvm_vcpu_para_state, para_state);
+
+static fastcall void kvm_write_cr3(unsigned long val)
+{
+       asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+/*
+ * Avoid the VM exit upon cr3 load by using the cached
+ * ->active_mm->pgd value:
+ */
+static void kvm_flush_tlb_user(void)
+{
+       kvm_write_cr3(__pa(current->active_mm->pgd));
+}
+
+static void kvm_flush_tlb_single(u32 addr)
+{
+       __native_flush_tlb_single(addr);
+}
+/*
+ * Disable global pages, do a flush, then enable global pages:
+ */
+static fastcall void kvm_flush_tlb_kernel(void)
+{
+       unsigned long orig_cr4 = read_cr4();
+
+       write_cr4(orig_cr4 & ~X86_CR4_PGE);
+       kvm_flush_tlb_user();
+       write_cr4(orig_cr4);
+}
+
+/*
+ * Simplified i8259A controller handling:
+ */
+static void mask_and_ack_kvm(unsigned int irq)
+{
+       unsigned int irqmask = 1 << irq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&i8259A_lock, flags);
+       cached_irq_mask |= irqmask;
+
+       if (irq & 8) {
+               outb(cached_slave_mask, PIC_SLAVE_IMR);
+               outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+               outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+       } else {
+               outb(cached_master_mask, PIC_MASTER_IMR);
+               /* 'Specific EOI' to master: */
+               outb(0x60+irq, PIC_MASTER_CMD);
+       }
+       spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static void disable_kvm_irq(unsigned int irq)
+{
+       unsigned int mask = 1 << irq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&i8259A_lock, flags);
+       cached_irq_mask |= mask;
+       if (irq & 8)
+               outb(cached_slave_mask, PIC_SLAVE_IMR);
+       else
+               outb(cached_master_mask, PIC_MASTER_IMR);
+       spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static void enable_kvm_irq(unsigned int irq)
+{
+       unsigned int mask = ~(1 << irq);
+       unsigned long flags;
+
+       spin_lock_irqsave(&i8259A_lock, flags);
+       cached_irq_mask &= mask;
+       if (irq & 8)
+               outb(cached_slave_mask, PIC_SLAVE_IMR);
+       else
+               outb(cached_master_mask, PIC_MASTER_IMR);
+       spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static struct irq_chip kvm_chip = {
+       .name           = "XT-PIC",
+       .mask           = disable_kvm_irq,
+       .disable        = disable_kvm_irq,
+       .unmask         = enable_kvm_irq,
+       .mask_ack       = mask_and_ack_kvm,
+};
+
+static void __init kvm_init_IRQ(void)
+{
+       int i;
+
+       printk("init KVM IRQ controller\n");
+#ifdef CONFIG_X86_LOCAL_APIC
+       init_bsp_APIC();
+#endif
+       init_8259A(0);
+
+       for (i = 0; i < NR_IRQS; i++) {
+               irq_desc[i].status = IRQ_DISABLED;
+               irq_desc[i].action = NULL;
+               irq_desc[i].depth = 1;
+
+               if (i < 16) {
+                       /*
+                        * 16 old-style INTA-cycle interrupts:
+                        */
+                       set_irq_chip_and_handler_name(i, &kvm_chip,
+                                                     handle_level_irq, "XT");
+               } else {
+                       /*
+                        * 'high' PCI IRQs filled in on demand
+                        */
+                       irq_desc[i].chip = &no_irq_chip;
+               }
+       }
+
+       /*
+        * Cover the whole vector space, no vector can escape
+        * us. (some of these will be overridden and become
+        * 'special' SMP interrupts)
+        */
+       for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
+               int vector = FIRST_EXTERNAL_VECTOR + i;
+               if (i >= NR_IRQS)
+                       break;
+               if (vector != SYSCALL_VECTOR)
+                       set_intr_gate(vector, interrupt[i]);
+       }
+
+       /* setup after call gates are initialised (usually add in
+        * the architecture specific gates)
+        */
+       intr_init_hook();
+
+       irq_ctx_init(smp_processor_id());
+}
+
+/*
+ * This is the vm-syscall address - to be patched by the host to
+ * VMCALL (Intel) or VMMCALL (AMD), depending on the CPU model:
+ */
+asm (
+       "       .globl hypercall_addr           \n"
+       "       .align 4                        \n"
+       "       hypercall_addr:         \n"
+       "               nop                     \n"
+       "               nop                     \n"
+       "               nop                     \n"
+       "               ret                     \n"
+);
+
+extern unsigned char hypercall_addr[4];
+
+int kvm_guest_register_para(int cpu)
+{
+       struct kvm_vcpu_para_state *para_state = &per_cpu(para_state, cpu);
+
+       printk("kvm guest on VCPU#%d: trying to register para_state %p\n",
+               cpu, para_state);
+       /*
+        * Move a magic (and otherwise invalid) value to
+        * cr3, and thus signal to KVM that we are entering
+        * paravirtualized mode:
+        */
+       para_state->guest_version = KVM_PARA_API_VERSION;
+       para_state->host_version = -1;
+       para_state->size = sizeof(*para_state);
+       para_state->ret = 0;
+       para_state->hypercall_gpa = __pa(hypercall_addr);
+
+       if (wrmsr_safe(MSR_KVM_API_MAGIC, __pa(para_state), 0)) {
+               printk("KVM guest: WRMSR probe failed.\n");
+               return 0;
+       }
+
+       printk("kvm guest: host returned %d\n", para_state->ret);
+       printk("kvm guest: host version: %d\n", para_state->host_version);
+       printk("kvm guest: syscall entry: %02x %02x %02x %02x\n",
+                       hypercall_addr[0], hypercall_addr[1],
+                       hypercall_addr[2], hypercall_addr[3]);
+       if (para_state->ret) {
+               printk("kvm guest: host refused registration.\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static int __init kvm_paravirt_setup(char *s)
+{
+       printk("KVM paravirtualization setup\n");
+        if (sscanf(s, "%u", &kvm_paravirt) <= 0)
+               return 1;
+       if (!kvm_paravirt)
+               return 1;
+
+       kvm_paravirt = kvm_guest_register_para(smp_processor_id());
+       if (!kvm_paravirt)
+               return 1;
+
+       printk("KVM paravirtualized: OK\n");
+
+       paravirt_ops.name = "KVM";
+       paravirt_ops.io_delay = kvm_io_delay;
+       paravirt_ops.init_IRQ = kvm_init_IRQ;
+       paravirt_ops.flush_tlb_user = kvm_flush_tlb_user;
+       paravirt_ops.flush_tlb_kernel = kvm_flush_tlb_kernel;
+       paravirt_ops.flush_tlb_single = kvm_flush_tlb_single;
+       paravirt_ops.write_cr3 = kvm_write_cr3;
+       paravirt_ops.paravirt_enabled = 1;
+
+       return 1;
+}
+__setup("kvm_paravirt=", kvm_paravirt_setup);
+
+EXPORT_SYMBOL_GPL(paravirt_ops);
+
Index: linux/drivers/serial/8250.c
===================================================================
--- linux.orig/drivers/serial/8250.c
+++ linux/drivers/serial/8250.c
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/ioport.h>
+#include <linux/paravirt.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/sysrq.h>
@@ -1371,7 +1372,7 @@ static irqreturn_t serial8250_interrupt(
 
                l = l->next;
 
-               if (l == i->head && pass_counter++ > PASS_LIMIT) {
+               if (!paravirt_enabled() && l == i->head && pass_counter++ > PASS_LIMIT) {
                        /* If we hit this, we're dead. */
                        printk(KERN_ERR "serial8250: too much work for "
                                "irq%d\n", irq);
Index: linux/include/asm-i386/processor.h
===================================================================
--- linux.orig/include/asm-i386/processor.h
+++ linux/include/asm-i386/processor.h
@@ -547,7 +547,6 @@ static inline void rep_nop(void)
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define paravirt_enabled() 0
 #define __cpuid native_cpuid
 
 static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
Index: linux/include/linux/paravirt.h
===================================================================
--- /dev/null
+++ linux/include/linux/paravirt.h
@@ -0,0 +1,12 @@
+#ifndef __LINUX_PARAVIRT_H
+#define __LINUX_PARAVIRT_H
+
+/*
+ * Paravirtualization support
+ */
+
+#ifndef CONFIG_PARAVIRT
+# define paravirt_enabled()    0
+#endif
+
+#endif
Index: linux/init/main.c
===================================================================
--- linux.orig/init/main.c
+++ linux/init/main.c
@@ -374,7 +374,11 @@ static void __init setup_per_cpu_areas(v
        if (size < PERCPU_ENOUGH_ROOM)
                size = PERCPU_ENOUGH_ROOM;
 #endif
-       ptr = alloc_bootmem(size * nr_possible_cpus);
+       /*
+        * Align them to page size - just in case someone aligns
+        * the per-CPU data to page that alignment should be preserved:
+        */
+       ptr = alloc_bootmem_pages(size * nr_possible_cpus);
 
        for_each_possible_cpu(i) {
                __per_cpu_offset[i] = ptr - __per_cpu_start;
-------------------------------------------------------------------------
Take Surveys. Earn Cash. Influence the Future of IT
Join SourceForge.net's Techsay panel and you'll get the chance to share your
opinions on IT & business topics through brief surveys - and earn cash
http://www.techsay.com/default.php?page=join.php&p=sourceforge&CID=DEVDEV
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to