From: Avi Kivity <[EMAIL PROTECTED]>

Add supporting code for patching tpr accesses.  The code includes a descriptor
that allows locating the code in the bios, relocation information for
patching it into whatever virtual address it ends up in, and vapic access code.

Signed-off-by: Avi Kivity <[EMAIL PROTECTED]>

diff --git a/bios/Makefile b/bios/Makefile
index 8ec22b8..0abc108 100644
--- a/bios/Makefile
+++ b/bios/Makefile
@@ -97,8 +97,8 @@ rombios32.bin: rombios32.out rombios.h
        objcopy -O binary $< $@
        ./biossums -pad $@
 
-rombios32.out: rombios32start.o rombios32.o rombios32.ld
-       ld -o $@ -T rombios32.ld rombios32start.o rombios32.o
+rombios32.out: rombios32start.o rombios32.o vapic.o rombios32.ld
+       ld -o $@ -T rombios32.ld rombios32start.o vapic.o rombios32.o
 
 rombios32.o: rombios32.c acpi-dsdt.hex
        $(GCC) -m32 -O2 -Wall -c -o $@ $<
@@ -109,6 +109,9 @@ acpi-dsdt.hex: acpi-dsdt.dsl
 rombios32start.o: rombios32start.S
        $(GCC) -m32 -c -o $@ $<
 
+vapic.o: vapic.S
+       $(GCC) -m32 -c -o $@ $<
+
 BIOS-bochs-latest: rombios16.bin rombios32.bin
        cat rombios32.bin rombios16.bin > $@
 
diff --git a/bios/rombios32.ld b/bios/rombios32.ld
index 38a1f13..4f331ea 100644
--- a/bios/rombios32.ld
+++ b/bios/rombios32.ld
@@ -6,6 +6,10 @@ SECTIONS
         . = 0x00040000;
         .text     : { *(.text)    } 
         .rodata    : { *(.rodata) }
+        . = ALIGN(64);
+        fixup_start = .;
+        .fixup    : { *(.fixup) }
+        fixup_end = .;
         . = ALIGN(4096);
         .data     : { *(.data)    } 
         __bss_start = . ;
diff --git a/bios/vapic.S b/bios/vapic.S
new file mode 100644
index 0000000..16e5cc4
--- /dev/null
+++ b/bios/vapic.S
@@ -0,0 +1,181 @@
+	.text
+	.code32
+	.align 4096	/* keep the whole vapic blob page aligned */
+
+vapic_size = 2*4096	/* size of the vapic data area at the end of this file */
+
+.macro fixup delta=-4	/* NB: x86 gas treats ';' as a statement separator, so '; fixup' below invokes this macro */
+777:
+	.pushsection .fixup, "a"	/* allocatable table of offsets that need relocation */
+	.long 777b + \delta  - vapic_base	/* offset, relative to vapic_base, of the 32-bit address to patch (default: the previous instruction's last 4 bytes) */
+	.popsection
+.endm
+
+vapic_base:
+	.ascii "kvm aPiC"	/* signature used to locate this descriptor in the bios image */
+
+	/* relocation data */
+	.long vapic_base	; fixup	/* link-time base; patched to the runtime load address */
+	.long fixup_start	; fixup	/* bounds of the .fixup offset table (symbols from rombios32.ld) */
+	.long fixup_end		; fixup
+
+	.long vapic		; fixup	/* address of the vapic data area below */
+	.long vapic_size
+vcpu_shift:
+	.long 0	/* log2 of the per-vcpu record size; presumably filled in at setup time -- r/w slot */
+real_tpr:
+	.long 0	/* address of the hardware apic tpr, used when the vapic is inactive; filled in at setup time */
+	.long set_tpr		; fixup	/* entry points, one per patched tpr-access form */
+	.long set_tpr_eax	; fixup
+	.long get_tpr_eax	; fixup
+	.long get_tpr_ecx	; fixup
+	.long get_tpr_edx	; fixup
+	.long get_tpr_ebx	; fixup
+	.long 0 /* esp. won't work. */
+	.long get_tpr_ebp	; fixup
+	.long get_tpr_esi	; fixup
+	.long get_tpr_edi	; fixup
+
+.macro kvm_hypercall	/* 0f 01 c1 = vmcall: trap to the hypervisor */
+	.byte 0x0f, 0x01, 0xc1
+.endm
+
+kvm_hypercall_vapic_poll_irq = 1	/* ask the host to re-evaluate pending interrupts */
+
+tr_vcpu_signature = 0xdb	/* low byte of the tr selector when the vapic is active; %ah then carries the vcpu id (see get_tpr_eax) */
+
+.align 64
+
+get_tpr_eax:	/* read the tpr into %eax; all other registers and flags preserved */
+	pushf
+	push %ecx
+
+	str %eax	/* tr selector: %al = signature byte, %ah = vcpu id */
+	cmp $tr_vcpu_signature, %al
+	jne get_tpr_bad	/* vapic not set up for this vcpu; read the real apic */
+	movzbl %ah, %eax	/* %eax = vcpu id */
+
+	mov vcpu_shift, %ecx	; fixup
+	shl %cl, %eax	/* %eax = byte offset of this vcpu's vapic record */
+	movzbl vapic(%eax), %eax ; fixup	/* byte 0 of the record holds the tpr */
+
+get_tpr_out:
+	pop %ecx
+	popf
+	ret
+
+get_tpr_bad:
+	mov real_tpr, %eax	; fixup	/* fall back to the hardware tpr */
+	mov (%eax), %eax
+	jmp get_tpr_out
+
+get_tpr_ebx:	/* as get_tpr_eax, but return the tpr in %ebx; %eax preserved */
+	mov %eax, %ebx	/* save caller's %eax */
+	call get_tpr_eax
+	xchg %eax, %ebx	/* %ebx = tpr, %eax restored */
+	ret
+
+get_tpr_ecx:	/* as get_tpr_eax, but return the tpr in %ecx; %eax preserved */
+	mov %eax, %ecx	/* save caller's %eax */
+	call get_tpr_eax
+	xchg %eax, %ecx	/* %ecx = tpr, %eax restored */
+	ret
+
+get_tpr_edx:	/* as get_tpr_eax, but return the tpr in %edx; %eax preserved */
+	mov %eax, %edx	/* save caller's %eax */
+	call get_tpr_eax
+	xchg %eax, %edx	/* %edx = tpr, %eax restored */
+	ret
+
+get_tpr_esi:	/* as get_tpr_eax, but return the tpr in %esi; %eax preserved */
+	mov %eax, %esi	/* save caller's %eax */
+	call get_tpr_eax
+	xchg %eax, %esi	/* %esi = tpr, %eax restored */
+	ret
+
+get_tpr_edi:	/* as get_tpr_eax, but return the tpr in %edi; %eax preserved */
+	mov %eax, %edi	/* save caller's %eax */
+	call get_tpr_eax	/* bug fix: was 'call get_tpr_edi', which recursed into itself forever */
+	xchg %eax, %edi	/* %edi = tpr, %eax restored */
+	ret
+
+get_tpr_ebp:	/* as get_tpr_eax, but return the tpr in %ebp; %eax preserved */
+	mov %eax, %ebp	/* save caller's %eax */
+	call get_tpr_eax
+	xchg %eax, %ebp	/* %ebp = tpr, %eax restored */
+	ret
+
+set_tpr_eax:	/* write %al to the tpr; set_tpr's 'ret $4' pops the pushed argument */
+	push %eax
+	call set_tpr
+	ret
+
+set_tpr:	/* write the byte argument at 4(%esp) to the tpr; returns with 'ret $4' */
+	pushf
+	push %eax
+	push %ecx
+	push %edx
+	push %ebx
+
+set_tpr_failed:	/* cmpxchg retry point */
+	str %eax	/* tr selector: %al = signature byte, %ah = vcpu id */
+	cmp $tr_vcpu_signature, %al
+	jne set_tpr_bad	/* vapic not set up; write the real apic */
+	movzbl %ah, %edx	/* %edx = vcpu id */
+
+	mov vcpu_shift, %ecx	; fixup
+	shl %cl, %edx	/* %edx = byte offset of this vcpu's vapic record */
+
+	mov vapic(%edx), %eax	; fixup	/* %eax = current record, the cmpxchg compare value */
+
+	mov %eax, %ebx
+	mov 24(%esp), %bl	/* 5 pushes + return address = 24: fetch the new tpr argument */
+
+	/* %ebx = new vapic (%bl = tpr, %bh = isr, byte 3 = irr) */
+
+	lock cmpxchg %ebx, vapic(%edx) ; fixup
+	jnz set_tpr_failed	/* record changed under us; retry */
+
+	/* compute ppr = max(tpr, isr) */
+	cmp %bh, %bl
+	jae tpr_is_bigger
+isr_is_bigger:
+	mov %bh, %bl
+tpr_is_bigger:
+	/* %bl = ppr */
+	mov %bl, %ch   /* ch = ppr */
+	rol $8, %ebx	/* rotate: byte 3 wraps into %bl, ppr moves into %bh */
+	/* now: %bl = irr, %bh = ppr */
+	cmp %bh, %bl
+	ja set_tpr_poll_irq	/* a pending interrupt is now unmasked */
+
+set_tpr_out:
+	pop %ebx
+	pop %edx
+	pop %ecx
+	pop %eax
+	popf
+	ret $4	/* also pops the tpr argument */
+
+set_tpr_poll_irq:
+	mov $kvm_hypercall_vapic_poll_irq, %eax
+	kvm_hypercall	/* let the host inject the newly unmasked interrupt */
+	jmp set_tpr_out
+
+set_tpr_bad:	/* no vapic: forward the argument to the hardware tpr */
+	mov 24(%esp), %ecx
+	mov real_tpr, %eax	; fixup
+	mov %ecx, (%eax)
+	jmp set_tpr_out
+
+.align 4096
+/*
+ * vapic format:
+ *  per-vcpu records of size 2^vcpu_shift.
+ *     byte 0: tpr (r/w)
+ *     byte 1: highest in-service interrupt (isr) (r/o); bits 3:0 are zero
+ *     byte 2: zero (r/o)
+ *     byte 3: highest pending interrupt (irr) (r/o)
+ */
+vapic:
+. = . + vapic_size	/* reserve the shared data area; the r/o fields above are maintained outside this code */

-------------------------------------------------------------------------
This SF.net email is sponsored by: Microsoft
Defy all challenges. Microsoft(R) Visual Studio 2005.
http://clk.atdmt.com/MRT/go/vse0120000070mrt/direct/01/
_______________________________________________
kvm-commits mailing list
kvm-commits@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-commits

Reply via email to