<URL: http://funnyman:26776/markup/xen-3.1-testing.hg/linux-2.6-xen-sparse/arch/i386/kernel/vm86.c?q=do_vm86_irq_handling#l175


  257:
/*
 * Switch the current task into vm86 mode and jump directly back to
 * userspace via resume_userspace.  This function never returns; the
 * saved 32-bit state in info->regs32 is restored later, when the task
 * leaves vm86 mode again.
 */
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
#ifndef CONFIG_X86_NO_TSS
	struct tss_struct *tss;
#endif
	long eax;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.__null_ds = 0;
	info->regs.__null_es = 0;

/* we are clearing fs,gs later just before "jmp resume_userspace",
 * because they are no longer saved/restored starting with Linux 2.1.x
 */

/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.eflags;
	info->regs.eflags &= SAFE_MASK;
	info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.eflags |= VM_MASK;

	/*
	 * Which eflags bits the task is allowed to toggle from vm86 mode
	 * depends on the CPU generation it asked us to emulate: each newer
	 * generation adds bits (NT/IOPL, then AC, then ID) to the mask.
	 */
	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

/*
 * Save old state, set default return value (%eax) to 0
 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	savesegment(fs, tsk->thread.saved_fs);
	savesegment(gs, tsk->thread.saved_gs);

#ifndef CONFIG_X86_NO_TSS
	tss = &per_cpu(init_tss, get_cpu());
#endif
	/*
	 * Redirect the kernel-entry stack pointer at the vm86 register
	 * frame inside *info.
	 * NOTE(review): load_esp0() is called outside the #ifndef, so under
	 * CONFIG_X86_NO_TSS it presumably expands to something that ignores
	 * the (undeclared) 'tss' argument -- confirm against the mach-xen
	 * processor.h definition.
	 */
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0; /* presumably disables the sysenter/sysexit return path while in vm86 -- verify */
	load_esp0(tss, &tsk->thread);
#ifndef CONFIG_X86_NO_TSS
	put_cpu();	/* per-cpu init_tss no longer referenced past here */
#endif

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* Clear %fs/%gs as promised above; the xor also zeroes %eax ... */
	__asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
	/* ... which the next statement copies out as the audited return value
	 * (assumes the compiler emits nothing clobbering %eax in between). */
	__asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));

/*call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(eax), eax);

	/*
	 * Point %esp at the vm86 register frame, %ebp at the thread_info,
	 * and resume in userspace.  Control never comes back here.
	 */
	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)));
	/* we never return here */
}
   336:


Reply via email to