/* Source: http://lxr.linux.no/#linux+v2.6.31/arch/x86/kernel/process_64.c#L347 */

 371/*
 372 *      switch_to(x,y) should switch tasks from x to y.
 373 *
 374 * This could still be optimized:
 375 * - fold all the options into a flag word and test it with a single test.
 376 * - could test fs/gs bitsliced
 377 *
 378 * Kprobes not supported here. Set the probe on schedule instead.
 379 * Function graph tracer not supported too.
 380 */
 381__notrace_funcgraph struct task_struct *
 382__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 383{
 384        struct thread_struct *prev = &prev_p->thread;
 385        struct thread_struct *next = &next_p->thread;
 386        int cpu = smp_processor_id();
        /* Per-CPU TSS; used below for sp0 and (via __switch_to_xtra) I/O bitmaps. */
 387        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        /* Selector values saved from the live %fs/%gs registers, not from
         * thread_struct; compared against the saved per-thread selectors below. */
 388        unsigned fsindex, gsindex;
 389
 390        /* we're going to use this soon, after a few expensive things */
 391        if (next_p->fpu_counter > 5)
 392                prefetch(next->xstate);
 393
 394        /*
 395         * Reload esp0, LDT and the page table pointer:
 396         */
 397        load_sp0(tss, next);
 398
 399        /*
 400         * Switch DS and ES.
 401         * This won't pick up thread selector changes, but I guess that is ok.
 402         */
        /* OR of old and new selector is nonzero iff either differs from the
         * null selector, i.e. a reload might actually be needed.  NOTE(review):
         * this reloads even when next->es == prev->es != 0 — presumably the
         * cheap-reload case; confirm against later kernels before "fixing". */
 403        savesegment(es, prev->es);
 404        if (unlikely(next->es | prev->es))
 405                loadsegment(es, next->es);
 406
 407        savesegment(ds, prev->ds);
 408        if (unlikely(next->ds | prev->ds))
 409                loadsegment(ds, next->ds);
 410
 411
 412        /* We must save %fs and %gs before load_TLS() because
 413         * %fs and %gs may be cleared by load_TLS().
 414         *
 415         * (e.g. xen_load_tls())
 416         */
 417        savesegment(fs, fsindex);
 418        savesegment(gs, gsindex);
 419
        /* Install next's TLS descriptors into this CPU's GDT. */
 420        load_TLS(next, cpu);
 421
 422        /*
 423         * Leave lazy mode, flushing any hypercalls made here.
 424         * This must be done before restoring TLS segments so
 425         * the GDT and LDT are properly updated, and must be
 426         * done before math_state_restore, so the TS bit is up
 427         * to date.
 428         */
 429        arch_end_context_switch(next_p);
 430
 431        /*
 432         * Switch FS and GS.
 433         *
 434         * Segment register != 0 always requires a reload.  Also
 435         * reload when it has changed.  When prev process used 64bit
 436         * base always reload to avoid an information leak.
 437         */
        /* prev->fs nonzero here means prev was using a 64-bit FS base
         * (see the MSR_FS_BASE write below for the 'base lives in ->fs'
         * convention), so %fs must be reloaded to avoid leaking it. */
 438        if (unlikely(fsindex | next->fsindex | prev->fs)) {
 439                loadsegment(fs, next->fsindex);
 440                /*
 441                 * Check if the user used a selector != 0; if yes
 442                 *  clear 64bit base, since overloaded base is always
 443                 *  mapped to the Null selector
 444                 */
 445                if (fsindex)
 446                        prev->fs = 0;
 447        }
 448        /* when next process has a 64bit base use it */
 449        if (next->fs)
 450                wrmsrl(MSR_FS_BASE, next->fs);
        /* Remember the live selector we saved above for the next switch. */
 451        prev->fsindex = fsindex;
 452
        /* Same dance for GS; the base goes into MSR_KERNEL_GS_BASE because
         * the user GS base is swapped in by SWAPGS on kernel exit. */
 453        if (unlikely(gsindex | next->gsindex | prev->gs)) {
 454                load_gs_index(next->gsindex);
 455                if (gsindex)
 456                        prev->gs = 0;
 457        }
 458        if (next->gs)
 459                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 460        prev->gsindex = gsindex;
 461
 462        /* Must be after DS reload */
 463        unlazy_fpu(prev_p);
 464
 465        /*
 466         * Switch the PDA and FPU contexts.
 467         */
        /* Swap the saved user stack pointer: stash the per-CPU old_rsp into
         * prev, install next's, and make next_p the per-CPU current task. */
 468        prev->usersp = percpu_read(old_rsp);
 469        percpu_write(old_rsp, next->usersp);
 470        percpu_write(current_task, next_p);
 471
        /* Point the per-CPU kernel_stack at the top of next's stack page(s),
         * offset by KERNEL_STACK_OFFSET (entry code expects this layout). */
 472        percpu_write(kernel_stack,
 473                  (unsigned long)task_stack_page(next_p) +
 474                  THREAD_SIZE - KERNEL_STACK_OFFSET);
 475
 476        /*
 477         * Now maybe reload the debug registers and handle I/O bitmaps
 478         */
        /* Slow path only when either task has context-switch work flagged. */
 479        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
 480                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 481                __switch_to_xtra(prev_p, next_p, tss);
 482
 483        /* If the task has used fpu the last 5 timeslices, just do a full
 484         * restore of the math state immediately to avoid the trap; the
 485         * chances of needing FPU soon are obviously high now
 486         *
 487         * tsk_used_math() checks prevent calling math_state_restore(),
 488         * which can sleep in the case of !tsk_used_math()
 489         */
 490        if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 491                math_state_restore();
        /* Return the previous task so the caller can finish the switch. */
 492        return prev_p;
 493}
 494



/* (mailing-list footer trimmed from pasted source) */