On Tue, 18 Dec 2018 17:46:36 +0100
Daniel Bristot de Oliveira <bris...@redhat.com> wrote:

> +void text_poke_bp_batch(struct text_to_poke *tp, unsigned int nr_entries)
> +{
> +     unsigned int i;
> +     unsigned char int3 = 0xcc;
> +     int patched_all_but_first = 0;
> +
> +     bp_int3_tpv = tp;
> +     bp_int3_tpv_nr = nr_entries;
> +     bp_patching_in_progress = true;
> +     /*
> +      * Corresponding read barrier in int3 notifier for making sure the
> +      * in_progress and handler are correctly ordered wrt. patching.
> +      */
> +     smp_wmb();
> +
> +     for (i = 0; i < nr_entries; i++)
> +             text_poke_bp_set_handler(tp[i].addr, tp[i].handler, int3);
> +
> +     on_each_cpu(do_sync_core, NULL, 1);
> +
> +     for (i = 0; i < nr_entries; i++) {
> +             if (tp->len - sizeof(int3) > 0) {

Should this be:

                if (tp[i].len - sizeof(int3) > 0) {

? As written, the condition dereferences `tp` without the index, so every
iteration of the loop tests the length of the *first* entry rather than the
current one — entries after the first would be patched (or skipped) based on
the wrong length.

-- Steve

> +                     patch_all_but_first_byte(tp[i].addr, tp[i].opcode,
> +                                              tp[i].len, int3);
> +                     patched_all_but_first++;
> +             }
> +     }
> +
> +     if (patched_all_but_first) {
> +             /*
> +              * According to Intel, this core syncing is very likely
> +              * not necessary and we'd be safe even without it. But
> +              * better safe than sorry (plus there's not only Intel).
> +              */
> +             on_each_cpu(do_sync_core, NULL, 1);
> +     }
> +
> +     for (i = 0; i < nr_entries; i++)
> +             patch_first_byte(tp[i].addr, tp[i].opcode, int3);
> +
> +     on_each_cpu(do_sync_core, NULL, 1);
> +     /*
> +      * sync_core() implies an smp_mb() and orders this store against
> +      * the writing of the new instruction.
> +      */
> +     bp_int3_tpv_nr = 0;
> +     bp_int3_tpv = NULL;
> +     bp_patching_in_progress = false;
> +}

Reply via email to