Unlike the jump_label bits, static_cpu_has() is implemented with
alternatives. We use the new alternatives type field to distinguish
these from any other alternatives.
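
For reference, the alternatives entry layout decoded here looks roughly
like the sketch below; the offsets match the ALT_*_OFFSET defines added
to tools/objtool/special.c, while the field names follow the x86
struct alt_instr and may not match exactly:

  struct alt_instr {
          s32 instr_offset;       /*  0: original instruction */
          s32 repl_offset;        /*  4: replacement instruction */
          u16 cpuid;              /*  8: CPUID feature bit to test */
          u8  instrlen;           /* 10: length of original instruction */
          u8  replacementlen;     /* 11: length of replacement */
          u8  padlen;             /* 12: padding added at build time */
          u8  type;               /* 13: ALT_TYPE_DEFAULT / ALT_TYPE_STATIC_CPU_HAS */
  };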

As with jump_labels, have static_cpu_has() set static_jump_dest on the
instructions after the static branch, so that we can assert on it.
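
The assertion itself is not part of this patch; conceptually it amounts
to something like the sketch below, where insn_is_static_branch() is a
hypothetical helper standing in for however the checker recognises a
static branch:

  /*
   * Sketch only: when validating the target of a static branch
   * (jump_label or static_cpu_has), require that the destination
   * instruction was marked static_jump_dest.
   */
  if (insn_is_static_branch(insn) &&
      !insn->jump_dest->static_jump_dest) {
          WARN_FUNC("static branch to an unmarked destination",
                    insn->sec, insn->offset);
          return -1;
  }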

Cc: Borislav Petkov <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 tools/objtool/check.c   |   21 +++++++++++++++++++++
 tools/objtool/special.c |   11 +++++++++++
 tools/objtool/special.h |    1 +
 3 files changed, 33 insertions(+)

--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -636,6 +636,12 @@ static int handle_group_alt(struct objto
        fake_jump->ignore = true;
 
        if (!special_alt->new_len) {
+               /*
+                * The NOP case for _static_cpu_has()
+                */
+               if (special_alt->static_feat)
+                       fake_jump->jump_dest->static_jump_dest = true;
+
                *new_insn = fake_jump;
                return 0;
        }
@@ -664,6 +670,21 @@ static int handle_group_alt(struct objto
                                  insn->sec, insn->offset);
                        return -1;
                }
+
+               if (special_alt->static_feat) {
+                       if (insn->type != INSN_JUMP_UNCONDITIONAL) {
+                               WARN_FUNC("not an unconditional jump in _static_cpu_has()",
+                                         insn->sec, insn->offset);
+                       }
+                       if (insn->jump_dest == fake_jump) {
+                               WARN_FUNC("jump inside alternative for _static_cpu_has()",
+                                         insn->sec, insn->offset);
+                       }
+                       /*
+                        * The JMP+disp case for _static_cpu_has()
+                        */
+                       insn->jump_dest->static_jump_dest = true;
+               }
        }
 
        if (!last_new_insn) {
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -40,6 +40,11 @@
 #define ALT_FEATURE_OFFSET     8
 #define ALT_ORIG_LEN_OFFSET    10
 #define ALT_NEW_LEN_OFFSET     11
+#define ALT_PADDING_OFFSET     12
+#define ALT_TYPE_OFFSET                13
+
+#define ALT_TYPE_DEFAULT       0
+#define ALT_TYPE_STATIC_CPU_HAS        1
 
 #define X86_FEATURE_POPCNT (4*32+23)
 
@@ -99,10 +104,13 @@ static int get_alt_entry(struct elf *elf
 
        if (entry->feature) {
                unsigned short feature;
+               unsigned char type;
 
                feature = *(unsigned short *)(sec->data->d_buf + offset +
                                              entry->feature);
 
+               type = *(unsigned char *)(sec->data->d_buf + offset + ALT_TYPE_OFFSET);
+
                /*
                 * It has been requested that we don't validate the !POPCNT
                 * feature path which is a "very very small percentage of
@@ -110,6 +118,9 @@ static int get_alt_entry(struct elf *elf
                 */
                if (feature == X86_FEATURE_POPCNT)
                        alt->skip_orig = true;
+
+               if (type == ALT_TYPE_STATIC_CPU_HAS)
+                       alt->static_feat = true;
        }
 
        orig_rela = find_rela_by_dest(sec, offset + entry->orig);
--- a/tools/objtool/special.h
+++ b/tools/objtool/special.h
@@ -27,6 +27,7 @@ struct special_alt {
        bool group;
        bool skip_orig;
        bool jump_or_nop;
+       bool static_feat;
 
        struct section *orig_sec;
        unsigned long orig_off;

