Hi Marc,

FYI, the warning below is still present.

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head:   88bb507a74ea7d75fa49edd421eaa710a7d80598
commit: 68b824e428c5fb5c3dc5ef80b1543e767534b58e KVM: arm64: Patch kimage_voffset instead of loading the EL1 value
date:   10 weeks ago
config: arm64-randconfig-r024-20210202 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=68b824e428c5fb5c3dc5ef80b1543e767534b58e
        git remote add linus https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
        git fetch --no-tags linus master
        git checkout 68b824e428c5fb5c3dc5ef80b1543e767534b58e
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <[email protected]>

All warnings (new ones prefixed by >>):

   arch/arm64/kvm/va_layout.c:138:6: warning: no previous prototype for 'kvm_patch_vector_branch' [-Wmissing-prototypes]
     138 | void kvm_patch_vector_branch(struct alt_instr *alt,
         |      ^~~~~~~~~~~~~~~~~~~~~~~
   arch/arm64/kvm/va_layout.c:249:6: warning: no previous prototype for 'kvm_update_kimg_phys_offset' [-Wmissing-prototypes]
     249 | void kvm_update_kimg_phys_offset(struct alt_instr *alt,
         |      ^~~~~~~~~~~~~~~~~~~~~~~~~~~
>> arch/arm64/kvm/va_layout.c:255:6: warning: no previous prototype for 'kvm_get_kimage_voffset' [-Wmissing-prototypes]
     255 | void kvm_get_kimage_voffset(struct alt_instr *alt,
         |      ^~~~~~~~~~~~~~~~~~~~~~


vim +/kvm_get_kimage_voffset +255 arch/arm64/kvm/va_layout.c

   137  
 > 138  void kvm_patch_vector_branch(struct alt_instr *alt,
    139                               __le32 *origptr, __le32 *updptr, int nr_inst)
   140  {
   141          u64 addr;
   142          u32 insn;
   143  
   144          BUG_ON(nr_inst != 5);
   145  
    146          if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
    147                  WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
   148                  return;
   149          }
   150  
   151          /*
   152           * Compute HYP VA by using the same computation as kern_hyp_va()
   153           */
   154          addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
   155          addr &= va_mask;
   156          addr |= tag_val << tag_lsb;
   157  
   158          /* Use PC[10:7] to branch to the same vector in KVM */
   159          addr |= ((u64)origptr & GENMASK_ULL(10, 7));
   160  
   161          /*
    162           * Branch over the preamble in order to avoid the initial store on
    163           * the stack (which we already perform in the hardening vectors).
   164           */
   165          addr += KVM_VECTOR_PREAMBLE;
   166  
   167          /* stp x0, x1, [sp, #-16]! */
   168          insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
   169                                                  AARCH64_INSN_REG_1,
   170                                                  AARCH64_INSN_REG_SP,
   171                                                  -16,
    172                                                  AARCH64_INSN_VARIANT_64BIT,
    173                                                  AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
   174          *updptr++ = cpu_to_le32(insn);
   175  
   176          /* movz x0, #(addr & 0xffff) */
   177          insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
   178                                           (u16)addr,
   179                                           0,
   180                                           AARCH64_INSN_VARIANT_64BIT,
   181                                           AARCH64_INSN_MOVEWIDE_ZERO);
   182          *updptr++ = cpu_to_le32(insn);
   183  
   184          /* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
   185          insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
   186                                           (u16)(addr >> 16),
   187                                           16,
   188                                           AARCH64_INSN_VARIANT_64BIT,
   189                                           AARCH64_INSN_MOVEWIDE_KEEP);
   190          *updptr++ = cpu_to_le32(insn);
   191  
   192          /* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
   193          insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
   194                                           (u16)(addr >> 32),
   195                                           32,
   196                                           AARCH64_INSN_VARIANT_64BIT,
   197                                           AARCH64_INSN_MOVEWIDE_KEEP);
   198          *updptr++ = cpu_to_le32(insn);
   199  
   200          /* br x0 */
   201          insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
   202                                             AARCH64_INSN_BRANCH_NOLINK);
   203          *updptr++ = cpu_to_le32(insn);
   204  }
   205  
    206  static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
   207  {
   208          u32 insn, oinsn, rd;
   209  
   210          BUG_ON(nr_inst != 4);
   211  
   212          /* Compute target register */
   213          oinsn = le32_to_cpu(*origptr);
    214          rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
   215  
   216          /* movz rd, #(val & 0xffff) */
   217          insn = aarch64_insn_gen_movewide(rd,
   218                                           (u16)val,
   219                                           0,
   220                                           AARCH64_INSN_VARIANT_64BIT,
   221                                           AARCH64_INSN_MOVEWIDE_ZERO);
   222          *updptr++ = cpu_to_le32(insn);
   223  
   224          /* movk rd, #((val >> 16) & 0xffff), lsl #16 */
   225          insn = aarch64_insn_gen_movewide(rd,
   226                                           (u16)(val >> 16),
   227                                           16,
   228                                           AARCH64_INSN_VARIANT_64BIT,
   229                                           AARCH64_INSN_MOVEWIDE_KEEP);
   230          *updptr++ = cpu_to_le32(insn);
   231  
   232          /* movk rd, #((val >> 32) & 0xffff), lsl #32 */
   233          insn = aarch64_insn_gen_movewide(rd,
   234                                           (u16)(val >> 32),
   235                                           32,
   236                                           AARCH64_INSN_VARIANT_64BIT,
   237                                           AARCH64_INSN_MOVEWIDE_KEEP);
   238          *updptr++ = cpu_to_le32(insn);
   239  
   240          /* movk rd, #((val >> 48) & 0xffff), lsl #48 */
   241          insn = aarch64_insn_gen_movewide(rd,
   242                                           (u16)(val >> 48),
   243                                           48,
   244                                           AARCH64_INSN_VARIANT_64BIT,
   245                                           AARCH64_INSN_MOVEWIDE_KEEP);
   246          *updptr++ = cpu_to_le32(insn);
   247  }
   248  
   249  void kvm_update_kimg_phys_offset(struct alt_instr *alt,
    250                                   __le32 *origptr, __le32 *updptr, int nr_inst)
   251  {
    252          generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
   253  }
   254  
 > 255  void kvm_get_kimage_voffset(struct alt_instr *alt,

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]

Attachment: .config.gz
Description: application/gzip
