================
@@ -4189,6 +4189,53 @@ static bool interp__builtin_ia32_gfni_mul(InterpState 
&S, CodePtr OpPC,
   return true;
 }
 
+static bool interp__builtin_ia32_vpdp(InterpState &S, CodePtr OpPC,
+                                      const CallExpr *Call, bool IsDottingWord,
+                                      bool IsSaturating) {
+  const auto *SrcVecT = Call->getArg(0)->getType()->castAs<VectorType>();
+  const auto *OpAVecT = Call->getArg(1)->getType()->castAs<VectorType>();
+  const auto *OpBVecT = Call->getArg(2)->getType()->castAs<VectorType>();
+
+  PrimType SrcElemT = *S.getContext().classify(SrcVecT->getElementType());
+  PrimType OpAElemT = *S.getContext().classify(OpAVecT->getElementType());
+  PrimType OpBElemT = *S.getContext().classify(OpBVecT->getElementType());
+
+  unsigned NumElements = SrcVecT->getNumElements();
----------------
AkashDeoNU wrote:

I don't _think_ this is an issue. We only care about the number of lanes in the
source vector: this set of intrinsics performs a dot product for each lane of
the source vector, so it is fine that the operand vectors' element counts
differ (and they always do). But I'll try to add a commit that clarifies this
so it's more readable.

https://github.com/llvm/llvm-project/pull/190549
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to