tqchen commented on code in PR #283: URL: https://github.com/apache/tvm-ffi/pull/283#discussion_r2553123334
########## examples/cubin_launcher/src/lib_embedded.cc: ########## @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/*! + * \file examples/cubin_launcher/src/lib_embedded.cc + * \brief TVM-FFI library with embedded CUBIN kernels. + * + * This library exports TVM-FFI functions to launch CUDA kernels from + * embedded CUBIN data. 
+ */ + +#include <tvm/ffi/container/tensor.h> +#include <tvm/ffi/error.h> +#include <tvm/ffi/extra/c_env_api.h> +#include <tvm/ffi/extra/cubin_launcher.h> +#include <tvm/ffi/function.h> + +#include <cstdint> +#include <memory> + +// External symbols for embedded CUBIN data (linked via objcopy) +extern "C" const char __cubin_data[]; +extern "C" const char __cubin_data_end[]; + +// Calculate size from the symbols +static const uint64_t cubin_data_size = + reinterpret_cast<const char*>(&__cubin_data_end) - reinterpret_cast<const char*>(&__cubin_data); + +// Global CUBIN module and kernels (initialized on first use) +static std::unique_ptr<tvm::ffi::CubinModule> g_cubin_module; +static std::unique_ptr<tvm::ffi::CubinKernel> g_add_one_kernel; +static std::unique_ptr<tvm::ffi::CubinKernel> g_mul_two_kernel; + +// Initialize the CUBIN module and kernels +void InitializeCubinModule() { + if (g_cubin_module == nullptr) { + g_cubin_module = std::make_unique<tvm::ffi::CubinModule>(__cubin_data, cubin_data_size); + g_add_one_kernel = std::make_unique<tvm::ffi::CubinKernel>((*g_cubin_module)["add_one_cuda"]); + g_mul_two_kernel = std::make_unique<tvm::ffi::CubinKernel>((*g_cubin_module)["mul_two_cuda"]); + } +} + +namespace cubin_embedded { + +/*! + * \brief Launch add_one_cuda kernel on input tensor. 
 * \param x Input tensor (float32, 1D) + * \param y Output tensor (float32, 1D, same shape as x) + */ +void AddOne(tvm::ffi::TensorView x, tvm::ffi::TensorView y) { + InitializeCubinModule(); Review Comment: consider using a different pattern via a static singleton ```c++ // maybe as macro TVM_FFI_EMBED_CUBIN_MODULE(env); // use env as the indicator key in case we want to embed other cubins extern "C" static const char __tvm_ffi__cubin_env[]; extern "C" static const char __tvm_ffi__cubin_env_end[]; namespace { struct CubinModule_env { tvm::ffi::CubinModule mod {__tvm_ffi__cubin_env, __tvm_ffi__cubin_env_end}; static CubinModule_env* Global() { static CubinModule_env inst; return &inst; } }; } // anonymous namespace to avoid symbol conflict void MulTwo(tvm::ffi::TensorView x, tvm::ffi::TensorView y) { // maybe as macro // static auto kernel = TVM_FFI_EMBED_CUBIN_GET_KERNEL(env, "mul_two_cuda"); static tvm::ffi::CubinKernel kernel = CubinModule_env::Global()->mod["mul_two_cuda"]; kernel->add_two_cuda.launch(...); } ``` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
