Running an ExecuTorch Model Using the Module Extension in C++

#include <executorch/extension/module/module.h>
#include <executorch/extension/tensor/tensor.h>
 
using namespace ::executorch::extension;
 
// Create a Module that refers to the serialized program file at the given
// path. (The path here is a placeholder — substitute a real .pte file.)
Module module("/path/to/model.pte");
 
// Wrap an existing raw float buffer in a Tensor with shape {1, 3, 256, 256}.
// NOTE(review): from_blob presumably borrows `input` rather than copying it,
// so the buffer must outlive `tensor` — confirm against the tensor extension
// documentation.
float input[1 * 3 * 256 * 256];
auto tensor = from_blob(input, {1, 3, 256, 256});
 
// Invoke the model's `forward` method with the input tensor.
const auto result = module.forward(tensor);
 
// forward() returns a result object; check for success before reading outputs.
if (result.ok()) {
  // Take the first output value as a Tensor and get a read-only pointer to
  // its float data. NOTE(review): assumes output 0 is a float tensor — the
  // actual dtype depends on the model.
  const auto output = result->at(0).toTensor().const_data_ptr<float>();
} 

Managing Tensor Memory in C++