FluxSand 1.0
FluxSand - Interactive Digital Hourglass
InferenceEngine Class Reference

Public Member Functions

 InferenceEngine (const std::string &model_path, float update_ratio=0.1f, float confidence_threshold=0.6f, size_t history_size=5, size_t min_consensus_votes=3)
 Constructor for the InferenceEngine.
 
void RecordData (int duration, const char *prefix)
 
void InferenceTask ()
 
void OnData (const Type::Vector3 &accel, const Type::Vector3 &gyro, const Type::Eulr &eulr)
 
void RegisterDataCallback (const std::function< void(ModelOutput)> &callback)
 
void RunUnitTest ()
 

Private Member Functions

void CollectSensorData ()
 
ModelOutput RunInference (std::vector< float > &input_data)
 Runs inference on the collected sensor data.
 
template<typename T >
std::string VectorToString (const std::vector< T > &vec)
 

Private Attributes

Ort::Env env_
 
Ort::SessionOptions session_options_
 
Ort::Session session_
 
Ort::AllocatorWithDefaultOptions allocator_
 
std::vector< std::string > input_names_
 
std::vector< const char * > input_names_cstr_
 
std::vector< int64_t > input_shape_
 
size_t input_tensor_size_
 
std::vector< std::string > output_names_
 
std::vector< const char * > output_names_cstr_
 
std::vector< int64_t > output_shape_
 
std::deque< float > sensor_buffer_
 
std::deque< ModelOutput > prediction_history_
 
float confidence_threshold_
 
size_t history_size_
 
size_t min_consensus_votes_
 
Type::Eulr eulr_ {}
 
Type::Vector3 gyro_ {}
 
Type::Vector3 accel_ {}
 
std::function< void(ModelOutput)> data_callback_
 
std::binary_semaphore ready_
 
std::thread inference_thread_
 
int new_data_number_
 

Detailed Description

Definition at line 53 of file comp_inference.hpp.

Constructor & Destructor Documentation

◆ InferenceEngine()

InferenceEngine::InferenceEngine ( const std::string &  model_path,
float  update_ratio = 0.1f,
float  confidence_threshold = 0.6f,
size_t  history_size = 5,
size_t  min_consensus_votes = 3 
)
inline explicit

Constructor for the InferenceEngine.

Parameters
model_path            Path to the ONNX model file.
update_ratio          Fraction of the input window that must be refreshed with new samples before another inference is run.
confidence_threshold  Minimum probability required to accept a prediction.
history_size          Number of past predictions stored for voting.
min_consensus_votes   Minimum votes required to confirm a prediction.

Definition at line 65 of file comp_inference.hpp.

70 : env_(ORT_LOGGING_LEVEL_WARNING, "ONNXModel"),
71 session_options_(),
72 session_(env_, model_path.c_str(), session_options_),
73 allocator_(),
74 ready_(0),
75 confidence_threshold_(confidence_threshold),
76 history_size_(history_size),
77 min_consensus_votes_(min_consensus_votes) {
78 /* Retrieve input tensor metadata */
79 size_t num_input_nodes = session_.GetInputCount();
80 std::cout << "Model Input Tensors:\n";
81
82 for (size_t i = 0; i < num_input_nodes; ++i) {
83 auto name = session_.GetInputNameAllocated(i, allocator_);
84 input_names_.push_back(name.get());
85 input_names_cstr_.push_back(input_names_.back().c_str());
86
87 Ort::TypeInfo input_type_info = session_.GetInputTypeInfo(i);
88 auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
89 input_shape_ = input_tensor_info.GetShape();
90
91 /* Handle dynamic batch dimension */
92 if (input_shape_[0] == -1) {
93 input_shape_[0] = 1;
94 }
95
96 std::cout << " Name: " << name.get() << "\n Shape: ["
97 << VectorToString(input_shape_) << "]\n";
98
99 input_tensor_size_ =
100 std::accumulate(input_shape_.begin(), input_shape_.end(), 1,
101 std::multiplies<int64_t>());
102 }
103
104 /* Retrieve output tensor metadata */
105 size_t num_output_nodes = session_.GetOutputCount();
106 for (size_t i = 0; i < num_output_nodes; ++i) {
107 output_names_.push_back(
108 session_.GetOutputNameAllocated(i, allocator_).get());
109 output_names_cstr_.push_back(output_names_.back().c_str());
110
111 Ort::TypeInfo output_type_info = session_.GetOutputTypeInfo(i);
112 auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
113 output_shape_ = output_tensor_info.GetShape();
114
115 std::cout << "Model Output Tensor:\n Name: " << output_names_.back()
116 << "\n Shape: [" << VectorToString(output_shape_) << "]\n";
117 }
118
119 /* Configure data collection parameters */
120 new_data_number_ =
121 static_cast<int>(static_cast<float>(input_shape_[1]) * update_ratio);
122
123 std::cout << std::format("Model initialized: {}\n\n", model_path);
124
125 /* Start inference thread */
126 inference_thread_ = std::thread(&InferenceEngine::InferenceTask, this);
127 }
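
A minimal usage sketch (not from the header): construction loads the ONNX model and immediately starts the background inference thread, so a caller only needs to register a callback and then feed sensor data through OnData(). The model path and tuning values below are illustrative, and LABELS is assumed to be the ModelOutput-to-name map that RunUnitTest() uses.

#include <chrono>
#include <iostream>
#include <thread>

#include "comp_inference.hpp"

int main() {
  /* Illustrative model path and tuning values. */
  InferenceEngine engine("model.onnx", 0.1f, 0.6f, 5, 3);

  /* Receive debounced predictions from the inference thread. */
  engine.RegisterDataCallback([](ModelOutput output) {
    std::cout << "Detected: " << LABELS.at(output) << '\n';
  });

  /* A sensor driver would now call engine.OnData() for every sample;
     keep the main thread alive while the inference thread runs. */
  while (true) {
    std::this_thread::sleep_for(std::chrono::seconds(1));
  }
}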

Member Function Documentation

◆ CollectSensorData()

void InferenceEngine::CollectSensorData ( )
inline private

Definition at line 244 of file comp_inference.hpp.

244 {
245 /* Normalize and store sensor readings */
246 sensor_buffer_.push_back(eulr_.pit.Value());
247 sensor_buffer_.push_back(eulr_.rol.Value());
248 sensor_buffer_.push_back(gyro_.x);
249 sensor_buffer_.push_back(gyro_.y);
250 sensor_buffer_.push_back(gyro_.z);
251 sensor_buffer_.push_back(accel_.x / GRAVITY);
252 sensor_buffer_.push_back(accel_.y / GRAVITY);
253 sensor_buffer_.push_back(accel_.z / GRAVITY);
254
255 /* Maintain fixed buffer size */
256 while (sensor_buffer_.size() > input_tensor_size_) {
257 sensor_buffer_.pop_front();
258 }
259 }
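CollectSensorData() maintains a sliding window: eight channels (pitch, roll, three gyro axes, three gravity-normalized accelerometer axes) are appended per sample, and the oldest values are evicted once the buffer exceeds the model's input size. A standalone sketch of the same pattern, with illustrative names:

#include <cstddef>
#include <deque>
#include <vector>

/* Illustrative sliding window: append one frame of channel values, then
   evict the oldest values so at most `capacity` floats are retained. */
void PushFrame(std::deque<float>& buffer, const std::vector<float>& frame,
               size_t capacity) {
  buffer.insert(buffer.end(), frame.begin(), frame.end());
  while (buffer.size() > capacity) {
    buffer.pop_front();
  }
}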

◆ InferenceTask()

void InferenceEngine::InferenceTask ( )
inline

Definition at line 166 of file comp_inference.hpp.

166 {
167 int update_counter = 0;
168
169 while (true) {
170 ready_.acquire();
171
172 /* Update sensor buffer */
173 CollectSensorData();
174
175 if (update_counter++ >= new_data_number_) {
176 update_counter = 0;
177
178 if (sensor_buffer_.size() >= input_tensor_size_) {
179 std::vector<float> input_data(
180 sensor_buffer_.begin(),
181 sensor_buffer_.begin() + static_cast<int>(input_tensor_size_));
182 static ModelOutput last_result = ModelOutput::UNRECOGNIZED;
183 ModelOutput result = RunInference(input_data);
184 if (last_result != result && result != ModelOutput::UNRECOGNIZED) {
185 last_result = result;
186 if (data_callback_) {
187 data_callback_(result);
188 }
189 }
190 }
191 }
192 }
193 }
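InferenceTask() is the consumer half of a semaphore handshake: OnData() publishes the latest sample and releases ready_, and this loop blocks on acquire() until that happens, running the model only after new_data_number_ fresh samples have accumulated and notifying the callback only when the result changes. A reduced sketch of the handshake pattern, with illustrative names (C++20 std::binary_semaphore):

#include <semaphore>

std::binary_semaphore sample_ready{0};  // starts empty, like ready_(0)
float latest_value = 0.0f;              // written by the producer

void OnSample(float value) {            // producer side (cf. OnData)
  latest_value = value;
  sample_ready.release();               // wake the inference loop
}

void InferenceLoop() {                  // consumer side (cf. InferenceTask)
  while (true) {
    sample_ready.acquire();             // block until a new sample arrives
    /* ... append latest_value to the window, run inference once enough
       of the window has been refreshed ... */
  }
}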

◆ OnData()

void InferenceEngine::OnData ( const Type::Vector3 &  accel,
const Type::Vector3 &  gyro,
const Type::Eulr &  eulr 
)
inline

Definition at line 195 of file comp_inference.hpp.

196 {
197 accel_ = accel;
198 gyro_ = gyro;
199 eulr_ = eulr;
200 ready_.release();
201 }
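
A usage sketch of the producer side: a hypothetical 1 kHz driver loop pushing each fused sample into the engine. ReadAccel(), ReadGyro() and ReadEulr() are placeholders for the real sensor/AHRS sources and are not part of this header.

#include <chrono>
#include <thread>

#include "comp_inference.hpp"

/* Hypothetical driver loop; the Read*() calls are placeholders. */
void SensorLoop(InferenceEngine& engine) {
  while (true) {
    Type::Vector3 accel = ReadAccel();
    Type::Vector3 gyro = ReadGyro();
    Type::Eulr eulr = ReadEulr();
    engine.OnData(accel, gyro, eulr);  // publish the sample and signal ready_
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}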

◆ RecordData()

void InferenceEngine::RecordData ( int  duration,
const char *  prefix 
)
inline

Definition at line 129 of file comp_inference.hpp.

129 {
130 /* Generate timestamped filename */
131 auto t = std::time(nullptr);
132 std::tm tm = *std::localtime(&t);
133 std::string filename =
134 std::format("{}_record_{:04}{:02}{:02}_{:02}{:02}{:02}.csv", prefix,
135 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
136 tm.tm_min, tm.tm_sec);
137
138 /* Create data file */
139 std::ofstream file(filename);
140 if (!file.is_open()) {
141 std::cerr << std::format("Failed to create: {}\n", filename);
142 return;
143 }
144
145 /* Write CSV header */
146 file << "Pitch,Roll,Gyro_X,Gyro_Y,Gyro_Z,Accel_X,Accel_Y,Accel_Z\n";
147
148 /* Collect data at 1kHz */
149 constexpr std::chrono::microseconds RUN_CYCLE(1000);
150 auto next_sample = std::chrono::steady_clock::now();
151
152 for (int i = 0; i < duration; ++i) {
153 file << std::format("{},{},{},{},{},{},{},{}\n", eulr_.pit.Value(),
154 eulr_.rol.Value(), gyro_.x, gyro_.y, gyro_.z,
155 accel_.x, accel_.y, accel_.z);
156
157 next_sample += RUN_CYCLE;
158 std::this_thread::sleep_until(next_sample);
159 }
160
161 file.close();
162 std::cout << std::format("Recorded {} samples to {}\n", duration, filename);
163 }
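
A usage sketch: capturing roughly five seconds of labelled training data (5000 samples at the 1 kHz cycle). The prefix and wrapper name are illustrative; the resulting file is named e.g. gesture_record_YYYYMMDD_HHMMSS.csv.

#include "comp_inference.hpp"

/* Record ~5 s of motion data for offline training (illustrative wrapper). */
void CaptureGestureSample(InferenceEngine& engine) {
  engine.RecordData(5000, "gesture");
}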

◆ RegisterDataCallback()

void InferenceEngine::RegisterDataCallback ( const std::function< void(ModelOutput)> &  callback)
inline

Definition at line 203 of file comp_inference.hpp.

203 {
204 data_callback_ = callback;
205 }

◆ RunInference()

ModelOutput InferenceEngine::RunInference ( std::vector< float > &  input_data)
inline private

Runs inference on the collected sensor data.

Parameters
input_data  Vector containing preprocessed sensor data.
Returns
The predicted motion category as a ModelOutput value (UNRECOGNIZED if the confidence threshold or consensus requirement is not met).

Definition at line 266 of file comp_inference.hpp.

266 {
267 /* Validate output tensor dimensions */
268 if (output_shape_.size() < 2 || output_shape_[1] <= 0) {
269 std::perror("Invalid model output dimensions");
270 }
271
272 /* Prepare input tensor */
273 Ort::MemoryInfo memory_info =
274 Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
275 Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
276 memory_info, input_data.data(), input_data.size(), input_shape_.data(),
277 input_shape_.size());
278
279 /* Perform inference */
280 auto outputs =
281 session_.Run(Ort::RunOptions{nullptr}, input_names_cstr_.data(),
282 &input_tensor, 1, output_names_cstr_.data(), 1);
283
284 /* Get the class with the highest probability */
285 float* probs = outputs.front().GetTensorMutableData<float>();
286 auto max_prob = std::max_element(probs, probs + output_shape_[1]);
287 int pred_class = static_cast<int>(max_prob - probs);
288
289 /* Apply confidence threshold */
290 if (*max_prob < confidence_threshold_) {
291 pred_class = static_cast<int>(ModelOutput::UNRECOGNIZED);
292 }
293
294 /* Update prediction history */
295 prediction_history_.push_back(static_cast<ModelOutput>(pred_class));
296 if (prediction_history_.size() > history_size_) {
297 prediction_history_.pop_front();
298 }
299
300 /* Perform majority voting to ensure stable predictions */
301 std::map<ModelOutput, int> votes;
302 for (auto label : prediction_history_) {
303 votes[label]++;
304 }
305
306 auto consensus =
307 std::max_element(votes.begin(), votes.end(),
308 [](auto& a, auto& b) { return a.second < b.second; });
309
310 /* Return the final motion category if consensus is reached */
311 ModelOutput result = (consensus->second >= min_consensus_votes_)
312 ? consensus->first
313 : ModelOutput::UNRECOGNIZED;
314
315 return result;
316 }
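
Post-processing happens in two stages: the raw argmax prediction is rejected if its probability falls below confidence_threshold_, and the surviving labels are then debounced by majority vote over the last history_size_ predictions. A standalone sketch of the voting stage, with illustrative names and plain int labels instead of ModelOutput:

#include <algorithm>
#include <deque>
#include <map>

/* Illustrative majority vote: return the most frequent label in `history`
   if it has at least `min_votes` occurrences, otherwise -1 (unrecognized). */
int MajorityVote(const std::deque<int>& history, int min_votes) {
  std::map<int, int> votes;
  for (int label : history) {
    ++votes[label];
  }
  auto best = std::max_element(
      votes.begin(), votes.end(),
      [](const auto& a, const auto& b) { return a.second < b.second; });
  return (best != votes.end() && best->second >= min_votes) ? best->first : -1;
}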

◆ RunUnitTest()

void InferenceEngine::RunUnitTest ( )
inline

Definition at line 207 of file comp_inference.hpp.

207 {
208 std::cout
209 << "[InferenceEngine::UnitTest] Starting inference timing test...\n";
210
211 const int N = 50; // Number of inference runs
212 std::vector<float> dummy_input(input_tensor_size_, 0.0f); // All zero input
213
214 std::vector<float> timings_ms;
215 timings_ms.reserve(N);
216
217 for (int i = 0; i < N; ++i) {
218 auto t_start = std::chrono::high_resolution_clock::now();
219 ModelOutput result = RunInference(dummy_input);
220 auto t_end = std::chrono::high_resolution_clock::now();
221
222 float ms =
223 std::chrono::duration<float, std::milli>(t_end - t_start).count();
224 timings_ms.push_back(ms);
225
226 std::cout << std::format("Run {:02d} → {:>7.3f} ms | Result: {}\n", i + 1,
227 ms, LABELS.at(result));
228 }
229
230 auto [min_it, max_it] =
231 std::minmax_element(timings_ms.begin(), timings_ms.end());
232 float avg = std::accumulate(timings_ms.begin(), timings_ms.end(), 0.0f) / N;
233
234 std::cout << "\n[Inference Timing Summary]\n";
235 std::cout << std::format(" Total Runs : {}\n", N);
236 std::cout << std::format(" Min Time (ms) : {:>7.3f}\n", *min_it);
237 std::cout << std::format(" Max Time (ms) : {:>7.3f}\n", *max_it);
238 std::cout << std::format(" Avg Time (ms) : {:>7.3f}\n", avg);
239 std::cout << "[InferenceEngine::UnitTest] ✅ Timing test complete.\n";
240 }

◆ VectorToString()

template<typename T >
std::string InferenceEngine::VectorToString ( const std::vector< T > &  vec)
inline private

Definition at line 320 of file comp_inference.hpp.

320 {
321 std::stringstream ss;
322 for (size_t i = 0; i < vec.size(); ++i) {
323 ss << vec[i] << (i < vec.size() - 1 ? ", " : "");
324 }
325 return ss.str();
326 }

Field Documentation

◆ accel_

Type::Vector3 InferenceEngine::accel_ {}
private

Definition at line 358 of file comp_inference.hpp.

358{};

◆ allocator_

Ort::AllocatorWithDefaultOptions InferenceEngine::allocator_
private

Definition at line 332 of file comp_inference.hpp.

◆ confidence_threshold_

float InferenceEngine::confidence_threshold_
private

Definition at line 349 of file comp_inference.hpp.

◆ data_callback_

std::function<void(ModelOutput)> InferenceEngine::data_callback_
private

Definition at line 361 of file comp_inference.hpp.

◆ env_

Ort::Env InferenceEngine::env_
private

Definition at line 329 of file comp_inference.hpp.

◆ eulr_

Type::Eulr InferenceEngine::eulr_ {}
private

Definition at line 356 of file comp_inference.hpp.

356{};

◆ gyro_

Type::Vector3 InferenceEngine::gyro_ {}
private

Definition at line 357 of file comp_inference.hpp.

357{};

◆ history_size_

size_t InferenceEngine::history_size_
private

Definition at line 351 of file comp_inference.hpp.

◆ inference_thread_

std::thread InferenceEngine::inference_thread_
private

Definition at line 365 of file comp_inference.hpp.

◆ input_names_

std::vector<std::string> InferenceEngine::input_names_
private

Definition at line 335 of file comp_inference.hpp.

◆ input_names_cstr_

std::vector<const char*> InferenceEngine::input_names_cstr_
private

Definition at line 336 of file comp_inference.hpp.

◆ input_shape_

std::vector<int64_t> InferenceEngine::input_shape_
private

Definition at line 337 of file comp_inference.hpp.

◆ input_tensor_size_

size_t InferenceEngine::input_tensor_size_
private

Definition at line 338 of file comp_inference.hpp.

◆ min_consensus_votes_

size_t InferenceEngine::min_consensus_votes_
private

Definition at line 353 of file comp_inference.hpp.

◆ new_data_number_

int InferenceEngine::new_data_number_
private

Definition at line 366 of file comp_inference.hpp.

◆ output_names_

std::vector<std::string> InferenceEngine::output_names_
private

Definition at line 340 of file comp_inference.hpp.

◆ output_names_cstr_

std::vector<const char*> InferenceEngine::output_names_cstr_
private

Definition at line 341 of file comp_inference.hpp.

◆ output_shape_

std::vector<int64_t> InferenceEngine::output_shape_
private

Definition at line 342 of file comp_inference.hpp.

◆ prediction_history_

std::deque<ModelOutput> InferenceEngine::prediction_history_
private

Definition at line 346 of file comp_inference.hpp.

◆ ready_

std::binary_semaphore InferenceEngine::ready_
private

Definition at line 364 of file comp_inference.hpp.

◆ sensor_buffer_

std::deque<float> InferenceEngine::sensor_buffer_
private

Definition at line 345 of file comp_inference.hpp.

◆ session_

Ort::Session InferenceEngine::session_
private

Definition at line 331 of file comp_inference.hpp.

◆ session_options_

Ort::SessionOptions InferenceEngine::session_options_
private

Definition at line 330 of file comp_inference.hpp.


The documentation for this class was generated from the following file: