main_functions.cc

/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

#include "sine_model_quantized.h"
#include "main_functions.h"
#include "data_types.h"
#include "constants.h"

// extern const unsigned char sine_model_quantized_tflite[];
// extern const unsigned int sine_model_quantized_tflite_len;
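// The model array and its length are expected to be defined in
// sine_model_quantized.h; in the upstream hello_world example, that data is
// generated from the trained .tflite file with "xxd -i".
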
// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
TfLiteTensor* output = nullptr;
int inference_count = 0;

// Create an area of memory to use for input, output, and intermediate arrays.
// Finding the minimum value for your model may require some trial and error.
const int kModelArenaSize = 2468;
const int kExtraArenaSize = 560 + 16 + 100;
const int kTensorArenaSize = kModelArenaSize + kExtraArenaSize;
uint8_t tensor_arena[kTensorArenaSize];
}  // namespace

// The name of this function is important for Arduino compatibility.
void setup() {
  // Set up logging. Google style is to avoid globals or statics because of
  // lifetime uncertainty, but since this has a trivial destructor it's okay.
  // NOLINTNEXTLINE(runtime-global-variables)
  static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;
  error_reporter->Report("Hello from the error reporter");

  // Map the model into a usable data structure. This doesn't involve any
  // copying or parsing; it's a very lightweight operation.
  model = tflite::GetModel(sine_model_quantized_tflite);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    error_reporter->Report(
        "Model provided is schema version %d not equal "
        "to supported version %d.",
        model->version(), TFLITE_SCHEMA_VERSION);
    return;
  }

  // This pulls in all the operation implementations we need.
  // NOLINTNEXTLINE(runtime-global-variables)
  static tflite::AllOpsResolver resolver;
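  // Note: AllOpsResolver links in every built-in kernel, which is convenient
  // for an example but costs code space; production builds typically use a
  // MicroMutableOpResolver that registers only the ops this model needs.
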
  // Build an interpreter to run the model with.
  static tflite::MicroInterpreter static_interpreter(
      model, resolver, tensor_arena, kTensorArenaSize, error_reporter,
      nullptr);
  interpreter = &static_interpreter;

  // Allocate memory from the tensor_arena for the model's tensors.
  TfLiteStatus allocate_status = interpreter->AllocateTensors();
  if (allocate_status != kTfLiteOk) {
    error_reporter->Report("AllocateTensors() failed");
    return;
  }
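
  // If AllocateTensors() fails, kTensorArenaSize is too small; newer TFLM
  // releases expose interpreter->arena_used_bytes() after a successful
  // allocation, which helps when trimming the arena (availability depends on
  // the library version in use).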

  // Obtain pointers to the model's input and output tensors.
  input = interpreter->input(0);
  output = interpreter->output(0);
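
  // Note: loop() below accesses these tensors through data.f, which assumes
  // float32 input/output (the upstream sine model presumably quantizes only
  // its weights and keeps float I/O). A fully int8-quantized model would need
  // data.int8 plus the tensor's quantization scale and zero point instead.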

  // Keep track of how many inferences we have performed.
  inference_count = 0;
}

// The name of this function is important for Arduino compatibility.
circle_t* loop() {
  static circle_t ret;
  ret.size = 4;
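  // Assumption: circle_t (declared in data_types.h) describes a circle for
  // the caller to draw, with size presumably its radius in pixels; the value
  // 4 is a display choice, not something the model produces.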

  // Calculate an x value to feed into the model. We compare the current
  // inference_count to the number of inferences per cycle to determine
  // our position within the range of possible x values the model was
  // trained on, and use this to calculate a value.
  float position = static_cast<float>(inference_count) /
                   static_cast<float>(kInferencesPerCycle);
  float x_val = position * kXrange;
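  // In the upstream hello_world example, constants.h defines kXrange as 2*pi,
  // so each batch of kInferencesPerCycle inferences sweeps x across one full
  // period of the sine function.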

  // Place our calculated x value in the model's input tensor.
  input->data.f[0] = x_val;

  // Run inference, and report any error.
  TfLiteStatus invoke_status = interpreter->Invoke();
  if (invoke_status != kTfLiteOk) {
    error_reporter->Report("Invoke failed on x_val: %f\n",
                           static_cast<double>(x_val));
    return nullptr;
  }

  // Read the predicted y value from the model's output tensor.
  float y_val = output->data.f[0];
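  // y_val is the model's approximation of sin(x_val); the small trained
  // network tracks the true sine wave only approximately.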
  ret.x = x_val;
  ret.y = y_val;

  // Increment inference_count, and reset it once we have reached the total
  // number per cycle.
  inference_count++;
  if (inference_count >= kInferencesPerCycle) {
    inference_count = 0;
  }
  return &ret;
}