/* stdglue.c — glues newlib's stdout stream to per-thread write callbacks,
 * falling back to the debug UART console. */
  1. #include "stdglue.h"
  2. #include "check.h"
  3. #include "memmgr.h"
  4. #include <FreeRTOS.h>
  5. #include <task.h>
  6. #include <furi_hal.h>
  7. #include <m-dict.h>
/* M*LIB dictionary mapping a thread id (FuriThreadId stored as uint32_t)
 * to that thread's stdout write callback. Expands to the
 * FuriStdglueCallbackDict_t type plus the _init/_get/_set_at/_erase
 * operations used below. */
DICT_DEF2(
    FuriStdglueCallbackDict,
    uint32_t,
    M_DEFAULT_OPLIST,
    FuriStdglueWriteCallback,
    M_PTR_OPLIST)
typedef struct {
    FuriMutex* mutex; // guards thread_outputs against concurrent access
    FuriStdglueCallbackDict_t thread_outputs; // per-thread stdout callbacks, keyed by thread id
} FuriStdglue;

// Singleton instance, allocated by furi_stdglue_init(); NULL until then.
static FuriStdglue* furi_stdglue = NULL;
  19. static ssize_t stdout_write(void* _cookie, const char* data, size_t size) {
  20. furi_assert(furi_stdglue);
  21. bool consumed = false;
  22. FuriThreadId task_id = furi_thread_get_current_id();
  23. if(xTaskGetSchedulerState() == taskSCHEDULER_RUNNING && task_id &&
  24. furi_mutex_acquire(furi_stdglue->mutex, FuriWaitForever) == FuriStatusOk) {
  25. // We are in the thread context
  26. // Handle thread callbacks
  27. FuriStdglueWriteCallback* callback_ptr =
  28. FuriStdglueCallbackDict_get(furi_stdglue->thread_outputs, (uint32_t)task_id);
  29. if(callback_ptr) {
  30. (*callback_ptr)(_cookie, data, size);
  31. consumed = true;
  32. }
  33. furi_check(furi_mutex_release(furi_stdglue->mutex) == FuriStatusOk);
  34. }
  35. // Flush
  36. if(data == 0) {
  37. /*
  38. * This means that we should flush internal buffers. Since we
  39. * don't we just return. (Remember, "handle" == -1 means that all
  40. * handles should be flushed.)
  41. */
  42. return 0;
  43. }
  44. // Debug uart
  45. if(!consumed) furi_hal_console_tx((const uint8_t*)data, size);
  46. // All data consumed
  47. return size;
  48. }
  49. void furi_stdglue_init() {
  50. furi_stdglue = malloc(sizeof(FuriStdglue));
  51. // Init outputs structures
  52. furi_stdglue->mutex = furi_mutex_alloc(FuriMutexTypeNormal);
  53. furi_check(furi_stdglue->mutex);
  54. FuriStdglueCallbackDict_init(furi_stdglue->thread_outputs);
  55. // Prepare and set stdout descriptor
  56. FILE* fp = fopencookie(
  57. NULL,
  58. "w",
  59. (cookie_io_functions_t){
  60. .read = NULL,
  61. .write = stdout_write,
  62. .seek = NULL,
  63. .close = NULL,
  64. });
  65. setvbuf(fp, NULL, _IOLBF, 0);
  66. stdout = fp;
  67. }
  68. bool furi_stdglue_set_thread_stdout_callback(FuriStdglueWriteCallback callback) {
  69. furi_assert(furi_stdglue);
  70. FuriThreadId task_id = furi_thread_get_current_id();
  71. if(task_id) {
  72. furi_check(furi_mutex_acquire(furi_stdglue->mutex, FuriWaitForever) == FuriStatusOk);
  73. if(callback) {
  74. FuriStdglueCallbackDict_set_at(
  75. furi_stdglue->thread_outputs, (uint32_t)task_id, callback);
  76. } else {
  77. FuriStdglueCallbackDict_erase(furi_stdglue->thread_outputs, (uint32_t)task_id);
  78. }
  79. furi_check(furi_mutex_release(furi_stdglue->mutex) == FuriStatusOk);
  80. return true;
  81. } else {
  82. return false;
  83. }
  84. }
  85. void __malloc_lock(struct _reent* REENT) {
  86. UNUSED(REENT);
  87. vTaskSuspendAll();
  88. }
  89. void __malloc_unlock(struct _reent* REENT) {
  90. UNUSED(REENT);
  91. xTaskResumeAll();
  92. }