furi_hal_spi.c

#include <furi.h>
#include <furi_hal_spi.h>
#include <furi_hal_resources.h>
#include <furi_hal_power.h>
#include <furi_hal_interrupt.h>

#include <stm32wbxx_ll_dma.h>
#include <stm32wbxx_ll_spi.h>
#include <stm32wbxx_ll_utils.h>
#include <stm32wbxx_ll_cortex.h>

#define TAG "FuriHalSpi"

#define SPI_DMA DMA2
#define SPI_DMA_RX_CHANNEL LL_DMA_CHANNEL_3
#define SPI_DMA_TX_CHANNEL LL_DMA_CHANNEL_4
#define SPI_DMA_RX_IRQ FuriHalInterruptIdDma2Ch3
#define SPI_DMA_TX_IRQ FuriHalInterruptIdDma2Ch4
#define SPI_DMA_RX_DEF SPI_DMA, SPI_DMA_RX_CHANNEL
#define SPI_DMA_TX_DEF SPI_DMA, SPI_DMA_TX_CHANNEL

// For simplicity, I assume that only one SPI DMA transaction can occur at a time.
static FuriSemaphore* spi_dma_lock = NULL;
static FuriSemaphore* spi_dma_completed = NULL;

void furi_hal_spi_dma_init() {
    spi_dma_lock = furi_semaphore_alloc(1, 1);
    spi_dma_completed = furi_semaphore_alloc(1, 1);
}
void furi_hal_spi_bus_init(FuriHalSpiBus* bus) {
    furi_assert(bus);
    bus->callback(bus, FuriHalSpiBusEventInit);
}

void furi_hal_spi_bus_deinit(FuriHalSpiBus* bus) {
    furi_assert(bus);
    bus->callback(bus, FuriHalSpiBusEventDeinit);
}

void furi_hal_spi_bus_handle_init(FuriHalSpiBusHandle* handle) {
    furi_assert(handle);
    handle->callback(handle, FuriHalSpiBusHandleEventInit);
}

void furi_hal_spi_bus_handle_deinit(FuriHalSpiBusHandle* handle) {
    furi_assert(handle);
    handle->callback(handle, FuriHalSpiBusHandleEventDeinit);
}
void furi_hal_spi_acquire(FuriHalSpiBusHandle* handle) {
    furi_assert(handle);

    furi_hal_power_insomnia_enter();

    // Lock and activate the bus, then bind and activate this handle
    handle->bus->callback(handle->bus, FuriHalSpiBusEventLock);
    handle->bus->callback(handle->bus, FuriHalSpiBusEventActivate);

    furi_assert(handle->bus->current_handle == NULL);

    handle->bus->current_handle = handle;
    handle->callback(handle, FuriHalSpiBusHandleEventActivate);
}

void furi_hal_spi_release(FuriHalSpiBusHandle* handle) {
    furi_assert(handle);
    furi_assert(handle->bus->current_handle == handle);

    // Handle event and unset handle
    handle->callback(handle, FuriHalSpiBusHandleEventDeactivate);
    handle->bus->current_handle = NULL;

    // Bus events
    handle->bus->callback(handle->bus, FuriHalSpiBusEventDeactivate);
    handle->bus->callback(handle->bus, FuriHalSpiBusEventUnlock);

    furi_hal_power_insomnia_exit();
}
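
// Usage sketch (illustrative, not part of the original file): a typical caller
// brackets every transfer with acquire/release so the bus is configured for its
// handle and other users are blocked for the duration. The handle is supplied
// by the caller; the 100 ms timeout is a placeholder choice for this example.
static inline bool spi_example_write(FuriHalSpiBusHandle* handle, const uint8_t* data, size_t size) {
    furi_hal_spi_acquire(handle); // lock + activate the bus, activate the handle
    bool ok = furi_hal_spi_bus_tx(handle, data, size, 100); // blocking TX, 100 ms timeout
    furi_hal_spi_release(handle); // deactivate the handle, deactivate + unlock the bus
    return ok;
}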
static void furi_hal_spi_bus_end_txrx(FuriHalSpiBusHandle* handle, uint32_t timeout) {
    UNUSED(timeout); // FIXME: the busy-wait loops below do not honor the timeout
    // Wait for the TX FIFO to drain and the peripheral to go idle,
    // then flush anything left in the RX FIFO
    while(LL_SPI_GetTxFIFOLevel(handle->bus->spi) != LL_SPI_TX_FIFO_EMPTY)
        ;
    while(LL_SPI_IsActiveFlag_BSY(handle->bus->spi))
        ;
    while(LL_SPI_GetRxFIFOLevel(handle->bus->spi) != LL_SPI_RX_FIFO_EMPTY) {
        LL_SPI_ReceiveData8(handle->bus->spi);
    }
}
bool furi_hal_spi_bus_rx(
    FuriHalSpiBusHandle* handle,
    uint8_t* buffer,
    size_t size,
    uint32_t timeout) {
    furi_assert(handle);
    furi_assert(handle->bus->current_handle == handle);
    furi_assert(buffer);
    furi_assert(size > 0);

    return furi_hal_spi_bus_trx(handle, buffer, buffer, size, timeout);
}

bool furi_hal_spi_bus_tx(
    FuriHalSpiBusHandle* handle,
    const uint8_t* buffer,
    size_t size,
    uint32_t timeout) {
    furi_assert(handle);
    furi_assert(handle->bus->current_handle == handle);
    furi_assert(buffer);
    furi_assert(size > 0);

    bool ret = true;

    while(size > 0) {
        if(LL_SPI_IsActiveFlag_TXE(handle->bus->spi)) {
            LL_SPI_TransmitData8(handle->bus->spi, *buffer);
            buffer++;
            size--;
        }
    }

    furi_hal_spi_bus_end_txrx(handle, timeout);
    LL_SPI_ClearFlag_OVR(handle->bus->spi);

    return ret;
}
bool furi_hal_spi_bus_trx(
    FuriHalSpiBusHandle* handle,
    const uint8_t* tx_buffer,
    uint8_t* rx_buffer,
    size_t size,
    uint32_t timeout) {
    furi_assert(handle);
    furi_assert(handle->bus->current_handle == handle);
    furi_assert(size > 0);

    bool ret = true;
    size_t tx_size = size;
    // Keep at most one byte in flight: a new byte is written only after the
    // previous byte's response has been read back from the RX FIFO
    bool tx_allowed = true;

    while(size > 0) {
        if(tx_size > 0 && LL_SPI_IsActiveFlag_TXE(handle->bus->spi) && tx_allowed) {
            if(tx_buffer) {
                LL_SPI_TransmitData8(handle->bus->spi, *tx_buffer);
                tx_buffer++;
            } else {
                // No TX data supplied: clock out 0xFF dummy bytes to drive the transfer
                LL_SPI_TransmitData8(handle->bus->spi, 0xFF);
            }

            tx_size--;
            tx_allowed = false;
        }

        if(LL_SPI_IsActiveFlag_RXNE(handle->bus->spi)) {
            if(rx_buffer) {
                *rx_buffer = LL_SPI_ReceiveData8(handle->bus->spi);
                rx_buffer++;
            } else {
                // Caller does not want RX data: read and discard to keep the FIFO clear
                LL_SPI_ReceiveData8(handle->bus->spi);
            }

            size--;
            tx_allowed = true;
        }
    }

    furi_hal_spi_bus_end_txrx(handle, timeout);

    return ret;
}
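
// Full-duplex usage sketch (illustrative, not part of the original file): many
// devices return data while the command byte is still being shifted out, so the
// response trails the command by one byte in the RX stream. The command value,
// the 3-byte response length and the 100 ms timeout are assumptions made for
// this example; the real framing depends on the attached device.
static inline bool spi_example_read_id(FuriHalSpiBusHandle* handle, uint8_t cmd, uint8_t id[3]) {
    uint8_t tx[4] = {cmd, 0x00, 0x00, 0x00}; // command, then dummy clocks for the response
    uint8_t rx[4] = {0};

    furi_hal_spi_acquire(handle);
    bool ok = furi_hal_spi_bus_trx(handle, tx, rx, sizeof(tx), 100);
    furi_hal_spi_release(handle);

    if(ok) {
        id[0] = rx[1]; // rx[0] was clocked in while cmd was going out
        id[1] = rx[2];
        id[2] = rx[3];
    }
    return ok;
}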
// DMA transfer-complete ISR: clears the channel flag and releases the
// completion semaphore that furi_hal_spi_bus_trx_dma() is waiting on
static void spi_dma_isr() {
#if SPI_DMA_RX_CHANNEL == LL_DMA_CHANNEL_3
    if(LL_DMA_IsActiveFlag_TC3(SPI_DMA) && LL_DMA_IsEnabledIT_TC(SPI_DMA_RX_DEF)) {
        LL_DMA_ClearFlag_TC3(SPI_DMA);
        furi_check(furi_semaphore_release(spi_dma_completed) == FuriStatusOk);
    }
#else
#error Update this code. Would you kindly?
#endif

#if SPI_DMA_TX_CHANNEL == LL_DMA_CHANNEL_4
    if(LL_DMA_IsActiveFlag_TC4(SPI_DMA) && LL_DMA_IsEnabledIT_TC(SPI_DMA_TX_DEF)) {
        LL_DMA_ClearFlag_TC4(SPI_DMA);
        furi_check(furi_semaphore_release(spi_dma_completed) == FuriStatusOk);
    }
#else
#error Update this code. Would you kindly?
#endif
}
bool furi_hal_spi_bus_trx_dma(
    FuriHalSpiBusHandle* handle,
    uint8_t* tx_buffer,
    uint8_t* rx_buffer,
    size_t size,
    uint32_t timeout_ms) {
    furi_assert(handle);
    furi_assert(handle->bus->current_handle == handle);
    furi_assert(size > 0);

    // If scheduler is not running, use blocking mode
    if(xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
        return furi_hal_spi_bus_trx(handle, tx_buffer, rx_buffer, size, timeout_ms);
    }

    // Lock DMA
    furi_check(furi_semaphore_acquire(spi_dma_lock, FuriWaitForever) == FuriStatusOk);

    const uint32_t dma_dummy_u32 = 0xFFFFFFFF;

    bool ret = true;
    SPI_TypeDef* spi = handle->bus->spi;

    uint32_t dma_rx_req;
    uint32_t dma_tx_req;

    if(spi == SPI1) {
        dma_rx_req = LL_DMAMUX_REQ_SPI1_RX;
        dma_tx_req = LL_DMAMUX_REQ_SPI1_TX;
    } else if(spi == SPI2) {
        dma_rx_req = LL_DMAMUX_REQ_SPI2_RX;
        dma_tx_req = LL_DMAMUX_REQ_SPI2_TX;
    } else {
        furi_crash(NULL);
    }

    if(rx_buffer == NULL) {
        // Only TX mode, do not use RX channel
        LL_DMA_InitTypeDef dma_config = {0};
        dma_config.PeriphOrM2MSrcAddress = (uint32_t) & (spi->DR);
        dma_config.MemoryOrM2MDstAddress = (uint32_t)tx_buffer;
        dma_config.Direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH;
        dma_config.Mode = LL_DMA_MODE_NORMAL;
        dma_config.PeriphOrM2MSrcIncMode = LL_DMA_PERIPH_NOINCREMENT;
        dma_config.MemoryOrM2MDstIncMode = LL_DMA_MEMORY_INCREMENT;
        dma_config.PeriphOrM2MSrcDataSize = LL_DMA_PDATAALIGN_BYTE;
        dma_config.MemoryOrM2MDstDataSize = LL_DMA_MDATAALIGN_BYTE;
        dma_config.NbData = size;
        dma_config.PeriphRequest = dma_tx_req;
        dma_config.Priority = LL_DMA_PRIORITY_MEDIUM;
        LL_DMA_Init(SPI_DMA_TX_DEF, &dma_config);

#if SPI_DMA_TX_CHANNEL == LL_DMA_CHANNEL_4
        LL_DMA_ClearFlag_TC4(SPI_DMA);
#else
#error Update this code. Would you kindly?
#endif

        furi_hal_interrupt_set_isr(SPI_DMA_TX_IRQ, spi_dma_isr, NULL);

        bool dma_tx_was_enabled = LL_SPI_IsEnabledDMAReq_TX(spi);
        if(!dma_tx_was_enabled) {
            LL_SPI_EnableDMAReq_TX(spi);
        }

        // acquire semaphore before enabling DMA
        furi_check(furi_semaphore_acquire(spi_dma_completed, timeout_ms) == FuriStatusOk);

        LL_DMA_EnableIT_TC(SPI_DMA_TX_DEF);
        LL_DMA_EnableChannel(SPI_DMA_TX_DEF);

        // and wait for it to be released (DMA transfer complete)
        if(furi_semaphore_acquire(spi_dma_completed, timeout_ms) != FuriStatusOk) {
            ret = false;
            FURI_LOG_E(TAG, "DMA timeout\r\n");
        }
        // release semaphore, because we are using it as a flag
        furi_semaphore_release(spi_dma_completed);

        LL_DMA_DisableIT_TC(SPI_DMA_TX_DEF);
        LL_DMA_DisableChannel(SPI_DMA_TX_DEF);

        if(!dma_tx_was_enabled) {
            LL_SPI_DisableDMAReq_TX(spi);
        }

        furi_hal_interrupt_set_isr(SPI_DMA_TX_IRQ, NULL, NULL);
        LL_DMA_DeInit(SPI_DMA_TX_DEF);
    } else {
        // TRX or RX mode, use both channels
        uint32_t tx_mem_increase_mode;

        if(tx_buffer == NULL) {
            // RX mode, use dummy data instead of TX buffer
            tx_buffer = (uint8_t*)&dma_dummy_u32;
            tx_mem_increase_mode = LL_DMA_PERIPH_NOINCREMENT;
        } else {
            tx_mem_increase_mode = LL_DMA_MEMORY_INCREMENT;
        }

        LL_DMA_InitTypeDef dma_config = {0};
        dma_config.PeriphOrM2MSrcAddress = (uint32_t) & (spi->DR);
        dma_config.MemoryOrM2MDstAddress = (uint32_t)tx_buffer;
        dma_config.Direction = LL_DMA_DIRECTION_MEMORY_TO_PERIPH;
        dma_config.Mode = LL_DMA_MODE_NORMAL;
        dma_config.PeriphOrM2MSrcIncMode = LL_DMA_PERIPH_NOINCREMENT;
        dma_config.MemoryOrM2MDstIncMode = tx_mem_increase_mode;
        dma_config.PeriphOrM2MSrcDataSize = LL_DMA_PDATAALIGN_BYTE;
        dma_config.MemoryOrM2MDstDataSize = LL_DMA_MDATAALIGN_BYTE;
        dma_config.NbData = size;
        dma_config.PeriphRequest = dma_tx_req;
        dma_config.Priority = LL_DMA_PRIORITY_MEDIUM;
        LL_DMA_Init(SPI_DMA_TX_DEF, &dma_config);

        dma_config.PeriphOrM2MSrcAddress = (uint32_t) & (spi->DR);
        dma_config.MemoryOrM2MDstAddress = (uint32_t)rx_buffer;
        dma_config.Direction = LL_DMA_DIRECTION_PERIPH_TO_MEMORY;
        dma_config.Mode = LL_DMA_MODE_NORMAL;
        dma_config.PeriphOrM2MSrcIncMode = LL_DMA_PERIPH_NOINCREMENT;
        dma_config.MemoryOrM2MDstIncMode = LL_DMA_MEMORY_INCREMENT;
        dma_config.PeriphOrM2MSrcDataSize = LL_DMA_PDATAALIGN_BYTE;
        dma_config.MemoryOrM2MDstDataSize = LL_DMA_MDATAALIGN_BYTE;
        dma_config.NbData = size;
        dma_config.PeriphRequest = dma_rx_req;
        dma_config.Priority = LL_DMA_PRIORITY_MEDIUM;
        LL_DMA_Init(SPI_DMA_RX_DEF, &dma_config);

#if SPI_DMA_RX_CHANNEL == LL_DMA_CHANNEL_3
        LL_DMA_ClearFlag_TC3(SPI_DMA);
#else
#error Update this code. Would you kindly?
#endif

        furi_hal_interrupt_set_isr(SPI_DMA_RX_IRQ, spi_dma_isr, NULL);

        bool dma_tx_was_enabled = LL_SPI_IsEnabledDMAReq_TX(spi);
        bool dma_rx_was_enabled = LL_SPI_IsEnabledDMAReq_RX(spi);

        if(!dma_tx_was_enabled) {
            LL_SPI_EnableDMAReq_TX(spi);
        }

        if(!dma_rx_was_enabled) {
            LL_SPI_EnableDMAReq_RX(spi);
        }

        // acquire semaphore before enabling DMA
        furi_check(furi_semaphore_acquire(spi_dma_completed, timeout_ms) == FuriStatusOk);

        LL_DMA_EnableIT_TC(SPI_DMA_RX_DEF);
        LL_DMA_EnableChannel(SPI_DMA_RX_DEF);
        LL_DMA_EnableChannel(SPI_DMA_TX_DEF);

        // and wait for it to be released (DMA transfer complete)
        if(furi_semaphore_acquire(spi_dma_completed, timeout_ms) != FuriStatusOk) {
            ret = false;
            FURI_LOG_E(TAG, "DMA timeout\r\n");
        }
        // release semaphore, because we are using it as a flag
        furi_semaphore_release(spi_dma_completed);

        LL_DMA_DisableIT_TC(SPI_DMA_RX_DEF);

        LL_DMA_DisableChannel(SPI_DMA_TX_DEF);
        LL_DMA_DisableChannel(SPI_DMA_RX_DEF);

        if(!dma_tx_was_enabled) {
            LL_SPI_DisableDMAReq_TX(spi);
        }

        if(!dma_rx_was_enabled) {
            LL_SPI_DisableDMAReq_RX(spi);
        }

        furi_hal_interrupt_set_isr(SPI_DMA_RX_IRQ, NULL, NULL);

        LL_DMA_DeInit(SPI_DMA_TX_DEF);
        LL_DMA_DeInit(SPI_DMA_RX_DEF);
    }

    furi_hal_spi_bus_end_txrx(handle, timeout_ms);

    furi_check(furi_semaphore_release(spi_dma_lock) == FuriStatusOk);

    return ret;
}
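
// DMA usage sketch (illustrative, not part of the original file): an RX-only
// transfer passes NULL as tx_buffer, so the TX channel clocks out the dummy
// 0xFF pattern while the RX channel fills the caller's buffer. The 100 ms
// timeout is a placeholder choice made for this example.
static inline bool spi_example_read_dma(FuriHalSpiBusHandle* handle, uint8_t* data, size_t size) {
    furi_hal_spi_acquire(handle);
    bool ok = furi_hal_spi_bus_trx_dma(handle, NULL, data, size, 100);
    furi_hal_spi_release(handle);
    return ok;
}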