/* --------------------------------------------------------------------------
 * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Name:    cmsis_os2.c
 * Purpose: CMSIS RTOS2 wrapper for FreeRTOS
 *
 *---------------------------------------------------------------------------*/

#include <string.h>

#include <furi/common_defines.h>

#include "cmsis_os2.h"        // ::CMSIS:RTOS2
#include "cmsis_compiler.h"   // Compiler agnostic definitions

#include "os_tick.h"          // OS Tick API

#include "FreeRTOS.h"         // ARM.FreeRTOS::RTOS:Core
#include "task.h"             // ARM.FreeRTOS::RTOS:Core
#include "event_groups.h"     // ARM.FreeRTOS::RTOS:Event Groups
#include "semphr.h"           // ARM.FreeRTOS::RTOS:Core
#include "timers.h"           // ARM.FreeRTOS::RTOS:Timers

#include "freertos_mpool.h"   // osMemoryPool definitions
#include "freertos_os2.h"     // Configuration check and setup

#include CMSIS_device_header

#ifndef CMSIS_TASK_NOTIFY_INDEX
#define CMSIS_TASK_NOTIFY_INDEX 0
#endif

/*---------------------------------------------------------------------------*/
#ifndef __ARM_ARCH_6M__
#define __ARM_ARCH_6M__ 0
#endif
#ifndef __ARM_ARCH_7M__
#define __ARM_ARCH_7M__ 0
#endif
#ifndef __ARM_ARCH_7EM__
#define __ARM_ARCH_7EM__ 0
#endif
#ifndef __ARM_ARCH_8M_MAIN__
#define __ARM_ARCH_8M_MAIN__ 0
#endif
#ifndef __ARM_ARCH_7A__
#define __ARM_ARCH_7A__ 0
#endif

#if ((__ARM_ARCH_7M__ == 1U) || \
     (__ARM_ARCH_7EM__ == 1U) || \
     (__ARM_ARCH_8M_MAIN__ == 1U))
#define IS_IRQ_MASKED() ((__get_PRIMASK() != 0U) || (__get_BASEPRI() != 0U))
#elif (__ARM_ARCH_6M__ == 1U)
#define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
#elif (__ARM_ARCH_7A__ == 1U)
/* CPSR mask bits */
#define CPSR_MASKBIT_I 0x80U

#define IS_IRQ_MASKED() ((__get_CPSR() & CPSR_MASKBIT_I) != 0U)
#else
#define IS_IRQ_MASKED() (__get_PRIMASK() != 0U)
#endif

#if (__ARM_ARCH_7A__ == 1U)
/* CPSR mode bitmasks */
#define CPSR_MODE_USER   0x10U
#define CPSR_MODE_SYSTEM 0x1FU

#define IS_IRQ_MODE() ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM))
#else
#define IS_IRQ_MODE() (__get_IPSR() != 0U)
#endif

/* Limits */
#define MAX_BITS_TASK_NOTIFY  31U
#define MAX_BITS_EVENT_GROUPS 24U

#define THREAD_FLAGS_INVALID_BITS (~((1UL << MAX_BITS_TASK_NOTIFY)  - 1U))
#define EVENT_FLAGS_INVALID_BITS  (~((1UL << MAX_BITS_EVENT_GROUPS) - 1U))

/* Kernel version and identification string definition (major.minor.rev: mmnnnrrrr dec) */
#define KERNEL_VERSION (((uint32_t)tskKERNEL_VERSION_MAJOR * 10000000UL) | \
                        ((uint32_t)tskKERNEL_VERSION_MINOR *    10000UL) | \
                        ((uint32_t)tskKERNEL_VERSION_BUILD *        1UL))

#define KERNEL_ID ("FreeRTOS " tskKERNEL_VERSION_NUMBER)

/* Timer callback information structure definition */
typedef struct {
  osTimerFunc_t func;
  void         *arg;
} TimerCallback_t;

/* Kernel initialization state */
static osKernelState_t KernelState = osKernelInactive;

/*
  Heap region definition used by heap_5 variant

  Define configAPPLICATION_ALLOCATED_HEAP as nonzero value in FreeRTOSConfig.h if
  heap regions are already defined and vPortDefineHeapRegions is called in application.

  Otherwise vPortDefineHeapRegions will be called by osKernelInitialize using
  definition configHEAP_5_REGIONS as parameter. Overriding configHEAP_5_REGIONS
  is possible by defining it globally or in FreeRTOSConfig.h.
*/
#if defined(USE_FreeRTOS_HEAP_5)
#if (configAPPLICATION_ALLOCATED_HEAP == 0)
  /*
    FreeRTOS heap is not defined by the application.
    Single region of size configTOTAL_HEAP_SIZE (defined in FreeRTOSConfig.h)
    is provided by default. Define configHEAP_5_REGIONS to provide custom
    HeapRegion_t array.
  */
  #define HEAP_5_REGION_SETUP 1

  #ifndef configHEAP_5_REGIONS
    #define configHEAP_5_REGIONS xHeapRegions

    static uint8_t ucHeap[configTOTAL_HEAP_SIZE];

    static HeapRegion_t xHeapRegions[] = {
      { ucHeap, configTOTAL_HEAP_SIZE },
      { NULL,   0                     }
    };
  #else
    /* Global definition is provided to override default heap array */
    extern HeapRegion_t configHEAP_5_REGIONS[];
  #endif
#else
  /*
    The application already defined the array used for the FreeRTOS heap and
    called vPortDefineHeapRegions to initialize heap.
  */
  #define HEAP_5_REGION_SETUP 0
#endif /* configAPPLICATION_ALLOCATED_HEAP */
#endif /* USE_FreeRTOS_HEAP_5 */

/*
  Setup SVC to reset value.
*/
__STATIC_INLINE void SVC_Setup (void) {
#if (__ARM_ARCH_7A__ == 0U)
  /* Service Call interrupt might be configured before kernel start */
  /* and when its priority is lower or equal to BASEPRI, SVC instruction */
  /* causes a Hard Fault. */
  NVIC_SetPriority (SVCall_IRQn, 0U);
#endif
}

/*
  Function macro used to retrieve semaphore count from ISR
*/
#ifndef uxSemaphoreGetCountFromISR
#define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) )
#endif

/*
  Determine if CPU executes from interrupt context or if interrupts are masked.
*/
__STATIC_INLINE uint32_t IRQ_Context (void) {
  uint32_t irq;
  BaseType_t state;

  irq = 0U;

  if (IS_IRQ_MODE()) {
    /* Called from interrupt context */
    irq = 1U;
  }
  else {
    /* Get FreeRTOS scheduler state */
    state = xTaskGetSchedulerState();

    if (state != taskSCHEDULER_NOT_STARTED) {
      /* Scheduler was started */
      if (IS_IRQ_MASKED()) {
        /* Interrupts are masked */
        irq = 1U;
      }
    }
  }

  /* Return context, 0: thread context, 1: IRQ context */
  return (irq);
}

/* ==== Kernel Management Functions ==== */

/*
  Initialize the RTOS Kernel.
*/
osStatus_t osKernelInitialize (void) {
  osStatus_t stat;
  BaseType_t state;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else {
    state = xTaskGetSchedulerState();

    /* Initialize if scheduler not started and not initialized before */
    if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelInactive)) {
#if defined(USE_TRACE_EVENT_RECORDER)
      /* Initialize the trace macro debugging output channel */
      EvrFreeRTOSSetup(0U);
#endif

#if defined(USE_FreeRTOS_HEAP_5) && (HEAP_5_REGION_SETUP == 1)
      /* Initialize the memory regions when using heap_5 variant */
      vPortDefineHeapRegions (configHEAP_5_REGIONS);
#endif
      KernelState = osKernelReady;
      stat = osOK;
    } else {
      stat = osError;
    }
  }

  /* Return execution status */
  return (stat);
}

/*
  Get RTOS Kernel Information.
*/
osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size) {

  if (version != NULL) {
    /* Version encoding is major.minor.rev: mmnnnrrrr dec */
    version->api    = KERNEL_VERSION;
    version->kernel = KERNEL_VERSION;
  }

  if ((id_buf != NULL) && (id_size != 0U)) {
    /* Buffer for retrieving identification string is provided */
    if (id_size > sizeof(KERNEL_ID)) {
      id_size = sizeof(KERNEL_ID);
    }
    /* Copy kernel identification string into provided buffer */
    memcpy(id_buf, KERNEL_ID, id_size);
  }

  /* Return execution status */
  return (osOK);
}

/*
  Get the current RTOS Kernel state.
*/
osKernelState_t osKernelGetState (void) {
  osKernelState_t state;

  switch (xTaskGetSchedulerState()) {
    case taskSCHEDULER_RUNNING:
      state = osKernelRunning;
      break;

    case taskSCHEDULER_SUSPENDED:
      state = osKernelLocked;
      break;

    case taskSCHEDULER_NOT_STARTED:
    default:
      if (KernelState == osKernelReady) {
        /* Ready, osKernelInitialize was already called */
        state = osKernelReady;
      } else {
        /* Not initialized */
        state = osKernelInactive;
      }
      break;
  }

  /* Return current state */
  return (state);
}

/*
  Start the RTOS Kernel scheduler.
*/
osStatus_t osKernelStart (void) {
  osStatus_t stat;
  BaseType_t state;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else {
    state = xTaskGetSchedulerState();

    /* Start scheduler if initialized and not started before */
    if ((state == taskSCHEDULER_NOT_STARTED) && (KernelState == osKernelReady)) {
      /* Ensure SVC priority is at the reset value */
      SVC_Setup();
      /* Change state to ensure correct API flow */
      KernelState = osKernelRunning;
      /* Start the kernel scheduler */
      vTaskStartScheduler();
      stat = osOK;
    } else {
      stat = osError;
    }
  }

  /* Return execution status */
  return (stat);
}
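
/*
  Illustrative usage sketch (not part of the original file): a typical
  application start-up sequence built from the wrappers above.
  app_main_thread is a hypothetical thread function supplied by the application.

    osKernelInitialize();                            // kernel enters osKernelReady
    osThreadNew(app_main_thread, NULL, NULL);        // create at least one thread
    if (osKernelGetState() == osKernelReady) {
      osKernelStart();                               // does not return on success
    }
    for (;;) {}                                      // only reached if the start failed
*/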

/*
  Lock the RTOS Kernel scheduler.
*/
int32_t osKernelLock (void) {
  int32_t lock;

  if (IRQ_Context() != 0U) {
    lock = (int32_t)osErrorISR;
  }
  else {
    switch (xTaskGetSchedulerState()) {
      case taskSCHEDULER_SUSPENDED:
        lock = 1;
        break;

      case taskSCHEDULER_RUNNING:
        vTaskSuspendAll();
        lock = 0;
        break;

      case taskSCHEDULER_NOT_STARTED:
      default:
        lock = (int32_t)osError;
        break;
    }
  }

  /* Return previous lock state */
  return (lock);
}

/*
  Unlock the RTOS Kernel scheduler.
*/
int32_t osKernelUnlock (void) {
  int32_t lock;

  if (IRQ_Context() != 0U) {
    lock = (int32_t)osErrorISR;
  }
  else {
    switch (xTaskGetSchedulerState()) {
      case taskSCHEDULER_SUSPENDED:
        lock = 1;

        if (xTaskResumeAll() != pdTRUE) {
          if (xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) {
            lock = (int32_t)osError;
          }
        }
        break;

      case taskSCHEDULER_RUNNING:
        lock = 0;
        break;

      case taskSCHEDULER_NOT_STARTED:
      default:
        lock = (int32_t)osError;
        break;
    }
  }

  /* Return previous lock state */
  return (lock);
}

/*
  Restore the RTOS Kernel scheduler lock state.
*/
int32_t osKernelRestoreLock (int32_t lock) {

  if (IRQ_Context() != 0U) {
    lock = (int32_t)osErrorISR;
  }
  else {
    switch (xTaskGetSchedulerState()) {
      case taskSCHEDULER_SUSPENDED:
      case taskSCHEDULER_RUNNING:
        if (lock == 1) {
          vTaskSuspendAll();
        }
        else {
          if (lock != 0) {
            lock = (int32_t)osError;
          }
          else {
            if (xTaskResumeAll() != pdTRUE) {
              if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
                lock = (int32_t)osError;
              }
            }
          }
        }
        break;

      case taskSCHEDULER_NOT_STARTED:
      default:
        lock = (int32_t)osError;
        break;
    }
  }

  /* Return new lock state */
  return (lock);
}
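
/*
  Illustrative usage sketch (not part of the original file): guarding a short
  critical section with the lock/restore pair so that nesting is preserved.

    int32_t lock = osKernelLock();        // previous lock state, or a negative osStatus_t on error
    if (lock >= 0) {
      // ... access data shared between threads (keep this section short) ...
      osKernelRestoreLock(lock);          // restore whatever lock state was active before
    }
*/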

/*
  Get the RTOS kernel tick count.
*/
uint32_t osKernelGetTickCount (void) {
  TickType_t ticks;

  if (IRQ_Context() != 0U) {
    ticks = xTaskGetTickCountFromISR();
  } else {
    ticks = xTaskGetTickCount();
  }

  /* Return kernel tick count */
  return (ticks);
}

/*
  Get the RTOS kernel tick frequency.
*/
uint32_t osKernelGetTickFreq (void) {
  /* Return frequency in hertz */
  return (configTICK_RATE_HZ);
}

/*
  Get the RTOS kernel system timer count.
*/
uint32_t osKernelGetSysTimerCount (void) {
  TickType_t ticks;
  uint32_t val;

  FURI_CRITICAL_ENTER();

  ticks = xTaskGetTickCount();
  val   = OS_Tick_GetCount();

  /* Update tick count and timer value when timer overflows */
  if (OS_Tick_GetOverflow() != 0U) {
    val = OS_Tick_GetCount();
    ticks++;
  }
  val += ticks * OS_Tick_GetInterval();

  FURI_CRITICAL_EXIT();

  /* Return system timer count */
  return (val);
}

/*
  Get the RTOS kernel system timer frequency.
*/
uint32_t osKernelGetSysTimerFreq (void) {
  /* Return frequency in hertz */
  return (configCPU_CLOCK_HZ);
}

/* ==== Thread Management Functions ==== */

/*
  Create a thread and add it to Active Threads.

  Limitations:
  - The memory for control block and stack must be provided in the osThreadAttr_t
    structure in order to allocate object statically.
  - Attribute osThreadJoinable is not supported, NULL is returned if used.
*/
osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
  const char *name;
  uint32_t stack;
  TaskHandle_t hTask;
  UBaseType_t prio;
  int32_t mem;

  hTask = NULL;

  if ((IRQ_Context() == 0U) && (func != NULL)) {
    stack = configMINIMAL_STACK_SIZE;
    prio  = (UBaseType_t)osPriorityNormal;

    name = NULL;
    mem  = -1;

    if (attr != NULL) {
      if (attr->name != NULL) {
        name = attr->name;
      }
      if (attr->priority != osPriorityNone) {
        prio = (UBaseType_t)attr->priority;
      }

      if ((prio < osPriorityIdle) || (prio > osPriorityISR) || ((attr->attr_bits & osThreadJoinable) == osThreadJoinable)) {
        /* Invalid priority or unsupported osThreadJoinable attribute used */
        return (NULL);
      }

      if (attr->stack_size > 0U) {
        /* In FreeRTOS stack is not in bytes, but in sizeof(StackType_t) which is 4 on ARM ports. */
        /* Stack size should be therefore 4 byte aligned in order to avoid division caused side effects */
        stack = attr->stack_size / sizeof(StackType_t);
      }

      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTask_t)) &&
          (attr->stack_mem != NULL) && (attr->stack_size > 0U)) {
        /* The memory for control block and stack is provided, use static object */
        mem = 1;
      }
      else {
        if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) && (attr->stack_mem == NULL)) {
          /* Control block and stack memory will be allocated from the dynamic pool */
          mem = 0;
        }
      }
    }
    else {
      mem = 0;
    }

    if (mem == 1) {
#if (configSUPPORT_STATIC_ALLOCATION == 1)
      hTask = xTaskCreateStatic ((TaskFunction_t)func, name, stack, argument, prio,
                                 (StackType_t *)attr->stack_mem, (StaticTask_t *)attr->cb_mem);
#endif
    }
    else {
      if (mem == 0) {
#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
        if (xTaskCreate ((TaskFunction_t)func, name, (configSTACK_DEPTH_TYPE)stack, argument, prio, &hTask) != pdPASS) {
          hTask = NULL;
        }
#endif
      }
    }
  }

  /* Return thread ID */
  return ((osThreadId_t)hTask);
}
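
/*
  Illustrative usage sketch (not part of the original file): creating a thread
  with explicit, statically allocated resources. worker_thread, worker_stack
  and worker_tcb are hypothetical application-side names.

    static uint32_t     worker_stack[256];     // stack_size below is given in bytes
    static StaticTask_t worker_tcb;

    const osThreadAttr_t worker_attr = {
      .name       = "worker",
      .stack_mem  = worker_stack,
      .stack_size = sizeof(worker_stack),
      .cb_mem     = &worker_tcb,
      .cb_size    = sizeof(worker_tcb),
      .priority   = osPriorityNormal,
    };

    osThreadId_t id = osThreadNew(worker_thread, NULL, &worker_attr);
    // id == NULL here means invalid attributes or an allocation failure
*/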

/*
  Get name of a thread.
*/
const char *osThreadGetName (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  const char *name;

  if ((IRQ_Context() != 0U) || (hTask == NULL)) {
    name = NULL;
  } else if (osKernelGetState() == osKernelRunning) {
    name = pcTaskGetName (hTask);
  } else {
    name = NULL;
  }

  /* Return name as null-terminated string */
  return (name);
}

/*
  Return the thread ID of the current running thread.
*/
osThreadId_t osThreadGetId (void) {
  osThreadId_t id;

  id = (osThreadId_t)xTaskGetCurrentTaskHandle();

  /* Return thread ID */
  return (id);
}

/*
  Get current thread state of a thread.
*/
osThreadState_t osThreadGetState (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osThreadState_t state;

  if ((IRQ_Context() != 0U) || (hTask == NULL)) {
    state = osThreadError;
  }
  else {
    switch (eTaskGetState (hTask)) {
      case eRunning:   state = osThreadRunning;    break;
      case eReady:     state = osThreadReady;      break;
      case eBlocked:
      case eSuspended: state = osThreadBlocked;    break;
      case eDeleted:   state = osThreadTerminated; break;
      case eInvalid:
      default:         state = osThreadError;      break;
    }
  }

  /* Return current thread state */
  return (state);
}

/*
  Get available stack space of a thread based on stack watermark recording during execution.
*/
uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  uint32_t sz;

  if ((IRQ_Context() != 0U) || (hTask == NULL)) {
    sz = 0U;
  } else {
    sz = (uint32_t)(uxTaskGetStackHighWaterMark(hTask) * sizeof(StackType_t));
  }

  /* Return remaining stack space in bytes */
  return (sz);
}

/*
  Change priority of a thread.
*/
osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if ((hTask == NULL) || (priority < osPriorityIdle) || (priority > osPriorityISR)) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    vTaskPrioritySet (hTask, (UBaseType_t)priority);
  }

  /* Return execution status */
  return (stat);
}

/*
  Get current priority of a thread.
*/
osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osPriority_t prio;

  if ((IRQ_Context() != 0U) || (hTask == NULL)) {
    prio = osPriorityError;
  } else {
    prio = (osPriority_t)((int32_t)uxTaskPriorityGet (hTask));
  }

  /* Return current thread priority */
  return (prio);
}

/*
  Pass control to next thread that is in state READY.
*/
osStatus_t osThreadYield (void) {
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  } else {
    stat = osOK;
    taskYIELD();
  }

  /* Return execution status */
  return (stat);
}

#if (configUSE_OS2_THREAD_SUSPEND_RESUME == 1)
/*
  Suspend execution of a thread.
*/
osStatus_t osThreadSuspend (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hTask == NULL) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    vTaskSuspend (hTask);
  }

  /* Return execution status */
  return (stat);
}

/*
  Resume execution of a thread.
*/
osStatus_t osThreadResume (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hTask == NULL) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    vTaskResume (hTask);
  }

  /* Return execution status */
  return (stat);
}
#endif /* (configUSE_OS2_THREAD_SUSPEND_RESUME == 1) */

/*
  Terminate execution of current running thread.
*/
__NO_RETURN void osThreadExit (void) {
#ifndef USE_FreeRTOS_HEAP_1
  vTaskDelete (NULL);
#endif
  for (;;);
}

/*
  Terminate execution of a thread.
*/
osStatus_t osThreadTerminate (osThreadId_t thread_id) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  osStatus_t stat;
#ifndef USE_FreeRTOS_HEAP_1
  eTaskState tstate;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hTask == NULL) {
    stat = osErrorParameter;
  }
  else {
    tstate = eTaskGetState (hTask);

    if (tstate != eDeleted) {
      stat = osOK;
      vTaskDelete (hTask);
    } else {
      stat = osErrorResource;
    }
  }
#else
  stat = osError;
#endif

  /* Return execution status */
  return (stat);
}

/*
  Get number of active threads.
*/
uint32_t osThreadGetCount (void) {
  uint32_t count;

  if (IRQ_Context() != 0U) {
    count = 0U;
  } else {
    count = uxTaskGetNumberOfTasks();
  }

  /* Return number of active threads */
  return (count);
}

#if (configUSE_OS2_THREAD_ENUMERATE == 1)
/*
  Enumerate active threads.
*/
uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
  uint32_t i, count;
  TaskStatus_t *task;

  if ((IRQ_Context() != 0U) || (thread_array == NULL) || (array_items == 0U)) {
    count = 0U;
  } else {
    vTaskSuspendAll();

    /* Allocate memory on heap to temporarily store TaskStatus_t information */
    count = uxTaskGetNumberOfTasks();
    task  = pvPortMalloc (count * sizeof(TaskStatus_t));

    if (task != NULL) {
      /* Retrieve task status information */
      count = uxTaskGetSystemState (task, count, NULL);

      /* Copy handles from task status array into provided thread array */
      for (i = 0U; (i < count) && (i < array_items); i++) {
        thread_array[i] = (osThreadId_t)task[i].xHandle;
      }
      count = i;
    }
    (void)xTaskResumeAll();

    vPortFree (task);
  }

  /* Return number of enumerated threads */
  return (count);
}
#endif /* (configUSE_OS2_THREAD_ENUMERATE == 1) */

/* ==== Thread Flags Functions ==== */

#if (configUSE_OS2_THREAD_FLAGS == 1)
/*
  Set the specified Thread Flags of a thread.
*/
uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
  TaskHandle_t hTask = (TaskHandle_t)thread_id;
  uint32_t rflags;
  BaseType_t yield;

  if ((hTask == NULL) || ((flags & THREAD_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else {
    rflags = (uint32_t)osError;

    if (IRQ_Context() != 0U) {
      yield = pdFALSE;

      (void)xTaskNotifyIndexedFromISR (hTask, CMSIS_TASK_NOTIFY_INDEX, flags, eSetBits, &yield);
      (void)xTaskNotifyAndQueryIndexedFromISR (hTask, CMSIS_TASK_NOTIFY_INDEX, 0, eNoAction, &rflags, NULL);

      portYIELD_FROM_ISR (yield);
    }
    else {
      (void)xTaskNotifyIndexed (hTask, CMSIS_TASK_NOTIFY_INDEX, flags, eSetBits);
      (void)xTaskNotifyAndQueryIndexed (hTask, CMSIS_TASK_NOTIFY_INDEX, 0, eNoAction, &rflags);
    }
  }

  /* Return flags after setting */
  return (rflags);
}

/*
  Clear the specified Thread Flags of current running thread.
*/
uint32_t osThreadFlagsClear (uint32_t flags) {
  TaskHandle_t hTask;
  uint32_t rflags, cflags;

  if (IRQ_Context() != 0U) {
    rflags = (uint32_t)osErrorISR;
  }
  else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
    rflags = (uint32_t)osErrorParameter;
  }
  else {
    hTask = xTaskGetCurrentTaskHandle();

    if (xTaskNotifyAndQueryIndexed (hTask, CMSIS_TASK_NOTIFY_INDEX, 0, eNoAction, &cflags) == pdPASS) {
      rflags = cflags;
      cflags &= ~flags;

      if (xTaskNotifyIndexed (hTask, CMSIS_TASK_NOTIFY_INDEX, cflags, eSetValueWithOverwrite) != pdPASS) {
        rflags = (uint32_t)osError;
      }
    }
    else {
      rflags = (uint32_t)osError;
    }
  }

  /* Return flags before clearing */
  return (rflags);
}

/*
  Get the current Thread Flags of current running thread.
*/
uint32_t osThreadFlagsGet (void) {
  TaskHandle_t hTask;
  uint32_t rflags;

  if (IRQ_Context() != 0U) {
    rflags = (uint32_t)osErrorISR;
  }
  else {
    hTask = xTaskGetCurrentTaskHandle();

    if (xTaskNotifyAndQueryIndexed (hTask, CMSIS_TASK_NOTIFY_INDEX, 0, eNoAction, &rflags) != pdPASS) {
      rflags = (uint32_t)osError;
    }
  }

  /* Return current flags */
  return (rflags);
}

/*
  Wait for one or more Thread Flags of the current running thread to become signaled.
*/
uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
  uint32_t rflags, nval;
  uint32_t clear;
  TickType_t t0, td, tout;
  BaseType_t rval;

  if (IRQ_Context() != 0U) {
    rflags = (uint32_t)osErrorISR;
  }
  else if ((flags & THREAD_FLAGS_INVALID_BITS) != 0U) {
    rflags = (uint32_t)osErrorParameter;
  }
  else {
    if ((options & osFlagsNoClear) == osFlagsNoClear) {
      clear = 0U;
    } else {
      clear = flags;
    }

    rflags = 0U;
    tout   = timeout;

    t0 = xTaskGetTickCount();
    do {
      rval = xTaskNotifyWaitIndexed (CMSIS_TASK_NOTIFY_INDEX, 0, clear, &nval, tout);

      if (rval == pdPASS) {
        rflags &= flags;
        rflags |= nval;

        if ((options & osFlagsWaitAll) == osFlagsWaitAll) {
          if ((flags & rflags) == flags) {
            break;
          } else {
            if (timeout == 0U) {
              rflags = (uint32_t)osErrorResource;
              break;
            }
          }
        }
        else {
          if ((flags & rflags) != 0) {
            break;
          } else {
            if (timeout == 0U) {
              rflags = (uint32_t)osErrorResource;
              break;
            }
          }
        }

        /* Update timeout */
        td = xTaskGetTickCount() - t0;

        if (td > timeout) {
          tout = 0;
        } else {
          tout = timeout - td;
        }
      }
      else {
        if (timeout == 0) {
          rflags = (uint32_t)osErrorResource;
        } else {
          rflags = (uint32_t)osErrorTimeout;
        }
      }
    }
    while (rval != pdFAIL);
  }

  /* Return flags before clearing */
  return (rflags);
}
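
/*
  Illustrative usage sketch (not part of the original file): a producer thread
  or ISR signalling a consumer thread via thread flags. consumer_id and
  FLAG_RX_DONE are hypothetical application-side names.

    #define FLAG_RX_DONE (1UL << 0)

    // producer side (thread or ISR):
    osThreadFlagsSet(consumer_id, FLAG_RX_DONE);

    // consumer side:
    uint32_t flags = osThreadFlagsWait(FLAG_RX_DONE, osFlagsWaitAny, osWaitForever);
    if ((flags & osFlagsError) == 0U) {
      // FLAG_RX_DONE was signalled and has been cleared on exit
    }
*/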

#endif /* (configUSE_OS2_THREAD_FLAGS == 1) */

/* ==== Generic Wait Functions ==== */

/*
  Wait for Timeout (Time Delay).
*/
osStatus_t osDelay (uint32_t ticks) {
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else {
    stat = osOK;

    if (ticks != 0U) {
      vTaskDelay(ticks);
    }
  }

  /* Return execution status */
  return (stat);
}

/*
  Wait until specified time.
*/
osStatus_t osDelayUntil (uint32_t ticks) {
  TickType_t tcnt, delay;
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else {
    stat = osOK;
    tcnt = xTaskGetTickCount();

    /* Determine remaining number of ticks to delay */
    delay = (TickType_t)ticks - tcnt;

    /* Check if target tick has not expired */
    if ((delay != 0U) && (0 == (delay >> (8 * sizeof(TickType_t) - 1)))) {
      if (xTaskDelayUntil (&tcnt, delay) == pdFALSE) {
        /* Did not delay */
        stat = osError;
      }
    }
    else {
      /* No delay or already expired */
      stat = osErrorParameter;
    }
  }

  /* Return execution status */
  return (stat);
}
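
/*
  Illustrative usage sketch (not part of the original file): a fixed-rate loop
  built from osKernelGetTickCount and osDelayUntil, assuming a 1 kHz tick so
  that 100 ticks correspond to 100 ms. do_periodic_work is a hypothetical
  application function.

    uint32_t next_wake = osKernelGetTickCount();
    for (;;) {
      do_periodic_work();
      next_wake += 100U;                  // absolute tick of the next period
      osDelayUntil(next_wake);            // returns osErrorParameter if already expired
    }
*/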

/* ==== Timer Management Functions ==== */

#if (configUSE_OS2_TIMER == 1)

static void TimerCallback (TimerHandle_t hTimer) {
  TimerCallback_t *callb;

  /* Retrieve pointer to callback function and argument */
  callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);

  /* Remove dynamic allocation flag */
  callb = (TimerCallback_t *)((uint32_t)callb & ~1U);

  if (callb != NULL) {
    callb->func (callb->arg);
  }
}

/*
  Create and Initialize a timer.
*/
osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
  const char *name;
  TimerHandle_t hTimer;
  TimerCallback_t *callb;
  UBaseType_t reload;
  int32_t mem;
  uint32_t callb_dyn;

  hTimer = NULL;

  if ((IRQ_Context() == 0U) && (func != NULL)) {
    callb     = NULL;
    callb_dyn = 0U;

#if (configSUPPORT_STATIC_ALLOCATION == 1)
    /* Static memory allocation is available: check if memory for control block */
    /* is provided and if it also contains space for callback and its argument */
    if ((attr != NULL) && (attr->cb_mem != NULL)) {
      if (attr->cb_size >= (sizeof(StaticTimer_t) + sizeof(TimerCallback_t))) {
        callb = (TimerCallback_t *)((uint32_t)attr->cb_mem + sizeof(StaticTimer_t));
      }
    }
#endif

#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
    /* Dynamic memory allocation is available: if memory for callback and */
    /* its argument is not provided, allocate it from dynamic memory pool */
    if (callb == NULL) {
      callb = (TimerCallback_t *)pvPortMalloc (sizeof(TimerCallback_t));

      if (callb != NULL) {
        /* Callback memory was allocated from dynamic pool, set flag */
        callb_dyn = 1U;
      }
    }
#endif

    if (callb != NULL) {
      callb->func = func;
      callb->arg  = argument;

      if (type == osTimerOnce) {
        reload = pdFALSE;
      } else {
        reload = pdTRUE;
      }

      mem  = -1;
      name = NULL;

      if (attr != NULL) {
        if (attr->name != NULL) {
          name = attr->name;
        }

        if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticTimer_t))) {
          /* The memory for control block is provided, use static object */
          mem = 1;
        }
        else {
          if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
            /* Control block will be allocated from the dynamic pool */
            mem = 0;
          }
        }
      }
      else {
        mem = 0;
      }

      /* Store callback memory dynamic allocation flag */
      callb = (TimerCallback_t *)((uint32_t)callb | callb_dyn);
      /*
        TimerCallback function is always provided as a callback and is used to call application
        specified function with its argument both stored in structure callb.
      */
      if (mem == 1) {
#if (configSUPPORT_STATIC_ALLOCATION == 1)
        hTimer = xTimerCreateStatic (name, 1, reload, callb, TimerCallback, (StaticTimer_t *)attr->cb_mem);
#endif
      }
      else {
        if (mem == 0) {
#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
          hTimer = xTimerCreate (name, 1, reload, callb, TimerCallback);
#endif
        }
      }

#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
      if ((hTimer == NULL) && (callb != NULL) && (callb_dyn == 1U)) {
        /* Failed to create a timer, release allocated resources */
        callb = (TimerCallback_t *)((uint32_t)callb & ~1U);
        vPortFree (callb);
      }
#endif
    }
  }

  /* Return timer ID */
  return ((osTimerId_t)hTimer);
}

/*
  Get name of a timer.
*/
const char *osTimerGetName (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  const char *p;

  if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
    p = NULL;
  } else {
    p = pcTimerGetName (hTimer);
  }

  /* Return name as null-terminated string */
  return (p);
}

/*
  Start or restart a timer.
*/
osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hTimer == NULL) {
    stat = osErrorParameter;
  }
  else {
    if (xTimerChangePeriod (hTimer, ticks, portMAX_DELAY) == pdPASS) {
      stat = osOK;
    } else {
      stat = osErrorResource;
    }
  }

  /* Return execution status */
  return (stat);
}
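
/*
  Illustrative usage sketch (not part of the original file): a periodic
  software timer. blink_cb is a hypothetical application callback; the period
  passed to osTimerStart is in kernel ticks.

    static void blink_cb(void *arg) {
      (void)arg;
      // toggle an LED, post an event, etc.
    }

    osTimerId_t tim = osTimerNew(blink_cb, osTimerPeriodic, NULL, NULL);
    if (tim != NULL) {
      osTimerStart(tim, 500U);            // fire every 500 ticks until stopped or deleted
    }
*/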

/*
  Stop a timer.
*/
osStatus_t osTimerStop (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hTimer == NULL) {
    stat = osErrorParameter;
  }
  else {
    if (xTimerIsTimerActive (hTimer) == pdFALSE) {
      stat = osErrorResource;
    }
    else {
      if (xTimerStop (hTimer, portMAX_DELAY) == pdPASS) {
        stat = osOK;
      } else {
        stat = osError;
      }
    }
  }

  /* Return execution status */
  return (stat);
}

/*
  Check if a timer is running.
*/
uint32_t osTimerIsRunning (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  uint32_t running;

  if ((IRQ_Context() != 0U) || (hTimer == NULL)) {
    running = 0U;
  } else {
    running = (uint32_t)xTimerIsTimerActive (hTimer);
  }

  /* Return 0: not running, 1: running */
  return (running);
}

/*
  Delete a timer.
*/
osStatus_t osTimerDelete (osTimerId_t timer_id) {
  TimerHandle_t hTimer = (TimerHandle_t)timer_id;
  osStatus_t stat;
#ifndef USE_FreeRTOS_HEAP_1
#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
  TimerCallback_t *callb;
#endif

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hTimer == NULL) {
    stat = osErrorParameter;
  }
  else {
#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
    callb = (TimerCallback_t *)pvTimerGetTimerID (hTimer);
#endif

    if (xTimerDelete (hTimer, portMAX_DELAY) == pdPASS) {
#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
      if ((uint32_t)callb & 1U) {
        /* Callback memory was allocated from dynamic pool, clear flag */
        callb = (TimerCallback_t *)((uint32_t)callb & ~1U);

        /* Return allocated memory to dynamic pool */
        vPortFree (callb);
      }
#endif
      stat = osOK;
    } else {
      stat = osErrorResource;
    }
  }
#else
  stat = osError;
#endif

  /* Return execution status */
  return (stat);
}
#endif /* (configUSE_OS2_TIMER == 1) */

/* ==== Event Flags Management Functions ==== */

/*
  Create and Initialize an Event Flags object.

  Limitations:
  - Event flags are limited to 24 bits.
*/
osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
  EventGroupHandle_t hEventGroup;
  int32_t mem;

  hEventGroup = NULL;

  if (IRQ_Context() == 0U) {
    mem = -1;

    if (attr != NULL) {
      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticEventGroup_t))) {
        /* The memory for control block is provided, use static object */
        mem = 1;
      }
      else {
        if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
          /* Control block will be allocated from the dynamic pool */
          mem = 0;
        }
      }
    }
    else {
      mem = 0;
    }

    if (mem == 1) {
#if (configSUPPORT_STATIC_ALLOCATION == 1)
      hEventGroup = xEventGroupCreateStatic (attr->cb_mem);
#endif
    }
    else {
      if (mem == 0) {
#if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
        hEventGroup = xEventGroupCreate();
#endif
      }
    }
  }

  /* Return event flags ID */
  return ((osEventFlagsId_t)hEventGroup);
}

/*
  Set the specified Event Flags.

  Limitations:
  - Event flags are limited to 24 bits.
*/
uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  uint32_t rflags;
  BaseType_t yield;

  if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else if (IRQ_Context() != 0U) {
#if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
    (void)yield;
    /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsSet from ISR */
    rflags = (uint32_t)osErrorResource;
#else
    yield = pdFALSE;

    if (xEventGroupSetBitsFromISR (hEventGroup, (EventBits_t)flags, &yield) == pdFAIL) {
      rflags = (uint32_t)osErrorResource;
    } else {
      rflags = flags;
      portYIELD_FROM_ISR (yield);
    }
#endif
  }
  else {
    rflags = xEventGroupSetBits (hEventGroup, (EventBits_t)flags);
  }

  /* Return event flags after setting */
  return (rflags);
}

/*
  Clear the specified Event Flags.

  Limitations:
  - Event flags are limited to 24 bits.
*/
uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  uint32_t rflags;

  if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else if (IRQ_Context() != 0U) {
#if (configUSE_OS2_EVENTFLAGS_FROM_ISR == 0)
    /* Enable timers and xTimerPendFunctionCall function to support osEventFlagsClear from ISR */
    rflags = (uint32_t)osErrorResource;
#else
    rflags = xEventGroupGetBitsFromISR (hEventGroup);

    if (xEventGroupClearBitsFromISR (hEventGroup, (EventBits_t)flags) == pdFAIL) {
      rflags = (uint32_t)osErrorResource;
    }
    else {
      /* xEventGroupClearBitsFromISR only registers clear operation in the timer command queue. */
      /* Yield is required here otherwise clear operation might not execute in the right order. */
      /* See https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/93 for more info. */
      portYIELD_FROM_ISR (pdTRUE);
    }
#endif
  }
  else {
    rflags = xEventGroupClearBits (hEventGroup, (EventBits_t)flags);
  }

  /* Return event flags before clearing */
  return (rflags);
}

/*
  Get the current Event Flags.

  Limitations:
  - Event flags are limited to 24 bits.
*/
uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  uint32_t rflags;

  if (ef_id == NULL) {
    rflags = 0U;
  }
  else if (IRQ_Context() != 0U) {
    rflags = xEventGroupGetBitsFromISR (hEventGroup);
  }
  else {
    rflags = xEventGroupGetBits (hEventGroup);
  }

  /* Return current event flags */
  return (rflags);
}

/*
  Wait for one or more Event Flags to become signaled.

  Limitations:
  - Event flags are limited to 24 bits.
  - osEventFlagsWait cannot be called from an ISR.
*/
uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
  EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  BaseType_t wait_all;
  BaseType_t exit_clr;
  uint32_t rflags;

  if ((hEventGroup == NULL) || ((flags & EVENT_FLAGS_INVALID_BITS) != 0U)) {
    rflags = (uint32_t)osErrorParameter;
  }
  else if (IRQ_Context() != 0U) {
    rflags = (uint32_t)osErrorISR;
  }
  else {
    if (options & osFlagsWaitAll) {
      wait_all = pdTRUE;
    } else {
      wait_all = pdFAIL;
    }

    if (options & osFlagsNoClear) {
      exit_clr = pdFAIL;
    } else {
      exit_clr = pdTRUE;
    }

    rflags = xEventGroupWaitBits (hEventGroup, (EventBits_t)flags, exit_clr, wait_all, (TickType_t)timeout);

    if (options & osFlagsWaitAll) {
      if ((flags & rflags) != flags) {
        if (timeout > 0U) {
          rflags = (uint32_t)osErrorTimeout;
        } else {
          rflags = (uint32_t)osErrorResource;
        }
      }
    }
    else {
      if ((flags & rflags) == 0U) {
        if (timeout > 0U) {
          rflags = (uint32_t)osErrorTimeout;
        } else {
          rflags = (uint32_t)osErrorResource;
        }
      }
    }
  }

  /* Return event flags before clearing */
  return (rflags);
}
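
/*
  Illustrative usage sketch (not part of the original file): waiting for two
  event flags at once. EVT_READY and EVT_DATA are hypothetical application bits
  within the 24-bit limit noted above.

    #define EVT_READY (1UL << 0)
    #define EVT_DATA  (1UL << 1)

    osEventFlagsId_t evt = osEventFlagsNew(NULL);

    // signalling side (thread, or ISR if configUSE_OS2_EVENTFLAGS_FROM_ISR is enabled):
    osEventFlagsSet(evt, EVT_READY | EVT_DATA);

    // waiting side: block until both bits are set, clear them on exit
    uint32_t f = osEventFlagsWait(evt, EVT_READY | EVT_DATA, osFlagsWaitAll, osWaitForever);
    if ((f & osFlagsError) == 0U) {
      // both flags were signalled
    }
*/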
  1307. /*
  1308. Delete an Event Flags object.
  1309. */
  1310. osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
  1311. EventGroupHandle_t hEventGroup = (EventGroupHandle_t)ef_id;
  1312. osStatus_t stat;
  1313. #ifndef USE_FreeRTOS_HEAP_1
  1314. if (IRQ_Context() != 0U) {
  1315. stat = osErrorISR;
  1316. }
  1317. else if (hEventGroup == NULL) {
  1318. stat = osErrorParameter;
  1319. }
  1320. else {
  1321. stat = osOK;
  1322. vEventGroupDelete (hEventGroup);
  1323. }
  1324. #else
  1325. stat = osError;
  1326. #endif
  1327. /* Return execution status */
  1328. return (stat);
  1329. }
  1330. /* ==== Mutex Management Functions ==== */
  1331. #if (configUSE_OS2_MUTEX == 1)
  1332. /*
  1333. Create and Initialize a Mutex object.
  1334. Limitations:
  1335. - Priority inherit protocol is used by default, osMutexPrioInherit attribute is ignored.
  1336. - Robust mutex is not supported, NULL is returned if used.
  1337. */
  1338. osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
  1339. SemaphoreHandle_t hMutex;
  1340. uint32_t type;
  1341. uint32_t rmtx;
  1342. int32_t mem;
  1343. hMutex = NULL;
  1344. if (IRQ_Context() == 0U) {
  1345. if (attr != NULL) {
  1346. type = attr->attr_bits;
  1347. } else {
  1348. type = 0U;
  1349. }
  1350. if ((type & osMutexRecursive) == osMutexRecursive) {
  1351. rmtx = 1U;
  1352. } else {
  1353. rmtx = 0U;
  1354. }
  1355. if ((type & osMutexRobust) != osMutexRobust) {
  1356. mem = -1;
  1357. if (attr != NULL) {
  1358. if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
  1359. /* The memory for control block is provided, use static object */
  1360. mem = 1;
  1361. }
  1362. else {
  1363. if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
  1364. /* Control block will be allocated from the dynamic pool */
  1365. mem = 0;
  1366. }
  1367. }
  1368. }
  1369. else {
  1370. mem = 0;
  1371. }
  1372. if (mem == 1) {
  1373. #if (configSUPPORT_STATIC_ALLOCATION == 1)
  1374. if (rmtx != 0U) {
  1375. #if (configUSE_RECURSIVE_MUTEXES == 1)
  1376. hMutex = xSemaphoreCreateRecursiveMutexStatic (attr->cb_mem);
  1377. #endif
  1378. }
  1379. else {
  1380. hMutex = xSemaphoreCreateMutexStatic (attr->cb_mem);
  1381. }
  1382. #endif
  1383. }
  1384. else {
  1385. if (mem == 0) {
  1386. #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
  1387. if (rmtx != 0U) {
  1388. #if (configUSE_RECURSIVE_MUTEXES == 1)
  1389. hMutex = xSemaphoreCreateRecursiveMutex ();
  1390. #endif
  1391. } else {
  1392. hMutex = xSemaphoreCreateMutex ();
  1393. }
  1394. #endif
  1395. }
  1396. }
  1397. #if (configQUEUE_REGISTRY_SIZE > 0)
  1398. if (hMutex != NULL) {
  1399. if ((attr != NULL) && (attr->name != NULL)) {
  1400. /* Only non-NULL name objects are added to the Queue Registry */
  1401. vQueueAddToRegistry (hMutex, attr->name);
  1402. }
  1403. }
  1404. #endif
  1405. if ((hMutex != NULL) && (rmtx != 0U)) {
  1406. /* Set LSB as 'recursive mutex flag' */
  1407. hMutex = (SemaphoreHandle_t)((uint32_t)hMutex | 1U);
  1408. }
  1409. }
  1410. }
  1411. /* Return mutex ID */
  1412. return ((osMutexId_t)hMutex);
  1413. }
  1414. /*
  1415. Acquire a Mutex or timeout if it is locked.
  1416. */
  1417. osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
  1418. SemaphoreHandle_t hMutex;
  1419. osStatus_t stat;
  1420. uint32_t rmtx;
  1421. hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);
  1422. /* Extract recursive mutex flag */
  1423. rmtx = (uint32_t)mutex_id & 1U;
  1424. stat = osOK;
  1425. if (IRQ_Context() != 0U) {
  1426. stat = osErrorISR;
  1427. }
  1428. else if (hMutex == NULL) {
  1429. stat = osErrorParameter;
  1430. }
  1431. else {
  1432. if (rmtx != 0U) {
  1433. #if (configUSE_RECURSIVE_MUTEXES == 1)
  1434. if (xSemaphoreTakeRecursive (hMutex, timeout) != pdPASS) {
  1435. if (timeout != 0U) {
  1436. stat = osErrorTimeout;
  1437. } else {
  1438. stat = osErrorResource;
  1439. }
  1440. }
  1441. #endif
  1442. }
  1443. else {
  1444. if (xSemaphoreTake (hMutex, timeout) != pdPASS) {
  1445. if (timeout != 0U) {
  1446. stat = osErrorTimeout;
  1447. } else {
  1448. stat = osErrorResource;
  1449. }
  1450. }
  1451. }
  1452. }
  1453. /* Return execution status */
  1454. return (stat);
  1455. }

/*
  Release a Mutex that was acquired by osMutexAcquire.
*/
osStatus_t osMutexRelease (osMutexId_t mutex_id) {
  SemaphoreHandle_t hMutex;
  osStatus_t stat;
  uint32_t rmtx;

  hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);

  /* Extract recursive mutex flag */
  rmtx = (uint32_t)mutex_id & 1U;

  stat = osOK;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hMutex == NULL) {
    stat = osErrorParameter;
  }
  else {
    if (rmtx != 0U) {
      #if (configUSE_RECURSIVE_MUTEXES == 1)
      if (xSemaphoreGiveRecursive (hMutex) != pdPASS) {
        stat = osErrorResource;
      }
      #endif
    }
    else {
      if (xSemaphoreGive (hMutex) != pdPASS) {
        stat = osErrorResource;
      }
    }
  }

  /* Return execution status */
  return (stat);
}
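
/*
  Usage sketch (illustrative only; app_mutex_id and the guarded data are
  hypothetical): the usual acquire/release pairing around a shared resource.

    if (osMutexAcquire (app_mutex_id, osWaitForever) == osOK) {
      // ... access the shared resource ...
      (void)osMutexRelease (app_mutex_id);
    }

  Note that both calls return osErrorISR when made from interrupt context;
  only the semaphore functions below provide ISR-capable paths.
*/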

/*
  Get Thread which owns a Mutex object.
*/
osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
  SemaphoreHandle_t hMutex;
  osThreadId_t owner;

  hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);

  if ((IRQ_Context() != 0U) || (hMutex == NULL)) {
    owner = NULL;
  } else {
    owner = (osThreadId_t)xSemaphoreGetMutexHolder (hMutex);
  }

  /* Return owner thread ID */
  return (owner);
}

/*
  Delete a Mutex object.
*/
osStatus_t osMutexDelete (osMutexId_t mutex_id) {
  osStatus_t stat;
#ifndef USE_FreeRTOS_HEAP_1
  SemaphoreHandle_t hMutex;

  hMutex = (SemaphoreHandle_t)((uint32_t)mutex_id & ~1U);

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hMutex == NULL) {
    stat = osErrorParameter;
  }
  else {
    #if (configQUEUE_REGISTRY_SIZE > 0)
    vQueueUnregisterQueue (hMutex);
    #endif
    stat = osOK;
    vSemaphoreDelete (hMutex);
  }
#else
  stat = osError;
#endif

  /* Return execution status */
  return (stat);
}

#endif /* (configUSE_OS2_MUTEX == 1) */

/* ==== Semaphore Management Functions ==== */

/*
  Create and Initialize a Semaphore object.
*/
osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
  SemaphoreHandle_t hSemaphore;
  int32_t mem;

  hSemaphore = NULL;

  if ((IRQ_Context() == 0U) && (max_count > 0U) && (initial_count <= max_count)) {
    mem = -1;

    if (attr != NULL) {
      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticSemaphore_t))) {
        /* The memory for control block is provided, use static object */
        mem = 1;
      }
      else {
        if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
          /* Control block will be allocated from the dynamic pool */
          mem = 0;
        }
      }
    }
    else {
      mem = 0;
    }

    if (mem != -1) {
      if (max_count == 1U) {
        if (mem == 1) {
          #if (configSUPPORT_STATIC_ALLOCATION == 1)
          hSemaphore = xSemaphoreCreateBinaryStatic ((StaticSemaphore_t *)attr->cb_mem);
          #endif
        }
        else {
          #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
          hSemaphore = xSemaphoreCreateBinary();
          #endif
        }

        if ((hSemaphore != NULL) && (initial_count != 0U)) {
          if (xSemaphoreGive (hSemaphore) != pdPASS) {
            vSemaphoreDelete (hSemaphore);
            hSemaphore = NULL;
          }
        }
      }
      else {
        if (mem == 1) {
          #if (configSUPPORT_STATIC_ALLOCATION == 1)
          hSemaphore = xSemaphoreCreateCountingStatic (max_count, initial_count, (StaticSemaphore_t *)attr->cb_mem);
          #endif
        }
        else {
          #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
          hSemaphore = xSemaphoreCreateCounting (max_count, initial_count);
          #endif
        }
      }

      #if (configQUEUE_REGISTRY_SIZE > 0)
      if (hSemaphore != NULL) {
        if ((attr != NULL) && (attr->name != NULL)) {
          /* Only non-NULL name objects are added to the Queue Registry */
          vQueueAddToRegistry (hSemaphore, attr->name);
        }
      }
      #endif
    }
  }

  /* Return semaphore ID */
  return ((osSemaphoreId_t)hSemaphore);
}
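
/*
  Usage sketch (illustrative only; names and counts are hypothetical): a
  counting semaphore with 4 tokens, all initially available, allocated from
  the dynamic pool (attr == NULL), which exercises the
  xSemaphoreCreateCounting() path above.

    static osSemaphoreId_t slots_id;

    static void slots_init (void) {
      slots_id = osSemaphoreNew (4U, 4U, NULL);   // max_count = initial_count = 4
    }
*/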

/*
  Acquire a Semaphore token or timeout if no tokens are available.
*/
osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  osStatus_t stat;
  BaseType_t yield;

  stat = osOK;

  if (hSemaphore == NULL) {
    stat = osErrorParameter;
  }
  else if (IRQ_Context() != 0U) {
    if (timeout != 0U) {
      stat = osErrorParameter;
    }
    else {
      yield = pdFALSE;

      if (xSemaphoreTakeFromISR (hSemaphore, &yield) != pdPASS) {
        stat = osErrorResource;
      } else {
        portYIELD_FROM_ISR (yield);
      }
    }
  }
  else {
    if (xSemaphoreTake (hSemaphore, (TickType_t)timeout) != pdPASS) {
      if (timeout != 0U) {
        stat = osErrorTimeout;
      } else {
        stat = osErrorResource;
      }
    }
  }

  /* Return execution status */
  return (stat);
}

/*
  Release a Semaphore token up to the initial maximum count.
*/
osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  osStatus_t stat;
  BaseType_t yield;

  stat = osOK;

  if (hSemaphore == NULL) {
    stat = osErrorParameter;
  }
  else if (IRQ_Context() != 0U) {
    yield = pdFALSE;

    if (xSemaphoreGiveFromISR (hSemaphore, &yield) != pdTRUE) {
      stat = osErrorResource;
    } else {
      portYIELD_FROM_ISR (yield);
    }
  }
  else {
    if (xSemaphoreGive (hSemaphore) != pdPASS) {
      stat = osErrorResource;
    }
  }

  /* Return execution status */
  return (stat);
}
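
/*
  Usage sketch (illustrative only; rx_sem_id and the handler name are
  hypothetical): the common ISR-to-thread signalling pattern. The interrupt
  handler releases a token via the xSemaphoreGiveFromISR() path above, while
  the thread blocks in osSemaphoreAcquire().

    void RX_IRQHandler_example (void) {
      (void)osSemaphoreRelease (rx_sem_id);       // ISR-safe path
    }

    void rx_thread (void *argument) {
      (void)argument;
      for (;;) {
        if (osSemaphoreAcquire (rx_sem_id, osWaitForever) == osOK) {
          // ... process received data ...
        }
      }
    }
*/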

/*
  Get current Semaphore token count.
*/
uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  uint32_t count;

  if (hSemaphore == NULL) {
    count = 0U;
  }
  else if (IRQ_Context() != 0U) {
    count = (uint32_t)uxSemaphoreGetCountFromISR (hSemaphore);
  } else {
    count = (uint32_t)uxSemaphoreGetCount (hSemaphore);
  }

  /* Return number of tokens */
  return (count);
}

/*
  Delete a Semaphore object.
*/
osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
  SemaphoreHandle_t hSemaphore = (SemaphoreHandle_t)semaphore_id;
  osStatus_t stat;

#ifndef USE_FreeRTOS_HEAP_1
  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hSemaphore == NULL) {
    stat = osErrorParameter;
  }
  else {
    #if (configQUEUE_REGISTRY_SIZE > 0)
    vQueueUnregisterQueue (hSemaphore);
    #endif
    stat = osOK;
    vSemaphoreDelete (hSemaphore);
  }
#else
  stat = osError;
#endif

  /* Return execution status */
  return (stat);
}

/* ==== Message Queue Management Functions ==== */

/*
  Create and Initialize a Message Queue object.

  Limitations:
  - The memory for the control block and the message data must be provided in the
    osMessageQueueAttr_t structure in order to allocate the object statically.
*/
osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
  QueueHandle_t hQueue;
  int32_t mem;

  hQueue = NULL;

  if ((IRQ_Context() == 0U) && (msg_count > 0U) && (msg_size > 0U)) {
    mem = -1;

    if (attr != NULL) {
      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(StaticQueue_t)) &&
          (attr->mq_mem != NULL) && (attr->mq_size >= (msg_count * msg_size))) {
        /* The memory for control block and message data is provided, use static object */
        mem = 1;
      }
      else {
        if ((attr->cb_mem == NULL) && (attr->cb_size == 0U) &&
            (attr->mq_mem == NULL) && (attr->mq_size == 0U)) {
          /* Control block will be allocated from the dynamic pool */
          mem = 0;
        }
      }
    }
    else {
      mem = 0;
    }

    if (mem == 1) {
      #if (configSUPPORT_STATIC_ALLOCATION == 1)
      hQueue = xQueueCreateStatic (msg_count, msg_size, attr->mq_mem, attr->cb_mem);
      #endif
    }
    else {
      if (mem == 0) {
        #if (configSUPPORT_DYNAMIC_ALLOCATION == 1)
        hQueue = xQueueCreate (msg_count, msg_size);
        #endif
      }
    }

    #if (configQUEUE_REGISTRY_SIZE > 0)
    if (hQueue != NULL) {
      if ((attr != NULL) && (attr->name != NULL)) {
        /* Only non-NULL name objects are added to the Queue Registry */
        vQueueAddToRegistry (hQueue, attr->name);
      }
    }
    #endif
  }

  /* Return message queue ID */
  return ((osMessageQueueId_t)hQueue);
}
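
/*
  Usage sketch (illustrative only; MSG_COUNT, MsgItem_t and the buffer names
  are hypothetical): a fully static message queue. Both cb_mem/cb_size and
  mq_mem/mq_size must be provided, and mq_size must cover
  msg_count * msg_size bytes, as checked above.

    #define MSG_COUNT  8U

    typedef struct { uint8_t cmd; uint8_t data[7]; } MsgItem_t;

    static StaticQueue_t mq_cb;
    static uint8_t       mq_storage[MSG_COUNT * sizeof(MsgItem_t)];

    static const osMessageQueueAttr_t mq_attr = {
      .name    = "CmdQueue",
      .cb_mem  = &mq_cb,
      .cb_size = sizeof(mq_cb),
      .mq_mem  = mq_storage,
      .mq_size = sizeof(mq_storage)
    };

    static osMessageQueueId_t mq_id;

    static void mq_init (void) {
      mq_id = osMessageQueueNew (MSG_COUNT, sizeof(MsgItem_t), &mq_attr);
    }
*/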

/*
  Put a Message into a Queue or timeout if Queue is full.

  Limitations:
  - Message priority is ignored
*/
osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uint8_t msg_prio, uint32_t timeout) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;
  BaseType_t yield;

  (void)msg_prio; /* Message priority is ignored */

  stat = osOK;

  if (IRQ_Context() != 0U) {
    if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
      stat = osErrorParameter;
    }
    else {
      yield = pdFALSE;

      if (xQueueSendToBackFromISR (hQueue, msg_ptr, &yield) != pdTRUE) {
        stat = osErrorResource;
      } else {
        portYIELD_FROM_ISR (yield);
      }
    }
  }
  else {
    if ((hQueue == NULL) || (msg_ptr == NULL)) {
      stat = osErrorParameter;
    }
    else {
      if (xQueueSendToBack (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
        if (timeout != 0U) {
          stat = osErrorTimeout;
        } else {
          stat = osErrorResource;
        }
      }
    }
  }

  /* Return execution status */
  return (stat);
}

/*
  Get a Message from a Queue or timeout if Queue is empty.

  Limitations:
  - Message priority is ignored
*/
osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *msg_prio, uint32_t timeout) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;
  BaseType_t yield;

  (void)msg_prio; /* Message priority is ignored */

  stat = osOK;

  if (IRQ_Context() != 0U) {
    if ((hQueue == NULL) || (msg_ptr == NULL) || (timeout != 0U)) {
      stat = osErrorParameter;
    }
    else {
      yield = pdFALSE;

      if (xQueueReceiveFromISR (hQueue, msg_ptr, &yield) != pdPASS) {
        stat = osErrorResource;
      } else {
        portYIELD_FROM_ISR (yield);
      }
    }
  }
  else {
    if ((hQueue == NULL) || (msg_ptr == NULL)) {
      stat = osErrorParameter;
    }
    else {
      if (xQueueReceive (hQueue, msg_ptr, (TickType_t)timeout) != pdPASS) {
        if (timeout != 0U) {
          stat = osErrorTimeout;
        } else {
          stat = osErrorResource;
        }
      }
    }
  }

  /* Return execution status */
  return (stat);
}
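
/*
  Usage sketch (illustrative only; mq_id and MsgItem_t are the hypothetical
  objects from the osMessageQueueNew() sketch above): one producer thread and
  one consumer thread. msg_prio is accepted but ignored by this port.

    void producer_thread (void *argument) {
      MsgItem_t msg = { .cmd = 1U };
      (void)argument;
      for (;;) {
        (void)osMessageQueuePut (mq_id, &msg, 0U, osWaitForever);
        osDelay (100U);
      }
    }

    void consumer_thread (void *argument) {
      MsgItem_t msg;
      (void)argument;
      for (;;) {
        if (osMessageQueueGet (mq_id, &msg, NULL, osWaitForever) == osOK) {
          // ... handle msg ...
        }
      }
    }
*/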

/*
  Get maximum number of messages in a Message Queue.
*/
uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
  StaticQueue_t *mq = (StaticQueue_t *)mq_id;
  uint32_t capacity;

  if (mq == NULL) {
    capacity = 0U;
  } else {
    /* capacity = pxQueue->uxLength */
    capacity = mq->uxDummy4[1];
  }

  /* Return maximum number of messages */
  return (capacity);
}

/*
  Get maximum message size in a Message Queue.
*/
uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
  StaticQueue_t *mq = (StaticQueue_t *)mq_id;
  uint32_t size;

  if (mq == NULL) {
    size = 0U;
  } else {
    /* size = pxQueue->uxItemSize */
    size = mq->uxDummy4[2];
  }

  /* Return maximum message size */
  return (size);
}

/*
  Get number of queued messages in a Message Queue.
*/
uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  UBaseType_t count;

  if (hQueue == NULL) {
    count = 0U;
  }
  else if (IRQ_Context() != 0U) {
    count = uxQueueMessagesWaitingFromISR (hQueue);
  }
  else {
    count = uxQueueMessagesWaiting (hQueue);
  }

  /* Return number of queued messages */
  return ((uint32_t)count);
}

/*
  Get number of available slots for messages in a Message Queue.
*/
uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
  StaticQueue_t *mq = (StaticQueue_t *)mq_id;
  uint32_t space;
  uint32_t isrm;

  if (mq == NULL) {
    space = 0U;
  }
  else if (IRQ_Context() != 0U) {
    isrm = taskENTER_CRITICAL_FROM_ISR();

    /* space = pxQueue->uxLength - pxQueue->uxMessagesWaiting; */
    space = mq->uxDummy4[1] - mq->uxDummy4[0];

    taskEXIT_CRITICAL_FROM_ISR(isrm);
  }
  else {
    space = (uint32_t)uxQueueSpacesAvailable ((QueueHandle_t)mq);
  }

  /* Return number of available slots */
  return (space);
}

/*
  Reset a Message Queue to initial empty state.
*/
osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;

  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hQueue == NULL) {
    stat = osErrorParameter;
  }
  else {
    stat = osOK;
    (void)xQueueReset (hQueue);
  }

  /* Return execution status */
  return (stat);
}

/*
  Delete a Message Queue object.
*/
osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
  QueueHandle_t hQueue = (QueueHandle_t)mq_id;
  osStatus_t stat;

#ifndef USE_FreeRTOS_HEAP_1
  if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else if (hQueue == NULL) {
    stat = osErrorParameter;
  }
  else {
    #if (configQUEUE_REGISTRY_SIZE > 0)
    vQueueUnregisterQueue (hQueue);
    #endif
    stat = osOK;
    vQueueDelete (hQueue);
  }
#else
  stat = osError;
#endif

  /* Return execution status */
  return (stat);
}

/* ==== Memory Pool Management Functions ==== */

#ifdef FREERTOS_MPOOL_H_

/* Static memory pool functions */
static void  FreeBlock   (MemPool_t *mp, void *block);
static void *AllocBlock  (MemPool_t *mp);
static void *CreateBlock (MemPool_t *mp);

/*
  Create and Initialize a Memory Pool object.
*/
osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
  MemPool_t *mp;
  const char *name;
  int32_t mem_cb, mem_mp;
  uint32_t sz;

  if (IRQ_Context() != 0U) {
    mp = NULL;
  }
  else if ((block_count == 0U) || (block_size == 0U)) {
    mp = NULL;
  }
  else {
    mp = NULL;
    sz = MEMPOOL_ARR_SIZE (block_count, block_size);

    name = NULL;
    mem_cb = -1;
    mem_mp = -1;

    if (attr != NULL) {
      if (attr->name != NULL) {
        name = attr->name;
      }

      if ((attr->cb_mem != NULL) && (attr->cb_size >= sizeof(MemPool_t))) {
        /* Static control block is provided */
        mem_cb = 1;
      }
      else if ((attr->cb_mem == NULL) && (attr->cb_size == 0U)) {
        /* Allocate control block memory on heap */
        mem_cb = 0;
      }

      if ((attr->mp_mem == NULL) && (attr->mp_size == 0U)) {
        /* Allocate memory array on heap */
        mem_mp = 0;
      }
      else {
        if (attr->mp_mem != NULL) {
          /* Check if array is 4-byte aligned */
          if (((uint32_t)attr->mp_mem & 3U) == 0U) {
            /* Check if array is big enough */
            if (attr->mp_size >= sz) {
              /* Static memory pool array is provided */
              mem_mp = 1;
            }
          }
        }
      }
    }
    else {
      /* Attributes not provided, allocate memory on heap */
      mem_cb = 0;
      mem_mp = 0;
    }

    if (mem_cb == 0) {
      mp = pvPortMalloc (sizeof(MemPool_t));
    } else {
      mp = attr->cb_mem;
    }

    if (mp != NULL) {
      /* Create a semaphore (max count == initial count == block_count) */
      #if (configSUPPORT_STATIC_ALLOCATION == 1)
        mp->sem = xSemaphoreCreateCountingStatic (block_count, block_count, &mp->mem_sem);
      #elif (configSUPPORT_DYNAMIC_ALLOCATION == 1)
        mp->sem = xSemaphoreCreateCounting (block_count, block_count);
      #else
        mp->sem = NULL;
      #endif

      if (mp->sem != NULL) {
        /* Setup memory array */
        if (mem_mp == 0) {
          mp->mem_arr = pvPortMalloc (sz);
        } else {
          mp->mem_arr = attr->mp_mem;
        }
      }
    }

    if ((mp != NULL) && (mp->mem_arr != NULL)) {
      /* Memory pool can be created */
      mp->head   = NULL;
      mp->mem_sz = sz;
      mp->name   = name;
      mp->bl_sz  = block_size;
      mp->bl_cnt = block_count;
      mp->n      = 0U;

      /* Set heap allocated memory flags */
      mp->status = MPOOL_STATUS;

      if (mem_cb == 0) {
        /* Control block on heap */
        mp->status |= 1U;
      }
      if (mem_mp == 0) {
        /* Memory array on heap */
        mp->status |= 2U;
      }
    }
    else {
      /* Memory pool cannot be created, release allocated resources */
      if ((mem_cb == 0) && (mp != NULL)) {
        /* Free control block memory */
        vPortFree (mp);
      }
      mp = NULL;
    }
  }

  /* Return memory pool ID */
  return (mp);
}
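
/*
  Usage sketch (illustrative only; the block type and names are hypothetical):
  a pool of 16 blocks allocated entirely on the FreeRTOS heap (attr == NULL).

    typedef struct { uint32_t id; uint8_t payload[28]; } PoolItem_t;

    static osMemoryPoolId_t pool_id;

    static void pool_init (void) {
      pool_id = osMemoryPoolNew (16U, sizeof(PoolItem_t), NULL);
    }

  For a fully static pool, provide cb_mem/cb_size (at least sizeof(MemPool_t))
  and a 4-byte aligned mp_mem/mp_size of at least
  MEMPOOL_ARR_SIZE(block_count, block_size) bytes, as validated above.
*/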

/*
  Get name of a Memory Pool object.
*/
const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
  MemPool_t *mp = (MemPool_t *)mp_id;
  const char *p;

  if (IRQ_Context() != 0U) {
    p = NULL;
  }
  else if (mp_id == NULL) {
    p = NULL;
  }
  else {
    p = mp->name;
  }

  /* Return name as null-terminated string */
  return (p);
}

/*
  Allocate a memory block from a Memory Pool.
*/
void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
  MemPool_t *mp;
  void *block;
  uint32_t isrm;

  if (mp_id == NULL) {
    /* Invalid input parameters */
    block = NULL;
  }
  else {
    block = NULL;

    mp = (MemPool_t *)mp_id;

    if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
      if (IRQ_Context() != 0U) {
        if (timeout == 0U) {
          if (xSemaphoreTakeFromISR (mp->sem, NULL) == pdTRUE) {
            if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
              isrm = taskENTER_CRITICAL_FROM_ISR();

              /* Get a block from the free-list */
              block = AllocBlock(mp);

              if (block == NULL) {
                /* List of free blocks is empty, 'create' new block */
                block = CreateBlock(mp);
              }

              taskEXIT_CRITICAL_FROM_ISR(isrm);
            }
          }
        }
      }
      else {
        if (xSemaphoreTake (mp->sem, (TickType_t)timeout) == pdTRUE) {
          if ((mp->status & MPOOL_STATUS) == MPOOL_STATUS) {
            taskENTER_CRITICAL();

            /* Get a block from the free-list */
            block = AllocBlock(mp);

            if (block == NULL) {
              /* List of free blocks is empty, 'create' new block */
              block = CreateBlock(mp);
            }

            taskEXIT_CRITICAL();
          }
        }
      }
    }
  }

  /* Return memory block address */
  return (block);
}

/*
  Return an allocated memory block back to a Memory Pool.
*/
osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
  MemPool_t *mp;
  osStatus_t stat;
  uint32_t isrm;
  BaseType_t yield;

  if ((mp_id == NULL) || (block == NULL)) {
    /* Invalid input parameters */
    stat = osErrorParameter;
  }
  else {
    mp = (MemPool_t *)mp_id;

    if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
      /* Invalid object status */
      stat = osErrorResource;
    }
    else if ((block < (void *)&mp->mem_arr[0]) || (block > (void *)&mp->mem_arr[mp->mem_sz-1])) {
      /* Block pointer outside of memory array area */
      stat = osErrorParameter;
    }
    else {
      stat = osOK;

      if (IRQ_Context() != 0U) {
        if (uxSemaphoreGetCountFromISR (mp->sem) == mp->bl_cnt) {
          stat = osErrorResource;
        }
        else {
          isrm = taskENTER_CRITICAL_FROM_ISR();

          /* Add block to the list of free blocks */
          FreeBlock(mp, block);

          taskEXIT_CRITICAL_FROM_ISR(isrm);

          yield = pdFALSE;
          xSemaphoreGiveFromISR (mp->sem, &yield);
          portYIELD_FROM_ISR (yield);
        }
      }
      else {
        if (uxSemaphoreGetCount (mp->sem) == mp->bl_cnt) {
          stat = osErrorResource;
        }
        else {
          taskENTER_CRITICAL();

          /* Add block to the list of free blocks */
          FreeBlock(mp, block);

          taskEXIT_CRITICAL();

          xSemaphoreGive (mp->sem);
        }
      }
    }
  }

  /* Return execution status */
  return (stat);
}
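
/*
  Usage sketch (illustrative only; pool_id and PoolItem_t come from the
  hypothetical osMemoryPoolNew() sketch above): allocate a block, use it,
  then return it to the pool.

    PoolItem_t *item = osMemoryPoolAlloc (pool_id, osWaitForever);
    if (item != NULL) {
      item->id = 42U;
      // ... fill and use the block ...
      (void)osMemoryPoolFree (pool_id, item);
    }
*/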

/*
  Get maximum number of memory blocks in a Memory Pool.
*/
uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
  MemPool_t *mp;
  uint32_t n;

  if (mp_id == NULL) {
    /* Invalid input parameters */
    n = 0U;
  }
  else {
    mp = (MemPool_t *)mp_id;

    if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
      /* Invalid object status */
      n = 0U;
    }
    else {
      n = mp->bl_cnt;
    }
  }

  /* Return maximum number of memory blocks */
  return (n);
}

/*
  Get memory block size in a Memory Pool.
*/
uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
  MemPool_t *mp;
  uint32_t sz;

  if (mp_id == NULL) {
    /* Invalid input parameters */
    sz = 0U;
  }
  else {
    mp = (MemPool_t *)mp_id;

    if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
      /* Invalid object status */
      sz = 0U;
    }
    else {
      sz = mp->bl_sz;
    }
  }

  /* Return memory block size in bytes */
  return (sz);
}

/*
  Get number of memory blocks used in a Memory Pool.
*/
uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
  MemPool_t *mp;
  uint32_t n;

  if (mp_id == NULL) {
    /* Invalid input parameters */
    n = 0U;
  }
  else {
    mp = (MemPool_t *)mp_id;

    if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
      /* Invalid object status */
      n = 0U;
    }
    else {
      if (IRQ_Context() != 0U) {
        n = uxSemaphoreGetCountFromISR (mp->sem);
      } else {
        n = uxSemaphoreGetCount (mp->sem);
      }

      n = mp->bl_cnt - n;
    }
  }

  /* Return number of memory blocks used */
  return (n);
}

/*
  Get number of memory blocks available in a Memory Pool.
*/
uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
  MemPool_t *mp;
  uint32_t n;

  if (mp_id == NULL) {
    /* Invalid input parameters */
    n = 0U;
  }
  else {
    mp = (MemPool_t *)mp_id;

    if ((mp->status & MPOOL_STATUS) != MPOOL_STATUS) {
      /* Invalid object status */
      n = 0U;
    }
    else {
      if (IRQ_Context() != 0U) {
        n = uxSemaphoreGetCountFromISR (mp->sem);
      } else {
        n = uxSemaphoreGetCount (mp->sem);
      }
    }
  }

  /* Return number of memory blocks available */
  return (n);
}

/*
  Delete a Memory Pool object.
*/
osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
  MemPool_t *mp;
  osStatus_t stat;

  if (mp_id == NULL) {
    /* Invalid input parameters */
    stat = osErrorParameter;
  }
  else if (IRQ_Context() != 0U) {
    stat = osErrorISR;
  }
  else {
    mp = (MemPool_t *)mp_id;

    taskENTER_CRITICAL();

    /* Invalidate control block status */
    mp->status = mp->status & 3U;

    /* Wake-up tasks waiting for pool semaphore */
    while (xSemaphoreGive (mp->sem) == pdTRUE);

    mp->head   = NULL;
    mp->bl_sz  = 0U;
    mp->bl_cnt = 0U;

    if ((mp->status & 2U) != 0U) {
      /* Memory pool array allocated on heap */
      vPortFree (mp->mem_arr);
    }
    if ((mp->status & 1U) != 0U) {
      /* Memory pool control block allocated on heap */
      vPortFree (mp);
    }

    taskEXIT_CRITICAL();

    stat = osOK;
  }

  /* Return execution status */
  return (stat);
}

/*
  Create a new block according to the current block index.
*/
static void *CreateBlock (MemPool_t *mp) {
  MemPoolBlock_t *p = NULL;

  if (mp->n < mp->bl_cnt) {
    /* Unallocated blocks exist, set pointer to new block */
    p = (void *)(mp->mem_arr + (mp->bl_sz * mp->n));

    /* Increment block index */
    mp->n += 1U;
  }

  return (p);
}

/*
  Allocate a block by taking it from the list of free blocks.
*/
static void *AllocBlock (MemPool_t *mp) {
  MemPoolBlock_t *p = NULL;

  if (mp->head != NULL) {
    /* List of free blocks exists, get head block */
    p = mp->head;

    /* Head block is now next on the list */
    mp->head = p->next;
  }

  return (p);
}

/*
  Free a block by putting it onto the list of free blocks.
*/
static void FreeBlock (MemPool_t *mp, void *block) {
  MemPoolBlock_t *p = block;

  /* Store current head into block memory space */
  p->next = mp->head;

  /* Store current block as new head */
  mp->head = p;
}
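
/*
  Illustration (descriptive note, not part of the original code): the three
  helpers above maintain an intrusive singly linked free list threaded through
  the payload of the blocks themselves. CreateBlock() hands out fresh blocks
  from mem_arr until all bl_cnt blocks have been used once; after that,
  FreeBlock() pushes returned blocks onto mp->head and AllocBlock() pops them
  again, so no bookkeeping memory is needed beyond the MemPoolBlock_t 'next'
  pointer stored inside each free block.
*/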

#endif /* FREERTOS_MPOOL_H_ */

/*---------------------------------------------------------------------------*/

/* Callback function prototypes */
extern void vApplicationIdleHook              (void);
extern void vApplicationMallocFailedHook      (void);
extern void vApplicationDaemonTaskStartupHook (void);

/**
  Dummy implementation of the callback function vApplicationIdleHook().
*/
#if (configUSE_IDLE_HOOK == 1)
__WEAK void vApplicationIdleHook (void){}
#endif

/**
  Dummy implementation of the callback function vApplicationTickHook().
*/
#if (configUSE_TICK_HOOK == 1)
__WEAK void vApplicationTickHook (void){}
#endif

/**
  Dummy implementation of the callback function vApplicationMallocFailedHook().
*/
#if (configUSE_MALLOC_FAILED_HOOK == 1)
__WEAK void vApplicationMallocFailedHook (void) {
  /* Assert when malloc failed hook is enabled but no application defined function exists */
  configASSERT(0);
}
#endif

/**
  Dummy implementation of the callback function vApplicationDaemonTaskStartupHook().
*/
#if (configUSE_DAEMON_TASK_STARTUP_HOOK == 1)
__WEAK void vApplicationDaemonTaskStartupHook (void){}
#endif

/**
  Dummy implementation of the callback function vApplicationStackOverflowHook().
*/
#if (configCHECK_FOR_STACK_OVERFLOW > 0)
__WEAK void vApplicationStackOverflowHook (TaskHandle_t xTask, char *pcTaskName) {
  (void)xTask;
  (void)pcTaskName;

  /* Assert when stack overflow is enabled but no application defined function exists */
  configASSERT(0);
}
#endif

/*---------------------------------------------------------------------------*/
#if (configSUPPORT_STATIC_ALLOCATION == 1)
/*
  vApplicationGetIdleTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
  is set to 1 and is required for static memory allocation support.
*/
__WEAK void vApplicationGetIdleTaskMemory (StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize) {
  /* Idle task control block and stack */
  static StaticTask_t Idle_TCB;
  static StackType_t  Idle_Stack[configMINIMAL_STACK_SIZE];

  *ppxIdleTaskTCBBuffer   = &Idle_TCB;
  *ppxIdleTaskStackBuffer = &Idle_Stack[0];
  *pulIdleTaskStackSize   = (uint32_t)configMINIMAL_STACK_SIZE;
}

/*
  vApplicationGetTimerTaskMemory gets called when configSUPPORT_STATIC_ALLOCATION
  is set to 1 and is required for static memory allocation support.
*/
__WEAK void vApplicationGetTimerTaskMemory (StaticTask_t **ppxTimerTaskTCBBuffer, StackType_t **ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize) {
  /* Timer task control block and stack */
  static StaticTask_t Timer_TCB;
  static StackType_t  Timer_Stack[configTIMER_TASK_STACK_DEPTH];

  *ppxTimerTaskTCBBuffer   = &Timer_TCB;
  *ppxTimerTaskStackBuffer = &Timer_Stack[0];
  *pulTimerTaskStackSize   = (uint32_t)configTIMER_TASK_STACK_DEPTH;
}
#endif