diff options
| author | jacqueline <me@jacqueline.id.au> | 2024-02-01 10:54:20 +1100 |
|---|---|---|
| committer | jacqueline <me@jacqueline.id.au> | 2024-02-01 10:54:20 +1100 |
| commit | dad159dc3c4ebc89d395b4b695a0626b7be10578 (patch) | |
| tree | e46c9b00f6a2879088b164347f9f35a5636ddad6 /src/tasks | |
| parent | fde45dba39152064add0379a8ae68b905adff9b9 (diff) | |
| download | tangara-fw-dad159dc3c4ebc89d395b4b695a0626b7be10578.tar.gz | |
Use a single pool of bg workers instead of separate tasks per use case
Also: bump the number of worker tasks up to 3 from 2!
This makes bg db updates + playback work :)
Diffstat (limited to 'src/tasks')
| -rw-r--r-- | src/tasks/tasks.cpp | 119 | ||||
| -rw-r--r-- | src/tasks/tasks.hpp | 44 |
2 files changed, 47 insertions, 116 deletions
diff --git a/src/tasks/tasks.cpp b/src/tasks/tasks.cpp index 981ee20a..ea174039 100644 --- a/src/tasks/tasks.cpp +++ b/src/tasks/tasks.cpp @@ -31,14 +31,6 @@ template <> auto Name<Type::kAudioConverter>() -> std::pmr::string { return "audio_conv"; } -template <> -auto Name<Type::kDatabase>() -> std::pmr::string { - return "db_fg"; -} -template <> -auto Name<Type::kBackgroundWorker>() -> std::pmr::string { - return "bg_worker"; -} template <Type t> auto AllocateStack() -> cpp::span<StackType_t>; @@ -68,14 +60,10 @@ auto AllocateStack<Type::kAudioConverter>() -> cpp::span<StackType_t> { static StackType_t sStack[size]; return {sStack, size}; } -// Leveldb is designed for non-embedded use cases, where stack space isn't so -// much of a concern. It therefore uses an eye-wateringly large amount of stack. -template <> -auto AllocateStack<Type::kDatabase>() -> cpp::span<StackType_t> { - std::size_t size = 256 * 1024; - return {static_cast<StackType_t*>(heap_caps_malloc(size, MALLOC_CAP_SPIRAM)), - size}; -} +// Background workers receive huge stacks in PSRAM. This is mostly to faciliate +// use of LevelDB from any bg worker; Leveldb is designed for non-embedded use +// cases, where large stack usage isn't so much of a concern. It therefore uses +// an eye-wateringly large amount of stack. template <> auto AllocateStack<Type::kBackgroundWorker>() -> cpp::span<StackType_t> { std::size_t size = 256 * 1024; @@ -115,26 +103,10 @@ auto Priority<Type::kUi>() -> UBaseType_t { // couple of ms extra delay due to scheduling, so give this task the lowest // priority. 
template <> -auto Priority<Type::kDatabase>() -> UBaseType_t { - return 2; -} -template <> auto Priority<Type::kBackgroundWorker>() -> UBaseType_t { return 1; } -template <Type t> -auto WorkerQueueSize() -> std::size_t; - -template <> -auto WorkerQueueSize<Type::kDatabase>() -> std::size_t { - return 8; -} -template <> -auto WorkerQueueSize<Type::kBackgroundWorker>() -> std::size_t { - return 8; -} - auto PersistentMain(void* fn) -> void { auto* function = reinterpret_cast<std::function<void(void)>*>(fn); std::invoke(*function); @@ -142,69 +114,50 @@ auto PersistentMain(void* fn) -> void { vTaskDelete(NULL); } -auto Worker::Main(void* instance) { - Worker* i = reinterpret_cast<Worker*>(instance); +auto WorkerPool::Main(void* q) { + QueueHandle_t queue = reinterpret_cast<QueueHandle_t>(q); while (1) { WorkItem item; - if (xQueueReceive(i->queue_, &item, portMAX_DELAY)) { - if (item.quit) { - break; - } else if (item.fn != nullptr) { - std::invoke(*item.fn); - delete item.fn; - } + if (xQueueReceive(queue, &item, portMAX_DELAY)) { + std::invoke(*item); + delete item; } } - i->is_task_running_.store(false); - i->is_task_running_.notify_all(); - // Wait for the instance's destructor to delete this task. We do this instead - // of just deleting ourselves so that it's 100% certain that it's safe to - // delete or reuse this task's stack. - while (1) { - vTaskDelay(portMAX_DELAY); +} + +static constexpr size_t kNumWorkers = 3; +static constexpr size_t kMaxPendingItems = 8; + +WorkerPool::WorkerPool() + : queue_(xQueueCreate(kMaxPendingItems, sizeof(WorkItem))) { + for (size_t i = 0; i < kNumWorkers; i++) { + auto stack = AllocateStack<Type::kBackgroundWorker>(); + // Task buffers must be in internal ram. Thankfully they're fairly small. 
+ auto buffer = reinterpret_cast<StaticTask_t*>(heap_caps_malloc( + sizeof(StaticTask_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)); + + std::string name = "worker_" + std::to_string(i); + + xTaskCreateStatic(&Main, name.c_str(), stack.size(), queue_, + Priority<Type::kBackgroundWorker>(), stack.data(), + buffer); } } -Worker::Worker(const std::pmr::string& name, - cpp::span<StackType_t> stack, - std::size_t queue_size, - UBaseType_t priority) - : stack_(stack.data()), - queue_(xQueueCreate(queue_size, sizeof(WorkItem))), - is_task_running_(true), - task_buffer_(static_cast<StaticTask_t*>( - heap_caps_malloc(sizeof(StaticTask_t), - MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT))), - task_(xTaskCreateStatic(&Main, - name.c_str(), - stack.size(), - this, - priority, - stack_, - task_buffer_)) {} - -Worker::~Worker() { - WorkItem item{ - .fn = nullptr, - .quit = true, - }; - xQueueSend(queue_, &item, portMAX_DELAY); - is_task_running_.wait(true); - vTaskDelete(task_); - free(stack_); +WorkerPool::~WorkerPool() { + // This should never happen! 
+ assert("worker pool destroyed" == 0); } template <> -auto Worker::Dispatch(const std::function<void(void)> fn) -> std::future<void> { +auto WorkerPool::Dispatch(const std::function<void(void)> fn) + -> std::future<void> { std::shared_ptr<std::promise<void>> promise = std::make_shared<std::promise<void>>(); - WorkItem item{ - .fn = new std::function<void(void)>([=]() { - std::invoke(fn); - promise->set_value(); - }), - .quit = false, - }; + WorkItem item = new std::function<void(void)>([=]() { + std::invoke(fn); + promise->set_value(); + }); xQueueSend(queue_, &item, portMAX_DELAY); return promise->get_future(); } diff --git a/src/tasks/tasks.hpp b/src/tasks/tasks.hpp index 6a3a0d09..1623a8d8 100644 --- a/src/tasks/tasks.hpp +++ b/src/tasks/tasks.hpp @@ -48,8 +48,6 @@ template <Type t> auto AllocateStack() -> cpp::span<StackType_t>; template <Type t> auto Priority() -> UBaseType_t; -template <Type t> -auto WorkerQueueSize() -> std::size_t; auto PersistentMain(void* fn) -> void; @@ -74,32 +72,15 @@ auto StartPersistent(BaseType_t core, const std::function<void(void)>& fn) Priority<t>(), stack.data(), task_buffer, core); } -class Worker { +class WorkerPool { private: - Worker(const std::pmr::string& name, - cpp::span<StackType_t> stack, - std::size_t queue_size, - UBaseType_t priority); - - StackType_t* stack_; QueueHandle_t queue_; - std::atomic<bool> is_task_running_; - StaticTask_t *task_buffer_; - TaskHandle_t task_; - - struct WorkItem { - std::function<void(void)>* fn; - bool quit; - }; + using WorkItem = std::function<void(void)>*; + static auto Main(void* instance); public: - template <Type t> - static auto Start() -> Worker* { - return new Worker(Name<t>(), AllocateStack<t>(), WorkerQueueSize<t>(), - Priority<t>()); - } - - static auto Main(void* instance); + WorkerPool(); + ~WorkerPool(); /* * Schedules the given function to be executed on the worker task, and @@ -109,22 +90,19 @@ class Worker { auto Dispatch(const std::function<T(void)> fn) -> 
std::future<T> { std::shared_ptr<std::promise<T>> promise = std::make_shared<std::promise<T>>(); - WorkItem item{ - .fn = new std::function([=]() { promise->set_value(std::invoke(fn)); }), - .quit = false, - }; + WorkItem item = + new std::function([=]() { promise->set_value(std::invoke(fn)); }); xQueueSend(queue_, &item, portMAX_DELAY); return promise->get_future(); } - ~Worker(); - - Worker(const Worker&) = delete; - Worker& operator=(const Worker&) = delete; + WorkerPool(const WorkerPool&) = delete; + WorkerPool& operator=(const WorkerPool&) = delete; }; /* Specialisation of Evaluate for functions that return nothing. */ template <> -auto Worker::Dispatch(const std::function<void(void)> fn) -> std::future<void>; +auto WorkerPool::Dispatch(const std::function<void(void)> fn) + -> std::future<void>; } // namespace tasks |
