summaryrefslogtreecommitdiff
path: root/src/memory
diff options
context:
space:
mode:
Diffstat (limited to 'src/memory')
-rw-r--r--src/memory/CMakeLists.txt2
-rw-r--r--src/memory/arena.cpp75
-rw-r--r--src/memory/include/arena.hpp85
3 files changed, 162 insertions, 0 deletions
diff --git a/src/memory/CMakeLists.txt b/src/memory/CMakeLists.txt
new file mode 100644
index 00000000..67e64267
--- /dev/null
+++ b/src/memory/CMakeLists.txt
@@ -0,0 +1,2 @@
+idf_component_register(SRCS "arena.cpp" INCLUDE_DIRS "include" REQUIRES "span")
+target_compile_options(${COMPONENT_LIB} PRIVATE ${EXTRA_WARNINGS})
diff --git a/src/memory/arena.cpp b/src/memory/arena.cpp
new file mode 100644
index 00000000..450ac4f2
--- /dev/null
+++ b/src/memory/arena.cpp
@@ -0,0 +1,75 @@
#include "arena.hpp"

#include <cassert>
#include <cstdint>
#include <optional>

#include "esp_heap_caps.h"
#include "freertos/queue.h"
#include "span.hpp"
+
+namespace memory {
+
+Arena::Arena(std::size_t block_size,
+ std::size_t num_blocks,
+ uint32_t alloc_flags)
+ : block_size_(block_size) {
+ pool_ = static_cast<std::byte*>(
+ heap_caps_malloc(block_size * num_blocks, alloc_flags));
+ free_blocks_ = xQueueCreate(num_blocks, sizeof(void*));
+ for (int i = 0; i < num_blocks; i++) {
+ std::byte* block = pool_ + (i * block_size);
+ xQueueSend(free_blocks_, &block, 0);
+ }
+}
+
+Arena::~Arena() {
+ // TODO: assert queue is full?
+ vQueueDelete(free_blocks_);
+ free(pool_);
+}
+
+auto Arena::Acquire() -> std::optional<ArenaPtr> {
+ std::byte* block;
+ bool result = xQueueReceive(free_blocks_, &block, 0);
+ if (result) {
+ ArenaPtr ptr{this, block, block_size_, 0};
+ return ptr;
+ } else {
+ return {};
+ }
+}
+
+auto Arena::Return(ArenaPtr ptr) -> void {
+ assert(ptr.owner == this);
+ xQueueSend(free_blocks_, &ptr.start, 0);
+}
+
+auto ArenaRef::Acquire(Arena* a) -> std::optional<ArenaRef> {
+ auto ptr = a->Acquire();
+ if (ptr) {
+ ArenaRef ref{*ptr};
+ return ref;
+ }
+ return {};
+}
+
// Takes ownership of an already-acquired block; the destructor will return it.
ArenaRef::ArenaRef(ArenaPtr p) : ptr(p) {}

// Move: steals the other ref's pointer via Release(), leaving the source with
// a null owner so its destructor does not return the block a second time.
ArenaRef::ArenaRef(ArenaRef&& other) : ptr(other.Release()) {}
+
+auto ArenaRef::Release() -> ArenaPtr {
+ auto ret = ptr;
+ ptr.owner = nullptr;
+ ptr.start = nullptr;
+ ptr.size = 0;
+ ptr.used_size = 0;
+ return ret;
+}
+
+ArenaRef::~ArenaRef() {
+ if (ptr.owner != nullptr) {
+ ptr.owner->Return(ptr);
+ }
+}
+
+} // namespace memory
diff --git a/src/memory/include/arena.hpp b/src/memory/include/arena.hpp
new file mode 100644
index 00000000..26d49d27
--- /dev/null
+++ b/src/memory/include/arena.hpp
@@ -0,0 +1,85 @@
#pragma once

#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "span.hpp"
#include "sys/_stdint.h"
+
+namespace memory {
+
+class Arena;
+
+/*
+ * A pointer to data that has been given out by an Arena, plus extra accounting
+ * information so that it can be returned properly.
+ */
struct ArenaPtr {
  // The arena that handed this block out; blocks must be returned to it.
  // Null marks an empty/released ptr (see ArenaRef::Release).
  // NOTE: member order is relied upon by aggregate initialisation in
  // Arena::Acquire — do not reorder.
  Arena* owner;
  // First byte of the block within the arena's pool.
  std::byte* start;
  // Total capacity of the block, in bytes (the arena's block_size).
  std::size_t size;
  // A convenience for keeping track of the subset of the block that has had
  // data placed within it.
  std::size_t used_size;
};
+
+/*
+ * A basic memory arena. This class mediates access to fixed-size blocks of
+ * memory within a larger contiguous block. This is faster than re-allocating
+ * smaller blocks every time they're needed, and lets us easily limit the
+ * maximum size of the memory used.
+ *
+ * A single arena instance is safe to be used concurrently by multiple tasks,
+ * however there is no built in synchronisation of the underlying memory.
+ */
+class Arena {
+ public:
+ Arena(std::size_t block_size, std::size_t num_blocks, uint32_t alloc_flags);
+ ~Arena();
+
+ /*
+ * Attempts to receive an allocation from this arena. Returns absent if
+ * there are no blocks left.
+ */
+ auto Acquire() -> std::optional<ArenaPtr>;
+
+ /* Returns a previously allocated block to this arena. */
+ auto Return(ArenaPtr) -> void;
+
+ /* Returns the number of blocks that are currently free. */
+ auto BlocksFree() -> std::size_t;
+
+ Arena(const Arena&) = delete;
+ Arena& operator=(const Arena&) = delete;
+
+ private:
+ std::size_t block_size_;
+ // The large memory allocation that is divided into blocks.
+ std::byte* pool_;
+ // A FreeRTOS queue containing the blocks that are currently unused.
+ QueueHandle_t free_blocks_;
+};
+
+/*
+ * Wrapper around an ArenaPtr that handles acquiring and returning the block
+ * through RAII.
+ */
+class ArenaRef {
+ public:
+ static auto Acquire(Arena* a) -> std::optional<ArenaRef>;
+ explicit ArenaRef(ArenaPtr ptr);
+ ~ArenaRef();
+
+ auto Release() -> ArenaPtr;
+
+ ArenaRef(ArenaRef&&);
+ ArenaRef(const ArenaRef&) = delete;
+ Arena& operator=(const Arena&) = delete;
+
+ ArenaPtr ptr;
+};
+
+} // namespace memory