
qcacmn: Add qdf_flex_mem pool allocator

It often happens that the WLAN driver needs to allocate some pool of
structures to handle bursty operations. The traditional approach is to
statically allocate the maximum number of structures that we want to be
able to handle concurrently. This has two significant downsides: it
requires manual tuning for every hardware combination to behave optimally,
and it wastes large amounts of memory during non-burst periods.

Add a new flexible, segmented memory allocator in QDF to help address
such scenarios. A small static buffer segment is used to service the
vast majority of operations, while additional segments are dynamically
allocated as needed to meet demand. Critically, these additional
segments are freed when not in use to reduce memory consumption. The
result is a self-tuning buffer that combines most of the benefits of
pure dynamic allocation with most of the benefits of pure static
allocation.

Change-Id: I5c27ecce72a450826494b5d13d6c9fdebda650a6
CRs-Fixed: 2224534
Dustin Brown, 7 years ago
Commit 19911f3a06
4 changed files with 286 additions and 3 deletions
  1. qdf/inc/qdf_flex_mem.h  +129 -0
  2. qdf/inc/qdf_util.h  +8 -0
  3. qdf/linux/src/i_qdf_util.h  +2 -3
  4. qdf/src/qdf_flex_mem.c  +147 -0

+ 129 - 0
qdf/inc/qdf_flex_mem.h

@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: qdf_flex_mem (flexibly sized memory allocator)
+ * QCA driver framework (QDF) flex mem APIs
+ *
+ * A flex memory allocator is a memory pool which can both dynamically expand
+ * and dynamically shrink. Its benefits over fully dynamic memory allocation
+ * are amortized allocation cost and reduced memory fragmentation.
+ *
+ * The allocator consists of 3 parts: the pool, segments, and items. Items are
+ * the smallest chunks of memory that are handed out via the alloc call, and
+ * are all of a uniform size. Segments are groups of items, representing the
+ * smallest amount of memory that can be dynamically allocated or freed. A pool
+ * is simply a collection of segments.
+ */
+
+#ifndef __QDF_FLEX_MEM_H
+#define __QDF_FLEX_MEM_H
+
+#include "qdf_list.h"
+#include "qdf_lock.h"
+
+#define QDF_FM_BITMAP uint32_t
+#define QDF_FM_BITMAP_BITS (sizeof(QDF_FM_BITMAP) * 8)
+
+/**
+ * struct qdf_flex_mem_pool - a pool of memory segments
+ * @seg_list: the list containing the memory segments
+ * @lock: spinlock for protecting internal data structures
+ * @item_size: the size of the items the pool will allocate
+ */
+struct qdf_flex_mem_pool {
+	qdf_list_t seg_list;
+	struct qdf_spinlock lock;
+	uint16_t item_size;
+};
+
+/**
+ * struct qdf_flex_mem_segment - a memory pool segment
+ * @node: the list node for membership in the memory pool
+ * @dynamic: true if this segment was dynamically allocated
+ * @used_bitmap: bitmap for tracking which items in the segment are in use
+ * @bytes: raw memory for allocating items from
+ */
+struct qdf_flex_mem_segment {
+	qdf_list_node_t node;
+	bool dynamic;
+	QDF_FM_BITMAP used_bitmap;
+	uint8_t *bytes;
+};
+
+/**
+ * DEFINE_QDF_FLEX_MEM_POOL() - define a new flex mem pool with one segment
+ * @name: the name of the pool variable
+ * @size_of_item: size of the items the pool will allocate
+ */
+#define DEFINE_QDF_FLEX_MEM_POOL(name, size_of_item) \
+	struct qdf_flex_mem_pool name; \
+	uint8_t __ ## name ## _head_bytes[QDF_FM_BITMAP_BITS * (size_of_item)];\
+	struct qdf_flex_mem_segment __ ## name ## _head = { \
+		.node = QDF_LIST_NODE_INIT_SINGLE( \
+			QDF_LIST_ANCHOR(name.seg_list)), \
+		.bytes = __ ## name ## _head_bytes, \
+	}; \
+	struct qdf_flex_mem_pool name = { \
+		.seg_list = QDF_LIST_INIT_SINGLE(__ ## name ## _head.node), \
+		.item_size = (size_of_item), \
+	}
+
+/**
+ * qdf_flex_mem_init() - initialize a qdf_flex_mem_pool
+ * @pool: the pool to initialize
+ *
+ * Return: None
+ */
+void qdf_flex_mem_init(struct qdf_flex_mem_pool *pool);
+
+/**
+ * qdf_flex_mem_deinit() - deinitialize a qdf_flex_mem_pool
+ * @pool: the pool to deinitialize
+ *
+ * Return: None
+ */
+void qdf_flex_mem_deinit(struct qdf_flex_mem_pool *pool);
+
+/**
+ * qdf_flex_mem_alloc() - logically allocate memory from the pool
+ * @pool: the pool to allocate from
+ *
+ * This function returns any unused item from any existing segment in the pool.
+ * If there are no unused items in the pool, a new segment is dynamically
+ * allocated to service the request. The size of the allocated memory is the
+ * size originally used to create the pool.
+ *
+ * Return: Pointer to the newly allocated memory, or NULL on failure
+ */
+void *qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool);
+
+/**
+ * qdf_flex_mem_free() - logically free @ptr back to the pool
+ * @pool: the pool to return the memory to
+ * @ptr: a pointer received via a call to qdf_flex_mem_alloc()
+ *
+ * This function marks the item corresponding to @ptr as unused. If that item
+ * was the last used item in the segment it belongs to, and the segment was
+ * dynamically allocated, the segment will be freed.
+ *
+ * Return: None
+ */
+void qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr);
+
+#endif /* __QDF_FLEX_MEM_H */
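
To show how the API above fits together, here is a minimal usage sketch. It is not part of the patch; struct my_ctx, my_ctx_pool, and my_ctx_demo are hypothetical names, and only the qdf_flex_mem calls are taken from the header.

/* Usage sketch (illustrative only, not part of this change) */
#include "qdf_flex_mem.h"
#include "qdf_types.h"

struct my_ctx {
	uint32_t id;
};

/* The static head segment holds QDF_FM_BITMAP_BITS (32) items of this size */
DEFINE_QDF_FLEX_MEM_POOL(my_ctx_pool, sizeof(struct my_ctx));

static void my_ctx_demo(void)
{
	struct my_ctx *ctx;

	qdf_flex_mem_init(&my_ctx_pool);

	/* served from the static head segment while demand is low */
	ctx = qdf_flex_mem_alloc(&my_ctx_pool);
	if (!ctx)
		goto deinit;

	ctx->id = 42;

	/* marks the item unused; a fully unused dynamic segment would be freed */
	qdf_flex_mem_free(&my_ctx_pool, ctx);

deinit:
	qdf_flex_mem_deinit(&my_ctx_pool);
}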

+ 8 - 0
qdf/inc/qdf_util.h

@@ -527,6 +527,14 @@ static inline uint8_t *qdf_get_u32(uint8_t *ptr, uint32_t *value)
  */
 #define qdf_min(a, b)   __qdf_min(a, b)
 
+/**
+ * qdf_ffz() - find first (least significant) zero bit
+ * @mask: the bitmask to check
+ *
+ * Return: The zero-based index of the first zero bit, or -1 if none are found
+ */
+#define qdf_ffz(mask) __qdf_ffz(mask)
+
 /**
  * qdf_get_pwr2() - get next power of 2 integer from input value
  * @value: input value to find next power of 2 integer

+ 2 - 3
qdf/linux/src/i_qdf_util.h

@@ -233,12 +233,11 @@ static inline bool __qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1,
  */
 #define qdf_in_interrupt          in_interrupt
 
-/**
- * @brief memory barriers.
- */
 #define __qdf_min(_a, _b) min(_a, _b)
 #define __qdf_max(_a, _b) max(_a, _b)
 
+#define __qdf_ffz(mask) (~(mask) == 0 ? -1 : ffz(mask))
+
 #define MEMINFO_KB(x)  ((x) << (PAGE_SHIFT - 10))   /* In kilobytes */
 
 /**

+ 147 - 0
qdf/src/qdf_flex_mem.c

@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "qdf_flex_mem.h"
+#include "qdf_list.h"
+#include "qdf_lock.h"
+#include "qdf_mem.h"
+#include "qdf_trace.h"
+#include "qdf_util.h"
+
+void qdf_flex_mem_init(struct qdf_flex_mem_pool *pool)
+{
+	qdf_spinlock_create(&pool->lock);
+}
+
+void qdf_flex_mem_deinit(struct qdf_flex_mem_pool *pool)
+{
+	qdf_spinlock_destroy(&pool->lock);
+}
+
+static struct qdf_flex_mem_segment *qdf_flex_mem_seg_alloc(uint16_t item_size)
+{
+	size_t bytes_size = item_size * QDF_FM_BITMAP_BITS;
+	size_t total_size = sizeof(struct qdf_flex_mem_segment) + bytes_size;
+	struct qdf_flex_mem_segment *seg;
+
+	seg = qdf_mem_malloc(total_size);
+	if (!seg)
+		return NULL;
+
+	seg->dynamic = true;
+	seg->bytes = (uint8_t *)(seg + 1);
+
+	return seg;
+}
+
+static void *__qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool)
+{
+	struct qdf_flex_mem_segment *seg;
+
+	qdf_list_for_each(&pool->seg_list, seg, node) {
+		int index;
+		void *ptr;
+
+		index = qdf_ffz(seg->used_bitmap);
+		if (index < 0)
+			continue;
+
+		QDF_BUG(index < QDF_FM_BITMAP_BITS);
+
+		seg->used_bitmap ^= (QDF_FM_BITMAP)1 << index;
+		ptr = &seg->bytes[index * pool->item_size];
+		qdf_mem_zero(ptr, pool->item_size);
+
+		return ptr;
+	}
+
+	seg = qdf_flex_mem_seg_alloc(pool->item_size);
+	if (!seg)
+		return NULL;
+
+	seg->used_bitmap = 1;
+	qdf_list_insert_back(&pool->seg_list, &seg->node);
+
+	return seg->bytes;
+}
+
+void *qdf_flex_mem_alloc(struct qdf_flex_mem_pool *pool)
+{
+	void *ptr;
+
+	QDF_BUG(pool);
+	if (!pool)
+		return NULL;
+
+	qdf_spin_lock_bh(&pool->lock);
+	ptr = __qdf_flex_mem_alloc(pool);
+	qdf_spin_unlock_bh(&pool->lock);
+
+	return ptr;
+}
+
+static void qdf_flex_mem_seg_free(struct qdf_flex_mem_pool *pool,
+				  struct qdf_flex_mem_segment *seg)
+{
+	qdf_list_remove_node(&pool->seg_list, &seg->node);
+	qdf_mem_free(seg);
+}
+
+static void __qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr)
+{
+	struct qdf_flex_mem_segment *seg;
+	void *low_addr;
+	void *high_addr;
+	unsigned long index;
+
+	qdf_list_for_each(&pool->seg_list, seg, node) {
+		low_addr = seg->bytes;
+		high_addr = low_addr + pool->item_size * QDF_FM_BITMAP_BITS;
+
+		if (ptr < low_addr || ptr >= high_addr)
+			continue;
+
+		index = (ptr - low_addr) / pool->item_size;
+		QDF_BUG(index < QDF_FM_BITMAP_BITS);
+
+		seg->used_bitmap ^= (QDF_FM_BITMAP)1 << index;
+		if (!seg->used_bitmap && seg->dynamic)
+			qdf_flex_mem_seg_free(pool, seg);
+
+		return;
+	}
+
+	qdf_err("Failed to find pointer in segment pool");
+	QDF_DEBUG_PANIC();
+}
+
+void qdf_flex_mem_free(struct qdf_flex_mem_pool *pool, void *ptr)
+{
+	QDF_BUG(pool);
+	if (!pool)
+		return;
+
+	QDF_BUG(ptr);
+	if (!ptr)
+		return;
+
+	qdf_spin_lock_bh(&pool->lock);
+	__qdf_flex_mem_free(pool, ptr);
+	qdf_spin_unlock_bh(&pool->lock);
+}
+
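
Finally, to illustrate the expand/shrink behavior implemented above, a hedged test sketch (not part of the patch; demo_item, demo_pool, and flex_mem_demo are hypothetical names):

/* Test sketch (illustrative only): push the pool past its static head
 * segment and back, exercising dynamic segment allocation and release.
 */
#include "qdf_flex_mem.h"
#include "qdf_types.h"

struct demo_item {
	uint32_t seq;
};

DEFINE_QDF_FLEX_MEM_POOL(demo_pool, sizeof(struct demo_item));

static void flex_mem_demo(void)
{
	void *items[QDF_FM_BITMAP_BITS + 1];
	int i;

	qdf_flex_mem_init(&demo_pool);

	/* the first QDF_FM_BITMAP_BITS allocations fit in the static head
	 * segment; the final one forces a dynamic segment to be allocated
	 */
	for (i = 0; i < QDF_FM_BITMAP_BITS + 1; i++)
		items[i] = qdf_flex_mem_alloc(&demo_pool);

	/* freeing the overflow item empties its dynamic segment, so that
	 * segment is released; the static head segment is never freed
	 */
	for (i = 0; i < QDF_FM_BITMAP_BITS + 1; i++)
		if (items[i])
			qdf_flex_mem_free(&demo_pool, items[i]);

	qdf_flex_mem_deinit(&demo_pool);
}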