BeRTOS — heap.c: free-list based dynamic memory allocator for embedded systems.
00001 00038 #include "heap.h" 00039 00040 #include <cfg/debug.h> // ASSERT() 00041 #include <string.h> // memset() 00042 00043 #define FREE_FILL_CODE 0xDEAD 00044 #define ALLOC_FILL_CODE 0xBEEF 00045 00046 00047 /* 00048 * This function prototype is deprecated, will change in: 00049 * void heap_init(struct Heap* h, heap_buf_t* memory, size_t size) 00050 * in the next BeRTOS release. 00051 */ 00052 void heap_init(struct Heap* h, void* memory, size_t size) 00053 { 00054 #ifdef _DEBUG 00055 memset(memory, FREE_FILL_CODE, size); 00056 #endif 00057 00058 ASSERT2(((size_t)memory % alignof(heap_buf_t)) == 0, 00059 "memory buffer is unaligned, please use the HEAP_DEFINE_BUF() macro to declare heap buffers!\n"); 00060 00061 /* Initialize heap with a single big chunk */ 00062 h->FreeList = (MemChunk *)memory; 00063 h->FreeList->next = NULL; 00064 h->FreeList->size = size; 00065 } 00066 00067 00068 void *heap_allocmem(struct Heap* h, size_t size) 00069 { 00070 MemChunk *chunk, *prev; 00071 00072 /* Round size up to the allocation granularity */ 00073 size = ROUND_UP2(size, sizeof(MemChunk)); 00074 00075 /* Handle allocations of 0 bytes */ 00076 if (!size) 00077 size = sizeof(MemChunk); 00078 00079 /* Walk on the free list looking for any chunk big enough to 00080 * fit the requested block size. 
00081 */ 00082 for (prev = (MemChunk *)&h->FreeList, chunk = h->FreeList; 00083 chunk; 00084 prev = chunk, chunk = chunk->next) 00085 { 00086 if (chunk->size >= size) 00087 { 00088 if (chunk->size == size) 00089 { 00090 /* Just remove this chunk from the free list */ 00091 prev->next = chunk->next; 00092 #ifdef _DEBUG 00093 memset(chunk, ALLOC_FILL_CODE, size); 00094 #endif 00095 return (void *)chunk; 00096 } 00097 else 00098 { 00099 /* Allocate from the END of an existing chunk */ 00100 chunk->size -= size; 00101 #ifdef _DEBUG 00102 memset((uint8_t *)chunk + chunk->size, ALLOC_FILL_CODE, size); 00103 #endif 00104 return (void *)((uint8_t *)chunk + chunk->size); 00105 } 00106 } 00107 } 00108 00109 return NULL; /* fail */ 00110 } 00111 00112 00113 void heap_freemem(struct Heap* h, void *mem, size_t size) 00114 { 00115 MemChunk *prev; 00116 ASSERT(mem); 00117 00118 #ifdef _DEBUG 00119 memset(mem, FREE_FILL_CODE, size); 00120 #endif 00121 00122 /* Round size up to the allocation granularity */ 00123 size = ROUND_UP2(size, sizeof(MemChunk)); 00124 00125 /* Handle allocations of 0 bytes */ 00126 if (!size) 00127 size = sizeof(MemChunk); 00128 00129 /* Special cases: first chunk in the free list or memory completely full */ 00130 ASSERT((uint8_t*)mem != (uint8_t*)h->FreeList); 00131 if (((uint8_t *)mem) < ((uint8_t *)h->FreeList) || !h->FreeList) 00132 { 00133 /* Insert memory block before the current free list head */ 00134 prev = (MemChunk *)mem; 00135 prev->next = h->FreeList; 00136 prev->size = size; 00137 h->FreeList = prev; 00138 } 00139 else /* Normal case: not the first chunk in the free list */ 00140 { 00141 /* 00142 * Walk on the free list. 
Stop at the insertion point (when mem 00143 * is between prev and prev->next) 00144 */ 00145 prev = h->FreeList; 00146 while (prev->next < (MemChunk *)mem && prev->next) 00147 prev = prev->next; 00148 00149 /* Make sure mem is not *within* prev */ 00150 ASSERT((uint8_t*)mem >= (uint8_t*)prev + prev->size); 00151 00152 /* Should it be merged with previous block? */ 00153 if (((uint8_t *)prev) + prev->size == ((uint8_t *)mem)) 00154 { 00155 /* Yes */ 00156 prev->size += size; 00157 } 00158 else /* not merged with previous chunk */ 00159 { 00160 MemChunk *curr = (MemChunk*)mem; 00161 00162 /* insert it after the previous node 00163 * and move the 'prev' pointer forward 00164 * for the following operations 00165 */ 00166 curr->next = prev->next; 00167 curr->size = size; 00168 prev->next = curr; 00169 00170 /* Adjust for the following test */ 00171 prev = curr; 00172 } 00173 } 00174 00175 /* Also merge with next chunk? */ 00176 if (((uint8_t *)prev) + prev->size == ((uint8_t *)prev->next)) 00177 { 00178 prev->size += prev->next->size; 00179 prev->next = prev->next->next; 00180 00181 /* There should be only one merge opportunity, becuase we always merge on free */ 00182 ASSERT((uint8_t*)prev + prev->size != (uint8_t*)prev->next); 00183 } 00184 } 00185 00196 size_t heap_freeSpace(struct Heap *h) 00197 { 00198 size_t free_mem = 0; 00199 for (MemChunk *chunk = h->FreeList; chunk; chunk = chunk->next) 00200 free_mem += chunk->size; 00201 00202 return free_mem; 00203 } 00204 00205 #if CONFIG_HEAP_MALLOC 00206 00210 void *heap_malloc(struct Heap* h, size_t size) 00211 { 00212 size_t *mem; 00213 00214 size += sizeof(size_t); 00215 if ((mem = (size_t*)heap_allocmem(h, size))) 00216 *mem++ = size; 00217 00218 return mem; 00219 } 00220 00224 void *heap_calloc(struct Heap* h, size_t size) 00225 { 00226 void *mem; 00227 00228 if ((mem = heap_malloc(h, size))) 00229 memset(mem, 0, size); 00230 00231 return mem; 00232 } 00233 00247 void heap_free(struct Heap *h, void *mem) 00248 { 
00249 size_t *_mem = (size_t *)mem; 00250 00251 if (_mem) 00252 { 00253 --_mem; 00254 heap_freemem(h, _mem, *_mem); 00255 } 00256 } 00257 00258 #endif /* CONFIG_HEAP_MALLOC */