/*
 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup lwext4
 * @{
 */
/**
 * @file  ext4_bcache.c
 * @brief Block cache allocator.
 */
#include <ext4_config.h>
#include <ext4_bcache.h>
#include <ext4_debug.h>
#include <ext4_errno.h>

#include <stdlib.h>
#include <string.h>
46 int ext4_bcache_init_dynamic(struct ext4_bcache *bc, uint32_t cnt,
\r
49 ext4_assert(bc && cnt && itemsize);
\r
51 memset(bc, 0, sizeof(struct ext4_bcache));
\r
53 bc->refctr = malloc(cnt * sizeof(uint32_t));
\r
57 bc->lru_id = malloc(cnt * sizeof(uint32_t));
\r
61 bc->free_delay = malloc(cnt * sizeof(uint8_t));
\r
65 bc->lba = malloc(cnt * sizeof(uint64_t));
\r
69 bc->dirty = malloc(cnt * sizeof(bool));
\r
74 bc->data = malloc(cnt * itemsize);
\r
78 memset(bc->refctr, 0, cnt * sizeof(uint32_t));
\r
79 memset(bc->lru_id, 0, cnt * sizeof(uint32_t));
\r
80 memset(bc->free_delay, 0, cnt * sizeof(uint8_t));
\r
81 memset(bc->lba, 0, cnt * sizeof(uint64_t));
\r
82 memset(bc->dirty, 0, cnt * sizeof(bool));
\r
85 bc->itemsize = itemsize;
\r
87 bc->max_ref_blocks = 0;
\r
100 free(bc->free_delay);
\r
111 memset(bc, 0, sizeof(struct ext4_bcache));
\r
116 int ext4_bcache_fini_dynamic(struct ext4_bcache *bc)
\r
125 free(bc->free_delay);
\r
136 memset(bc, 0, sizeof(struct ext4_bcache));
\r
142 int ext4_bcache_alloc(struct ext4_bcache *bc, struct ext4_block *b,
\r
146 ext4_assert(bc && b && is_new);
\r
148 /*Check if valid.*/
\r
149 ext4_assert(b->lb_id);
\r
151 ext4_assert(b->lb_id);
\r
154 uint32_t cache_id = bc->cnt;
\r
155 uint32_t alloc_id = 0;
\r
159 /*Find in free blocks (Last Recently Used).*/
\r
160 for (i = 0; i < bc->cnt; ++i) {
\r
162 /*Check if block is already in cache*/
\r
163 if(b->lb_id == bc->lba[i]){
\r
165 if(!bc->refctr[i] && !bc->free_delay[i])
\r
168 /*Update reference counter*/
\r
171 /*Update usage marker*/
\r
172 bc->lru_id[i] = ++bc->lru_ctr;
\r
174 /*Set valid cache data and id*/
\r
175 b->data = bc->data + i * bc->itemsize;
\r
181 /*Best fit calculations.*/
\r
185 if(bc->free_delay[i])
\r
188 /*Block is unreferenced, but it may exist block with
\r
189 * lower usage marker*/
\r
192 if(cache_id == bc->cnt){
\r
194 alloc_id = bc->lru_id[i];
\r
199 if(alloc_id <= bc->lru_id[i])
\r
202 /*This block has lower alloc id marker*/
\r
204 alloc_id = bc->lru_id[i];
\r
208 if(cache_id != bc->cnt){
\r
209 /*There was unreferenced block*/
\r
210 bc->lba[cache_id] = b->lb_id;
\r
211 bc->refctr[cache_id] = 1;
\r
212 bc->lru_id[cache_id] = ++bc->lru_ctr;
\r
214 /*Set valid cache data and id*/
\r
215 b->data = bc->data + cache_id * bc->itemsize;
\r
216 b->cache_id = cache_id;
\r
220 if(bc->ref_blocks > bc->max_ref_blocks)
\r
221 bc->max_ref_blocks = bc->ref_blocks;
\r
224 /*Block needs to be read.*/
\r
230 ext4_dprintf(EXT4_DEBUG_BCACHE,
\r
231 "ext4_bcache_alloc: FAIL, unable to alloc block cache!\n");
\r
235 int ext4_bcache_free (struct ext4_bcache *bc, struct ext4_block *b,
\r
236 uint8_t free_delay)
\r
238 ext4_assert(bc && b);
\r
240 /*Check if valid.*/
\r
241 ext4_assert(b->lb_id);
\r
243 /*Block should be in cache.*/
\r
244 ext4_assert(b->cache_id < bc->cnt);
\r
246 /*Check if someone don't try free unreferenced block cache.*/
\r
247 ext4_assert(bc->refctr[b->cache_id]);
\r
249 /*Just decrease reference counter*/
\r
250 if(bc->refctr[b->cache_id])
\r
251 bc->refctr[b->cache_id]--;
\r
254 bc->free_delay[b->cache_id] = free_delay;
\r
256 /*Update statistics*/
\r
257 if(!bc->refctr[b->cache_id] && !bc->free_delay[b->cache_id])
\r
269 bool ext4_bcache_is_full(struct ext4_bcache *bc)
\r
271 return (bc->cnt == bc->ref_blocks);
\r