ext4_bcache: fix return value of ext4_buf_lowest_lru (lba -> lru)
/*
 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup lwext4
 * @{
 */
/**
 * @file  ext4_blockdev.c
 * @brief Block device module.
 */

#include "ext4_config.h"
#include "ext4_blockdev.h"
#include "ext4_errno.h"
#include "ext4_debug.h"

#include <string.h>
#include <stdlib.h>

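/**@brief Initialize the block device: verify that the low-level interface
 *        (open/close/bread/bwrite) is set, call open() and mark the device
 *        as EXT4_BDEV_INITIALIZED.*/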
int ext4_block_init(struct ext4_blockdev *bdev)
{
	int rc;
	ext4_assert(bdev);

	ext4_assert(bdev->open && bdev->close && bdev->bread && bdev->bwrite);

	/*Low level block init*/
	rc = bdev->open(bdev);
	if (rc != EOK)
		return rc;

	bdev->flags |= EXT4_BDEV_INITIALIZED;

	return EOK;
}

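/**@brief Bind a block cache to the block device.*/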
int ext4_block_bind_bcache(struct ext4_blockdev *bdev, struct ext4_bcache *bc)
{
	ext4_assert(bdev && bc);
	bdev->bc = bc;
	return EOK;
}

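/**@brief Set the logical block size and recompute the logical block count
 *        from the physical geometry.*/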
void ext4_block_set_lb_size(struct ext4_blockdev *bdev, uint64_t lb_bsize)
{
	/*Logical block size has to be a multiple of the physical block size*/
	ext4_assert(!(lb_bsize % bdev->ph_bsize));

	bdev->lg_bsize = lb_bsize;
	bdev->lg_bcnt = (bdev->ph_bcnt * bdev->ph_bsize) / lb_bsize;
}

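/**@brief Finalize the block device: clear the initialized flag and call the
 *        low-level close().*/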
int ext4_block_fini(struct ext4_blockdev *bdev)
{
	ext4_assert(bdev);

	bdev->flags &= ~(EXT4_BDEV_INITIALIZED);

	/*Low level block fini*/
	return bdev->close(bdev);
}

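/**@brief Write a single dirty, unreferenced buffer back to the device and
 *        remove it from the dirty list.*/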
static int
ext4_block_flush_buf(struct ext4_blockdev *bdev, struct ext4_buf *buf)
{
	int r;
	struct ext4_bcache *bc = bdev->bc;
	/*Only flushing an unreferenced buffer is allowed.*/
	ext4_assert(!buf->refctr);
	if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
		r = ext4_blocks_set_direct(bdev, buf->data, buf->lba, 1);
		if (r)
			return r;

		SLIST_REMOVE(&bc->dirty_list,
			     buf,
			     ext4_buf,
			     dirty_node);
		ext4_bcache_clear_flag(buf, BC_DIRTY);
	}
	return EOK;
}

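/**@brief While the cache is full, evict least-recently-used buffers,
 *        flushing them first if they are dirty.*/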
int ext4_block_cache_shake(struct ext4_blockdev *bdev)
{
	struct ext4_buf *buf;
	while (!RB_EMPTY(&bdev->bc->lru_root) &&
	       ext4_bcache_is_full(bdev->bc)) {

		buf = ext4_buf_lowest_lru(bdev->bc);
		ext4_assert(buf);
		if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
			int r = ext4_block_flush_buf(bdev, buf);
			if (r != EOK)
				return r;
		}

		ext4_bcache_drop_buf(bdev->bc, buf);
	}
	return EOK;
}

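/**@brief Allocate a cache block for logical block @p lba without reading
 *        its data from the device.*/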
int ext4_block_get_noread(struct ext4_blockdev *bdev, struct ext4_block *b,
			  uint64_t lba)
{
	bool is_new;
	int r;

	ext4_assert(bdev && b);

	if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
		return EIO;

	if (!(lba < bdev->lg_bcnt))
		return ERANGE;

	b->dirty = 0;
	b->lb_id = lba;

	/*If cache is full we have to (flush and) drop it anyway :(*/
	r = ext4_block_cache_shake(bdev);
	if (r != EOK)
		return r;

	r = ext4_bcache_alloc(bdev->bc, b, &is_new);
	if (r != EOK)
		return r;

	if (!b->data)
		return ENOMEM;

	return EOK;
}

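/**@brief Get logical block @p lba, reading it from the device unless the
 *        cached copy is already up-to-date.*/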
int ext4_block_get(struct ext4_blockdev *bdev, struct ext4_block *b,
		   uint64_t lba)
{
	uint64_t pba;
	uint32_t pb_cnt;
	int r = ext4_block_get_noread(bdev, b, lba);
	if (r != EOK)
		return r;

	if (b->uptodate) {
		/* Data in the cache is up-to-date.
		 * Reading from the physical device is not required. */
		return EOK;
	}

	pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
	pb_cnt = bdev->lg_bsize / bdev->ph_bsize;

	r = bdev->bread(bdev, b->data, pba, pb_cnt);

	if (r != EOK) {
		ext4_bcache_free(bdev->bc, b, 0);
		b->lb_id = 0;
		return r;
	}

	/* Mark the buffer up-to-date, since fresh data
	 * was read from the physical device just now. */
	ext4_bcache_set_flag(b->buf, BC_UPTODATE);
	b->uptodate = true;
	bdev->bread_ctr++;
	return EOK;
}

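/**@brief Release a block back to the cache. In write-back mode the flush is
 *        deferred; otherwise a dirty block with no other references is
 *        written through immediately.*/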
int ext4_block_set(struct ext4_blockdev *bdev, struct ext4_block *b)
{
	uint64_t pba;
	uint32_t pb_cnt;
	int r;

	ext4_assert(bdev && b);
	ext4_assert(b->buf);

	if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
		return EIO;

	/*Write-back cache mode: the flush is deferred*/
	if (bdev->cache_write_back) {

		/*Release the cache block and mark it as free-delayed*/
		return ext4_bcache_free(bdev->bc, b, bdev->cache_write_back);
	}

	if (b->buf->refctr > 1)
		return ext4_bcache_free(bdev->bc, b, 0);

	/*We handle the dirty flag ourselves.*/
	if (ext4_bcache_test_flag(b->buf, BC_DIRTY) || b->dirty) {
		b->uptodate = true;
		ext4_bcache_set_flag(b->buf, BC_UPTODATE);

		pba = (b->lb_id * bdev->lg_bsize) / bdev->ph_bsize;
		pb_cnt = bdev->lg_bsize / bdev->ph_bsize;

		r = bdev->bwrite(bdev, b->data, pba, pb_cnt);
		ext4_bcache_clear_flag(b->buf, BC_DIRTY);
		if (r != EOK) {
			b->dirty = true;
			ext4_bcache_free(bdev->bc, b, 0);
			return r;
		}

		b->dirty = false;
		bdev->bwrite_ctr++;
	}
	ext4_bcache_free(bdev->bc, b, 0);
	return EOK;
}

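/**@brief Read @p cnt logical blocks directly from the device, bypassing the
 *        block cache.*/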
int ext4_blocks_get_direct(struct ext4_blockdev *bdev, void *buf, uint64_t lba,
			   uint32_t cnt)
{
	uint64_t pba;
	uint32_t pb_cnt;

	ext4_assert(bdev && buf);

	pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
	pb_cnt = bdev->lg_bsize / bdev->ph_bsize;

	bdev->bread_ctr++;
	return bdev->bread(bdev, buf, pba, pb_cnt * cnt);
}

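/**@brief Write @p cnt logical blocks directly to the device, bypassing the
 *        block cache.*/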
int ext4_blocks_set_direct(struct ext4_blockdev *bdev, const void *buf,
			   uint64_t lba, uint32_t cnt)
{
	uint64_t pba;
	uint32_t pb_cnt;

	ext4_assert(bdev && buf);

	pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
	pb_cnt = bdev->lg_bsize / bdev->ph_bsize;

	bdev->bwrite_ctr++;

	return bdev->bwrite(bdev, buf, pba, pb_cnt * cnt);
}

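/**@brief Write @p len bytes at byte offset @p off: a possibly unaligned head
 *        and tail go through the bounce buffer (ph_bbuf), the aligned middle
 *        is written directly.*/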
int ext4_block_writebytes(struct ext4_blockdev *bdev, uint64_t off,
			  const void *buf, uint32_t len)
{
	uint64_t block_idx;
	uint64_t block_end;
	uint32_t blen;
	uint32_t unalg;
	int r = EOK;

	const uint8_t *p = (void *)buf;

	ext4_assert(bdev && buf);

	if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
		return EIO;

	block_idx = off / bdev->ph_bsize;
	block_end = block_idx + len / bdev->ph_bsize;

	if (!(block_end < bdev->ph_bcnt))
		return EINVAL; /*Oops. Out of range operation*/

	/*OK, let's deal with the first possibly unaligned block*/
	unalg = (off & (bdev->ph_bsize - 1));
	if (unalg) {

		uint32_t wlen = (bdev->ph_bsize - unalg) > len
				    ? len
				    : (bdev->ph_bsize - unalg);

		r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
		if (r != EOK)
			return r;

		memcpy(bdev->ph_bbuf + unalg, p, wlen);

		r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
		if (r != EOK)
			return r;

		p += wlen;
		len -= wlen;
		block_idx++;
	}

	/*Aligned data*/
	blen = len / bdev->ph_bsize;
	r = bdev->bwrite(bdev, p, block_idx, blen);
	if (r != EOK)
		return r;

	p += bdev->ph_bsize * blen;
	len -= bdev->ph_bsize * blen;

	block_idx += blen;

	/*Rest of the data*/
	if (len) {
		r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
		if (r != EOK)
			return r;

		memcpy(bdev->ph_bbuf, p, len);

		r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
		if (r != EOK)
			return r;
	}

	return r;
}

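/**@brief Read @p len bytes at byte offset @p off: a possibly unaligned head
 *        and tail go through the bounce buffer (ph_bbuf), the aligned middle
 *        is read directly.*/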
int ext4_block_readbytes(struct ext4_blockdev *bdev, uint64_t off, void *buf,
			 uint32_t len)
{
	uint64_t block_idx;
	uint64_t block_end;
	uint32_t blen;
	uint32_t unalg;
	int r = EOK;

	uint8_t *p = (void *)buf;

	ext4_assert(bdev && buf);

	if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
		return EIO;

	block_idx = off / bdev->ph_bsize;
	block_end = block_idx + len / bdev->ph_bsize;

	if (!(block_end < bdev->ph_bcnt))
		return EINVAL; /*Oops. Out of range operation*/

	/*OK, let's deal with the first possibly unaligned block*/
	unalg = (off & (bdev->ph_bsize - 1));
	if (unalg) {

		uint32_t rlen = (bdev->ph_bsize - unalg) > len
				    ? len
				    : (bdev->ph_bsize - unalg);

		r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
		if (r != EOK)
			return r;

		memcpy(p, bdev->ph_bbuf + unalg, rlen);

		p += rlen;
		len -= rlen;
		block_idx++;
	}

	/*Aligned data*/
	blen = len / bdev->ph_bsize;

	r = bdev->bread(bdev, p, block_idx, blen);
	if (r != EOK)
		return r;

	p += bdev->ph_bsize * blen;
	len -= bdev->ph_bsize * blen;

	block_idx += blen;

	/*Rest of the data*/
	if (len) {
		r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
		if (r != EOK)
			return r;

		memcpy(p, bdev->ph_bbuf, len);
	}

	return r;
}

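/**@brief Enable/disable write-back cache mode (reference counted). When the
 *        counter drops to zero, all buffers on the dirty list are flushed.*/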
int ext4_block_cache_write_back(struct ext4_blockdev *bdev, uint8_t on_off)
{
	int r;
	struct ext4_buf *buf;

	if (on_off)
		bdev->cache_write_back++;

	if (!on_off && bdev->cache_write_back)
		bdev->cache_write_back--;

	if (bdev->cache_write_back)
		return EOK;

	/*Flush all delayed cache blocks*/
	while (!SLIST_EMPTY(&bdev->bc->dirty_list)) {

		buf = SLIST_FIRST(&bdev->bc->dirty_list);
		ext4_assert(buf);
		r = ext4_block_flush_buf(bdev, buf);
		if (r != EOK)
			return r;
	}
	return EOK;
}

/**
 * @}
 */