2 * Copyright (c) 2015 Grzegorz Kostka (kostka.grzegorz@gmail.com)
3 * Copyright (c) 2015 Kaho Ng (ngkaho1234@gmail.com)
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * - Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * - Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * - The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 /** @addtogroup lwext4
34 * @file ext4_journal.c
35 * @brief Journal handle functions
38 #include "ext4_config.h"
39 #include "ext4_types.h"
41 #include "ext4_super.h"
42 #include "ext4_errno.h"
43 #include "ext4_blockdev.h"
44 #include "ext4_crc32c.h"
45 #include "ext4_debug.h"
54 RB_ENTRY(revoke_entry) revoke_node;
58 uint32_t start_trans_id;
59 uint32_t last_trans_id;
60 uint32_t this_trans_id;
61 RB_HEAD(jbd_revoke, revoke_entry) revoke_root;
65 struct recover_info *info;
67 uint32_t this_trans_id;
71 jbd_revoke_entry_cmp(struct revoke_entry *a, struct revoke_entry *b)
73 if (a->block > b->block)
75 else if (a->block < b->block)
80 RB_GENERATE_INTERNAL(jbd_revoke, revoke_entry, revoke_node,
81 jbd_revoke_entry_cmp, static inline)
/* Allocator/deallocator pair for revoke-tree nodes. calloc() keeps the
 * embedded RB_ENTRY linkage zero-initialized before insertion. */
83 #define jbd_alloc_revoke_entry() calloc(1, sizeof(struct revoke_entry))
84 #define jbd_free_revoke_entry(addr) free(addr)

/* Forward declaration: translate a logical block inside the journal
 * inode into a physical file-system block number. */
86 int jbd_inode_bmap(struct jbd_fs *jbd_fs,
88 ext4_fsblk_t *fblock);
90 int jbd_sb_write(struct jbd_fs *jbd_fs, struct jbd_sb *s)
93 struct ext4_fs *fs = jbd_fs->inode_ref.fs;
96 rc = jbd_inode_bmap(jbd_fs, 0, &fblock);
100 offset = fblock * ext4_sb_get_block_size(&fs->sb);
101 return ext4_block_writebytes(fs->bdev, offset, s,
102 EXT4_SUPERBLOCK_SIZE);
105 int jbd_sb_read(struct jbd_fs *jbd_fs, struct jbd_sb *s)
108 struct ext4_fs *fs = jbd_fs->inode_ref.fs;
111 rc = jbd_inode_bmap(jbd_fs, 0, &fblock);
115 offset = fblock * ext4_sb_get_block_size(&fs->sb);
116 return ext4_block_readbytes(fs->bdev, offset, s,
117 EXT4_SUPERBLOCK_SIZE);
120 static bool jbd_verify_sb(struct jbd_sb *sb)
122 struct jbd_bhdr *header = &sb->header;
123 if (jbd_get32(header, magic) != JBD_MAGIC_NUMBER)
126 if (jbd_get32(header, blocktype) != JBD_SUPERBLOCK &&
127 jbd_get32(header, blocktype) != JBD_SUPERBLOCK_V2)
133 static int jbd_write_sb(struct jbd_fs *jbd_fs)
137 rc = jbd_sb_write(jbd_fs, &jbd_fs->sb);
141 jbd_fs->dirty = false;
146 int jbd_get_fs(struct ext4_fs *fs,
147 struct jbd_fs *jbd_fs)
150 uint32_t journal_ino;
152 memset(jbd_fs, 0, sizeof(struct jbd_fs));
153 journal_ino = ext4_get32(&fs->sb, journal_inode_number);
155 rc = ext4_fs_get_inode_ref(fs,
159 memset(jbd_fs, 0, sizeof(struct jbd_fs));
162 rc = jbd_sb_read(jbd_fs, &jbd_fs->sb);
164 memset(jbd_fs, 0, sizeof(struct jbd_fs));
165 ext4_fs_put_inode_ref(&jbd_fs->inode_ref);
168 if (!jbd_verify_sb(&jbd_fs->sb)) {
169 memset(jbd_fs, 0, sizeof(struct jbd_fs));
170 ext4_fs_put_inode_ref(&jbd_fs->inode_ref);
177 int jbd_put_fs(struct jbd_fs *jbd_fs)
180 rc = jbd_write_sb(jbd_fs);
182 ext4_fs_put_inode_ref(&jbd_fs->inode_ref);
186 int jbd_inode_bmap(struct jbd_fs *jbd_fs,
188 ext4_fsblk_t *fblock)
190 int rc = ext4_fs_get_inode_dblk_idx(
198 int jbd_block_get(struct jbd_fs *jbd_fs,
199 struct ext4_block *block,
202 /* TODO: journal device. */
204 ext4_lblk_t iblock = (ext4_lblk_t)fblock;
205 rc = jbd_inode_bmap(jbd_fs, iblock,
210 struct ext4_blockdev *bdev = jbd_fs->inode_ref.fs->bdev;
211 rc = ext4_block_get(bdev, block, fblock);
215 int jbd_block_get_noread(struct jbd_fs *jbd_fs,
216 struct ext4_block *block,
219 /* TODO: journal device. */
221 ext4_lblk_t iblock = (ext4_lblk_t)fblock;
222 rc = jbd_inode_bmap(jbd_fs, iblock,
227 struct ext4_blockdev *bdev = jbd_fs->inode_ref.fs->bdev;
228 rc = ext4_block_get_noread(bdev, block, fblock);
232 int jbd_block_set(struct jbd_fs *jbd_fs,
233 struct ext4_block *block)
235 return ext4_block_set(jbd_fs->inode_ref.fs->bdev,
240 * helper functions to deal with 32 or 64bit block numbers.
242 int jbd_tag_bytes(struct jbd_fs *jbd_fs)
246 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
247 JBD_FEATURE_INCOMPAT_CSUM_V3))
248 return sizeof(struct jbd_block_tag3);
250 size = sizeof(struct jbd_block_tag);
252 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
253 JBD_FEATURE_INCOMPAT_CSUM_V2))
254 size += sizeof(uint16_t);
256 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
257 JBD_FEATURE_INCOMPAT_64BIT))
260 return size - sizeof(uint32_t);
263 /**@brief: tag information. */
268 uint8_t uuid[UUID_SIZE];
/**@brief  Decode one on-disk block tag from a descriptor block into
 *         @tag_info (target block number, bytes consumed including an
 *         optional trailing UUID, last-tag flag).
 *
 * NOTE(review): this view of the file is missing interior lines (the
 * original line numbers are kept as prefixes); comments describe only
 * what the visible code demonstrably does. */
273 jbd_extract_block_tag(struct jbd_fs *jbd_fs,
276 int32_t remain_buf_size,
277 struct tag_info *tag_info)
/* Defaults: bare tag, no UUID, not the last one in the table. */
280 tag_info->tag_bytes = tag_bytes;
281 tag_info->uuid_exist = false;
282 tag_info->last_tag = false;
/* Reject when the remaining buffer cannot hold even a bare tag. */
284 if (remain_buf_size - tag_bytes < 0)
/* Checksum-v3 journals use jbd_block_tag3 with 32-bit flags. */
287 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
288 JBD_FEATURE_INCOMPAT_CSUM_V3)) {
289 struct jbd_block_tag3 *tag = __tag;
290 tag_info->block = jbd_get32(tag, blocknr);
/* With the 64-bit feature the high 32 bits are OR-ed in. */
291 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
292 JBD_FEATURE_INCOMPAT_64BIT))
294 (uint64_t)jbd_get32(tag, blocknr_high) << 32;
296 if (jbd_get32(tag, flags) & JBD_FLAG_ESCAPE)
/* Tags without SAME_UUID carry a 16-byte UUID right after the tag. */
299 if (!(jbd_get32(tag, flags) & JBD_FLAG_SAME_UUID)) {
300 if (remain_buf_size - tag_bytes < UUID_SIZE)
303 uuid_start = (char *)tag + tag_bytes;
304 tag_info->uuid_exist = true;
305 tag_info->tag_bytes += UUID_SIZE;
306 memcpy(tag_info->uuid, uuid_start, UUID_SIZE);
309 if (jbd_get32(tag, flags) & JBD_FLAG_LAST_TAG)
310 tag_info->last_tag = true;
/* Classic jbd_block_tag: identical logic but 16-bit flags field. */
313 struct jbd_block_tag *tag = __tag;
314 tag_info->block = jbd_get32(tag, blocknr);
315 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
316 JBD_FEATURE_INCOMPAT_64BIT))
318 (uint64_t)jbd_get32(tag, blocknr_high) << 32;
320 if (jbd_get16(tag, flags) & JBD_FLAG_ESCAPE)
323 if (!(jbd_get16(tag, flags) & JBD_FLAG_SAME_UUID)) {
324 if (remain_buf_size - tag_bytes < UUID_SIZE)
327 uuid_start = (char *)tag + tag_bytes;
328 tag_info->uuid_exist = true;
329 tag_info->tag_bytes += UUID_SIZE;
330 memcpy(tag_info->uuid, uuid_start, UUID_SIZE);
333 if (jbd_get16(tag, flags) & JBD_FLAG_LAST_TAG)
334 tag_info->last_tag = true;
/**@brief  Encode one block tag (inverse of jbd_extract_block_tag)
 *         into a descriptor block buffer, honoring the journal's
 *         checksum/64-bit features and optionally appending a UUID.
 *
 * NOTE(review): interior lines are missing from this view (original
 * line numbers kept as prefixes); comments cover visible code only. */
341 jbd_write_block_tag(struct jbd_fs *jbd_fs,
343 int32_t remain_buf_size,
344 struct tag_info *tag_info)
/* On-disk tag size depends on superblock features. */
347 int tag_bytes = jbd_tag_bytes(jbd_fs);
349 tag_info->tag_bytes = tag_bytes;
/* Fail when the buffer cannot hold even a bare tag. */
351 if (remain_buf_size - tag_bytes < 0)
/* Checksum-v3 layout: jbd_block_tag3, 32-bit flags. */
354 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
355 JBD_FEATURE_INCOMPAT_CSUM_V3)) {
356 struct jbd_block_tag3 *tag = __tag;
357 jbd_set32(tag, blocknr, tag_info->block);
358 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
359 JBD_FEATURE_INCOMPAT_64BIT))
360 jbd_set32(tag, blocknr_high, tag_info->block >> 32);
/* A UUID present in tag_info is copied right after the tag... */
362 if (tag_info->uuid_exist) {
363 if (remain_buf_size - tag_bytes < UUID_SIZE)
366 uuid_start = (char *)tag + tag_bytes;
367 tag_info->tag_bytes += UUID_SIZE;
368 memcpy(uuid_start, tag_info->uuid, UUID_SIZE);
/* ...otherwise the SAME_UUID flag marks UUID reuse. */
370 jbd_set32(tag, flags,
371 jbd_get32(tag, flags) | JBD_FLAG_SAME_UUID);
373 if (tag_info->last_tag)
374 jbd_set32(tag, flags,
375 jbd_get32(tag, flags) | JBD_FLAG_LAST_TAG);
/* Classic layout: jbd_block_tag, 16-bit flags; same logic. */
378 struct jbd_block_tag *tag = __tag;
379 jbd_set32(tag, blocknr, tag_info->block);
380 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
381 JBD_FEATURE_INCOMPAT_64BIT))
382 jbd_set32(tag, blocknr_high, tag_info->block >> 32);
384 if (tag_info->uuid_exist) {
385 if (remain_buf_size - tag_bytes < UUID_SIZE)
388 uuid_start = (char *)tag + tag_bytes;
389 tag_info->tag_bytes += UUID_SIZE;
390 memcpy(uuid_start, tag_info->uuid, UUID_SIZE);
392 jbd_set16(tag, flags,
393 jbd_get16(tag, flags) | JBD_FLAG_SAME_UUID);
395 if (tag_info->last_tag)
396 jbd_set16(tag, flags,
397 jbd_get16(tag, flags) | JBD_FLAG_LAST_TAG);
/**@brief  Walk every block tag of a descriptor block's tag table and
 *         invoke @func for each (block, uuid) pair.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
404 jbd_iterate_block_table(struct jbd_fs *jbd_fs,
406 int32_t tag_tbl_size,
407 void (*func)(struct jbd_fs * jbd_fs,
413 char *tag_start, *tag_ptr;
414 int tag_bytes = jbd_tag_bytes(jbd_fs);
415 tag_start = __tag_start;
/* With checksum v2/v3 the table ends with a jbd_block_tail, which
 * must not be parsed as a tag. */
418 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
419 JBD_FEATURE_INCOMPAT_CSUM_V2) ||
420 JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
421 JBD_FEATURE_INCOMPAT_CSUM_V3))
422 tag_tbl_size -= sizeof(struct jbd_block_tail);
/* Consume tags until the table is exhausted or the LAST_TAG flag is
 * seen; each iteration advances by the decoded tag's size. */
424 while (tag_tbl_size) {
425 struct tag_info tag_info;
426 int rc = jbd_extract_block_tag(jbd_fs,
435 func(jbd_fs, tag_info.block, tag_info.uuid, arg);
437 if (tag_info.last_tag)
440 tag_ptr += tag_info.tag_bytes;
441 tag_tbl_size -= tag_info.tag_bytes;
/**@brief  Debug callback for jbd_iterate_block_table(): logs each
 *         tagged block number. @arg points at the current journal
 *         block counter (tail of the function not visible here). */
445 static void jbd_display_block_tags(struct jbd_fs *jbd_fs,
450 uint32_t *iblock = arg;
451 ext4_dbg(DEBUG_JBD, "Block in block_tag: %" PRIu64 "\n", block);
458 static struct revoke_entry *
459 jbd_revoke_entry_lookup(struct recover_info *info, ext4_fsblk_t block)
461 struct revoke_entry tmp = {
465 return RB_FIND(jbd_revoke, &info->revoke_root, &tmp);
/**@brief  Replay callback: copy one journaled block back to its home
 *         location in the file system, unless a newer revoke record
 *         suppresses it. Superblock blocks get special treatment so
 *         the live mount state/mount count survive the replay.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
468 static void jbd_replay_block_tags(struct jbd_fs *jbd_fs,
470 uint8_t *uuid __unused,
474 struct replay_arg *arg = __arg;
475 struct recover_info *info = arg->info;
476 uint32_t *this_block = arg->this_block;
477 struct revoke_entry *revoke_entry;
478 struct ext4_block journal_block, ext4_block;
479 struct ext4_fs *fs = jbd_fs->inode_ref.fs;
/* Skip replay when the block was revoked by this or a later trans. */
483 revoke_entry = jbd_revoke_entry_lookup(info, block);
485 arg->this_trans_id < revoke_entry->trans_id)
489 "Replaying block in block_tag: %" PRIu64 "\n",
/* Source: the journaled copy at *this_block inside the journal. */
492 r = jbd_block_get(jbd_fs, &journal_block, *this_block);
/* Destination fetched without read — it is fully overwritten. */
497 r = ext4_block_get_noread(fs->bdev, &ext4_block, block);
499 jbd_block_set(jbd_fs, &journal_block);
503 memcpy(ext4_block.data,
505 jbd_get32(&jbd_fs->sb, blocksize));
507 ext4_bcache_set_dirty(ext4_block.buf);
508 ext4_block_set(fs->bdev, &ext4_block);
/* Superblock path: preserve the CURRENT mount_count and state across
 * the replayed (older) superblock image. */
510 uint16_t mount_count, state;
511 mount_count = ext4_get16(&fs->sb, mount_count);
512 state = ext4_get16(&fs->sb, state);
515 journal_block.data + EXT4_SUPERBLOCK_OFFSET,
516 EXT4_SUPERBLOCK_SIZE);
518 /* Mark system as mounted */
519 ext4_set16(&fs->sb, state, state);
520 r = ext4_sb_write(fs->bdev, &fs->sb);
524 /*Update mount count*/
525 ext4_set16(&fs->sb, mount_count, mount_count);
528 jbd_block_set(jbd_fs, &journal_block);
533 static void jbd_add_revoke_block_tags(struct recover_info *info,
536 struct revoke_entry *revoke_entry;
538 ext4_dbg(DEBUG_JBD, "Add block %" PRIu64 " to revoke tree\n", block);
539 revoke_entry = jbd_revoke_entry_lookup(info, block);
541 revoke_entry->trans_id = info->this_trans_id;
545 revoke_entry = jbd_alloc_revoke_entry();
546 ext4_assert(revoke_entry);
547 revoke_entry->block = block;
548 revoke_entry->trans_id = info->this_trans_id;
549 RB_INSERT(jbd_revoke, &info->revoke_root, revoke_entry);
554 static void jbd_destroy_revoke_tree(struct recover_info *info)
556 while (!RB_EMPTY(&info->revoke_root)) {
557 struct revoke_entry *revoke_entry =
558 RB_MIN(jbd_revoke, &info->revoke_root);
559 ext4_assert(revoke_entry);
560 RB_REMOVE(jbd_revoke, &info->revoke_root, revoke_entry);
561 jbd_free_revoke_entry(revoke_entry);
565 /* Make sure we wrap around the log correctly! */
/* The journal is a circular log: when @var runs past the superblock's
 * maxlen it wraps back into the usable area starting at 'first'. */
566 #define wrap(sb, var) \
568 if (var >= jbd_get32((sb), maxlen)) \
569 var -= (jbd_get32((sb), maxlen) - jbd_get32((sb), first)); \

/* Passes for jbd_iterate_log(): scan to find the log bounds, collect
 * revoke records, then replay the journaled blocks. */
572 #define ACTION_SCAN 0
573 #define ACTION_REVOKE 1
574 #define ACTION_RECOVER 2
/**@brief  Parse a revoke block and add every revoked block number
 *         (big-endian on disk) to the recovery revoke tree.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
577 static void jbd_build_revoke_tree(struct jbd_fs *jbd_fs,
578 struct jbd_bhdr *header,
579 struct recover_info *info)
582 struct jbd_revoke_header *revoke_hdr =
583 (struct jbd_revoke_header *)header;
/* Records are 4 bytes, or 8 when the 64-bit feature is enabled. */
584 uint32_t i, nr_entries, record_len = 4;
585 if (JBD_HAS_INCOMPAT_FEATURE(&jbd_fs->sb,
586 JBD_FEATURE_INCOMPAT_64BIT))
/* 'count' covers the header too, hence the subtraction. */
589 nr_entries = (jbd_get32(revoke_hdr, count) -
590 sizeof(struct jbd_revoke_header)) /
/* Records start immediately after the revoke header. */
593 blocks_entry = (char *)(revoke_hdr + 1);
595 for (i = 0;i < nr_entries;i++) {
596 if (record_len == 8) {
598 (uint64_t *)blocks_entry;
599 jbd_add_revoke_block_tags(info, to_be64(*blocks));
602 (uint32_t *)blocks_entry;
603 jbd_add_revoke_block_tags(info, to_be32(*blocks));
605 blocks_entry += record_len;
/**@brief  Dump a descriptor block's tags via the debug callback
 *         (jbd_display_block_tags); used during the SCAN pass. */
609 static void jbd_debug_descriptor_block(struct jbd_fs *jbd_fs,
610 struct jbd_bhdr *header,
/* Tag table starts after the block header and fills the remainder of
 * the journal block. */
613 jbd_iterate_block_table(jbd_fs,
615 jbd_get32(&jbd_fs->sb, blocksize) -
616 sizeof(struct jbd_bhdr),
617 jbd_display_block_tags,
/**@brief  Replay a descriptor block: walk its tag table and copy each
 *         journaled block back to the main file system via the
 *         jbd_replay_block_tags callback. */
621 static void jbd_replay_descriptor_block(struct jbd_fs *jbd_fs,
622 struct jbd_bhdr *header,
623 struct replay_arg *arg)
625 jbd_iterate_block_table(jbd_fs,
627 jbd_get32(&jbd_fs->sb, blocksize) -
628 sizeof(struct jbd_bhdr),
629 jbd_replay_block_tags,
/**@brief  Walk the circular journal log once, dispatching on block
 *         type according to @action (ACTION_SCAN / ACTION_REVOKE /
 *         ACTION_RECOVER). The SCAN pass additionally records the
 *         first/last valid transaction ids into @info.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
633 int jbd_iterate_log(struct jbd_fs *jbd_fs,
634 struct recover_info *info,
638 bool log_end = false;
639 struct jbd_sb *sb = &jbd_fs->sb;
640 uint32_t start_trans_id, this_trans_id;
641 uint32_t start_block, this_block;
/* Iteration starts at the superblock's recorded sequence/start. */
643 start_trans_id = this_trans_id = jbd_get32(sb, sequence);
644 start_block = this_block = jbd_get32(sb, start);
646 ext4_dbg(DEBUG_JBD, "Start of journal at trans id: %" PRIu32 "\n",
650 struct ext4_block block;
651 struct jbd_bhdr *header;
/* Non-SCAN passes stop at the last trans id found by the scan. */
652 if (action != ACTION_SCAN)
653 if (this_trans_id > info->last_trans_id) {
658 r = jbd_block_get(jbd_fs, &block, this_block);
662 header = (struct jbd_bhdr *)block.data;
/* A block without the JBD magic terminates the log. */
663 if (jbd_get32(header, magic) != JBD_MAGIC_NUMBER) {
664 jbd_block_set(jbd_fs, &block);
/* A sequence mismatch likewise marks the end of valid records. */
669 if (jbd_get32(header, sequence) != this_trans_id) {
670 if (action != ACTION_SCAN)
673 jbd_block_set(jbd_fs, &block);
678 switch (jbd_get32(header, blocktype)) {
679 case JBD_DESCRIPTOR_BLOCK:
680 ext4_dbg(DEBUG_JBD, "Descriptor block: %" PRIu32", "
681 "trans_id: %" PRIu32"\n",
682 this_block, this_trans_id);
/* RECOVER replays the described blocks; other passes only log them
 * (which also advances this_block past the data blocks). */
683 if (action == ACTION_RECOVER) {
684 struct replay_arg replay_arg;
685 replay_arg.info = info;
686 replay_arg.this_block = &this_block;
687 replay_arg.this_trans_id = this_trans_id;
689 jbd_replay_descriptor_block(jbd_fs,
690 header, &replay_arg);
692 jbd_debug_descriptor_block(jbd_fs,
693 header, &this_block);
/* A commit block closes the current transaction. */
696 case JBD_COMMIT_BLOCK:
697 ext4_dbg(DEBUG_JBD, "Commit block: %" PRIu32", "
698 "trans_id: %" PRIu32"\n",
699 this_block, this_trans_id);
702 case JBD_REVOKE_BLOCK:
703 ext4_dbg(DEBUG_JBD, "Revoke block: %" PRIu32", "
704 "trans_id: %" PRIu32"\n",
705 this_block, this_trans_id);
/* Only the REVOKE pass materializes the revoke tree. */
706 if (action == ACTION_REVOKE) {
707 info->this_trans_id = this_trans_id;
708 jbd_build_revoke_tree(jbd_fs,
716 jbd_block_set(jbd_fs, &block);
/* Advance around the circular log; a full cycle ends iteration. */
718 wrap(sb, this_block);
719 if (this_block == start_block)
723 ext4_dbg(DEBUG_JBD, "End of journal.\n");
/* SCAN result: publish the discovered transaction id range. */
724 if (r == EOK && action == ACTION_SCAN) {
725 info->start_trans_id = start_trans_id;
726 if (this_trans_id > start_trans_id)
727 info->last_trans_id = this_trans_id - 1;
729 info->last_trans_id = this_trans_id;
/**@brief  Full journal recovery: SCAN to find the valid transaction
 *         range, REVOKE to build the revoke tree, RECOVER to replay,
 *         then clear the on-disk RECOVER flag and reset the journal.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
735 int jbd_recover(struct jbd_fs *jbd_fs)
738 struct recover_info info;
739 struct jbd_sb *sb = &jbd_fs->sb;
743 RB_INIT(&info.revoke_root);
/* Pass 1: determine start/last transaction ids. */
745 r = jbd_iterate_log(jbd_fs, &info, ACTION_SCAN);
/* Pass 2: collect revoke records so stale blocks are skipped. */
749 r = jbd_iterate_log(jbd_fs, &info, ACTION_REVOKE);
/* Pass 3: replay journaled blocks into the main file system. */
753 r = jbd_iterate_log(jbd_fs, &info, ACTION_RECOVER);
755 uint32_t features_incompatible =
756 ext4_get32(&jbd_fs->inode_ref.fs->sb,
757 features_incompatible);
/* start = 0 marks the journal as empty after successful replay. */
758 jbd_set32(&jbd_fs->sb, start, 0);
/* Recovery done: drop EXT4_FINCOM_RECOVER from the fs superblock. */
759 features_incompatible &= ~EXT4_FINCOM_RECOVER;
760 ext4_set32(&jbd_fs->inode_ref.fs->sb,
761 features_incompatible,
762 features_incompatible);
763 jbd_fs->dirty = true;
764 r = ext4_sb_write(jbd_fs->inode_ref.fs->bdev,
765 &jbd_fs->inode_ref.fs->sb);
/* Revoke tree is only needed during recovery — free it. */
767 jbd_destroy_revoke_tree(&info);
771 void jbd_journal_write_sb(struct jbd_journal *journal)
773 struct jbd_fs *jbd_fs = journal->jbd_fs;
774 jbd_set32(&jbd_fs->sb, start, journal->start);
775 jbd_set32(&jbd_fs->sb, sequence, journal->trans_id);
776 jbd_fs->dirty = true;
779 int jbd_journal_start(struct jbd_fs *jbd_fs,
780 struct jbd_journal *journal)
783 uint32_t features_incompatible =
784 ext4_get32(&jbd_fs->inode_ref.fs->sb,
785 features_incompatible);
786 features_incompatible |= EXT4_FINCOM_RECOVER;
787 ext4_set32(&jbd_fs->inode_ref.fs->sb,
788 features_incompatible,
789 features_incompatible);
790 r = ext4_sb_write(jbd_fs->inode_ref.fs->bdev,
791 &jbd_fs->inode_ref.fs->sb);
795 journal->first = jbd_get32(&jbd_fs->sb, first);
796 journal->start = journal->first;
797 journal->last = journal->first;
798 journal->trans_id = 1;
799 journal->alloc_trans_id = 1;
801 journal->block_size = jbd_get32(&jbd_fs->sb, blocksize);
803 TAILQ_INIT(&journal->trans_queue);
804 TAILQ_INIT(&journal->cp_queue);
805 journal->jbd_fs = jbd_fs;
806 jbd_journal_write_sb(journal);
807 return jbd_write_sb(jbd_fs);
810 int jbd_journal_stop(struct jbd_journal *journal)
813 struct jbd_fs *jbd_fs = journal->jbd_fs;
814 uint32_t features_incompatible =
815 ext4_get32(&jbd_fs->inode_ref.fs->sb,
816 features_incompatible);
817 features_incompatible &= ~EXT4_FINCOM_RECOVER;
818 ext4_set32(&jbd_fs->inode_ref.fs->sb,
819 features_incompatible,
820 features_incompatible);
821 r = ext4_sb_write(jbd_fs->inode_ref.fs->bdev,
822 &jbd_fs->inode_ref.fs->sb);
827 journal->trans_id = 0;
828 jbd_journal_write_sb(journal);
829 return jbd_write_sb(journal->jbd_fs);
832 static uint32_t jbd_journal_alloc_block(struct jbd_journal *journal,
833 struct jbd_trans *trans)
835 uint32_t start_block = journal->last++;
836 trans->alloc_blocks++;
837 wrap(&journal->jbd_fs->sb, journal->last);
842 jbd_journal_new_trans(struct jbd_journal *journal)
844 struct jbd_trans *trans = calloc(1, sizeof(struct jbd_trans));
848 /* We will assign a trans_id to this transaction,
849 * once it has been committed.*/
850 trans->journal = journal;
855 static void jbd_trans_end_write(struct ext4_bcache *bc __unused,
856 struct ext4_buf *buf __unused,
860 int jbd_trans_add_block(struct jbd_trans *trans,
861 struct ext4_block *block)
864 /* We do not need to add those unmodified buffer to
866 if (!ext4_bcache_test_flag(block->buf, BC_DIRTY))
869 buf = calloc(1, sizeof(struct jbd_buf));
875 ext4_bcache_inc_ref(block->buf);
877 block->buf->end_write = jbd_trans_end_write;
878 block->buf->end_write_arg = trans;
881 LIST_INSERT_HEAD(&trans->buf_list, buf, buf_node);
885 int jbd_trans_revoke_block(struct jbd_trans *trans,
888 struct jbd_revoke_rec *rec =
889 calloc(1, sizeof(struct jbd_revoke_rec));
894 LIST_INSERT_HEAD(&trans->revoke_list, rec, revoke_node);
/**@brief  Dispose of a transaction: release every attached buffer and
 *         revoke record, then free the transaction itself.
 *
 * NOTE(review): interior lines (including the 'abort' handling and
 * the free() calls) are missing from this view; comments cover
 * visible code only. */
898 void jbd_journal_free_trans(struct jbd_journal *journal,
899 struct jbd_trans *trans,
902 struct jbd_buf *jbd_buf, *tmp;
903 struct jbd_revoke_rec *rec, *tmp2;
904 struct ext4_fs *fs = journal->jbd_fs->inode_ref.fs;
/* Drop every buffer attached to the transaction back to the cache. */
905 LIST_FOREACH_SAFE(jbd_buf, &trans->buf_list, buf_node,
908 ext4_block_set(fs->bdev, &jbd_buf->block);
910 LIST_REMOVE(jbd_buf, buf_node);
/* Unlink (and presumably free) all revoke records as well. */
913 LIST_FOREACH_SAFE(rec, &trans->revoke_list, revoke_node,
915 LIST_REMOVE(rec, revoke_node);
922 static int jbd_trans_write_commit_block(struct jbd_trans *trans)
925 struct jbd_commit_header *header;
926 uint32_t commit_iblock = 0;
927 struct ext4_block commit_block;
928 struct jbd_journal *journal = trans->journal;
930 commit_iblock = jbd_journal_alloc_block(journal, trans);
931 rc = jbd_block_get_noread(journal->jbd_fs,
932 &commit_block, commit_iblock);
936 header = (struct jbd_commit_header *)commit_block.data;
937 jbd_set32(&header->header, magic, JBD_MAGIC_NUMBER);
938 jbd_set32(&header->header, blocktype, JBD_COMMIT_BLOCK);
939 jbd_set32(&header->header, sequence, trans->trans_id);
941 ext4_bcache_set_dirty(commit_block.buf);
942 rc = jbd_block_set(journal->jbd_fs, &commit_block);
/**@brief  Write a transaction's data into the journal: emit one or
 *         more descriptor blocks whose tags describe the data blocks
 *         that follow, copying each attached buffer into a freshly
 *         allocated log block.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
949 static int jbd_journal_prepare(struct jbd_journal *journal,
950 struct jbd_trans *trans)
953 int32_t tag_tbl_size;
954 uint32_t desc_iblock = 0;
955 uint32_t data_iblock = 0;
956 char *tag_start = NULL, *tag_ptr = NULL;
957 struct jbd_buf *jbd_buf;
958 struct ext4_block desc_block, data_block;
960 LIST_FOREACH(jbd_buf, &trans->buf_list, buf_node) {
961 struct tag_info tag_info;
962 bool uuid_exist = false;
/* Open a new descriptor block when none is active (or the previous
 * tag table filled up). */
965 struct jbd_bhdr *bhdr;
966 desc_iblock = jbd_journal_alloc_block(journal, trans);
967 rc = jbd_block_get_noread(journal->jbd_fs,
968 &desc_block, desc_iblock);
972 ext4_bcache_set_dirty(desc_block.buf);
974 bhdr = (struct jbd_bhdr *)desc_block.data;
975 jbd_set32(bhdr, magic, JBD_MAGIC_NUMBER);
976 jbd_set32(bhdr, blocktype, JBD_DESCRIPTOR_BLOCK);
977 jbd_set32(bhdr, sequence, trans->trans_id);
/* Tag table occupies the rest of the descriptor block. */
979 tag_start = (char *)(bhdr + 1);
982 tag_tbl_size = journal->block_size -
983 sizeof(struct jbd_bhdr);
/* Remember where this transaction begins in the log. */
985 if (!trans->start_iblock)
986 trans->start_iblock = desc_iblock;
/* Tag describes the buffer's home (on-disk) block number. */
989 tag_info.block = jbd_buf->block.lb_id;
990 tag_info.uuid_exist = uuid_exist;
991 if (i == trans->data_cnt - 1)
992 tag_info.last_tag = true;
995 memcpy(tag_info.uuid, journal->jbd_fs->sb.uuid,
998 rc = jbd_write_block_tag(journal->jbd_fs,
1003 jbd_block_set(journal->jbd_fs, &desc_block);
/* Copy the buffer's payload into a freshly allocated log block. */
1008 data_iblock = jbd_journal_alloc_block(journal, trans);
1009 rc = jbd_block_get_noread(journal->jbd_fs,
1010 &data_block, data_iblock);
1014 ext4_bcache_set_dirty(data_block.buf);
1016 memcpy(data_block.data, jbd_buf->block.data,
1017 journal->block_size);
1019 rc = jbd_block_set(journal->jbd_fs, &data_block);
/* Advance within the current descriptor's tag table. */
1023 tag_ptr += tag_info.tag_bytes;
1024 tag_tbl_size -= tag_info.tag_bytes;
/* Release the last open descriptor block, if any. */
1028 if (rc == EOK && desc_iblock)
1029 jbd_block_set(journal->jbd_fs, &desc_block);
/**@brief  Write the transaction's revoke records into the journal as
 *         one or more JBD_REVOKE_BLOCKs (big-endian entries, 4 or 8
 *         bytes each depending on the 64-bit feature).
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
1035 jbd_journal_prepare_revoke(struct jbd_journal *journal,
1036 struct jbd_trans *trans)
1038 int rc = EOK, i = 0;
1039 int32_t tag_tbl_size;
1040 uint32_t desc_iblock = 0;
1041 char *blocks_entry = NULL;
1042 struct jbd_revoke_rec *rec, *tmp;
1043 struct ext4_block desc_block;
1044 struct jbd_revoke_header *header = NULL;
/* Entry size: 4 bytes, or 8 with the 64-bit feature. */
1045 int32_t record_len = 4;
1047 if (JBD_HAS_INCOMPAT_FEATURE(&journal->jbd_fs->sb,
1048 JBD_FEATURE_INCOMPAT_64BIT))
1051 LIST_FOREACH_SAFE(rec, &trans->revoke_list, revoke_node,
/* Open a new revoke block when none is active. */
1055 struct jbd_bhdr *bhdr;
1056 desc_iblock = jbd_journal_alloc_block(journal, trans);
1057 rc = jbd_block_get_noread(journal->jbd_fs,
1058 &desc_block, desc_iblock);
1063 ext4_bcache_set_dirty(desc_block.buf);
1065 bhdr = (struct jbd_bhdr *)desc_block.data;
1066 jbd_set32(bhdr, magic, JBD_MAGIC_NUMBER);
1067 jbd_set32(bhdr, blocktype, JBD_REVOKE_BLOCK);
1068 jbd_set32(bhdr, sequence, trans->trans_id);
/* Entries follow the revoke header. */
1070 header = (struct jbd_revoke_header *)bhdr;
1071 blocks_entry = (char *)(header + 1);
1072 tag_tbl_size = journal->block_size -
1073 sizeof(struct jbd_revoke_header);
1075 if (!trans->start_iblock)
1076 trans->start_iblock = desc_iblock;
/* Block full: finalize 'count' and flush it, then start anew. */
1080 if (tag_tbl_size < record_len) {
1081 jbd_set32(header, count,
1082 journal->block_size - tag_tbl_size);
1083 jbd_block_set(journal->jbd_fs, &desc_block);
/* Entries are stored big-endian, as jbd2 defines on disk. */
1088 if (record_len == 8) {
1090 (uint64_t *)blocks_entry;
1091 *blocks = to_be64(rec->lba);
1094 (uint32_t *)blocks_entry;
1095 *blocks = to_be32(rec->lba);
1097 blocks_entry += record_len;
1098 tag_tbl_size -= record_len;
/* Finalize and release the last open revoke block, if any. */
1102 if (rc == EOK && desc_iblock) {
1104 jbd_set32(header, count,
1105 journal->block_size - tag_tbl_size);
1107 jbd_block_set(journal->jbd_fs, &desc_block);
1114 jbd_journal_submit_trans(struct jbd_journal *journal,
1115 struct jbd_trans *trans)
1117 TAILQ_INSERT_TAIL(&journal->trans_queue,
1122 void jbd_journal_cp_trans(struct jbd_journal *journal, struct jbd_trans *trans)
1124 struct jbd_buf *jbd_buf, *tmp;
1125 struct ext4_fs *fs = journal->jbd_fs->inode_ref.fs;
1126 LIST_FOREACH_SAFE(jbd_buf, &trans->buf_list, buf_node,
1128 ext4_block_set(fs->bdev, &jbd_buf->block);
/**@brief  Block-cache end-write callback for journaled buffers.
 *
 * Counts completed writes of a transaction; once all of its buffers
 * have reached the main file system the transaction is retired: the
 * log tail advances past it, the journal superblock is updated and
 * the next queued transaction (if any) is checkpointed.
 *
 * NOTE(review): interior lines are missing from this view; comments
 * cover visible code only. */
1132 static void jbd_trans_end_write(struct ext4_bcache *bc __unused,
1133 struct ext4_buf *buf __unused,
1137 struct jbd_trans *trans = arg;
1138 struct jbd_journal *journal = trans->journal;
1142 trans->written_cnt++;
/* Last buffer of the transaction has hit the disk. */
1143 if (trans->written_cnt == trans->data_cnt) {
1145 TAILQ_REMOVE(&journal->cp_queue, trans, trans_node);
/* Log tail moves past everything this transaction allocated. */
1146 journal->start = trans->start_iblock +
1147 trans->alloc_blocks;
1148 journal->trans_id = trans->trans_id + 1;
1149 jbd_journal_write_sb(journal);
1150 jbd_write_sb(journal->jbd_fs);
1151 jbd_journal_free_trans(journal, trans, false);
/* Kick off checkpointing of the next queued transaction. */
1153 if ((trans = TAILQ_FIRST(&journal->cp_queue))) {
1154 if (trans->data_cnt) {
1155 jbd_journal_cp_trans(journal, trans);
1164 * XXX: one should disable cache writeback first.
1166 void jbd_journal_commit_one(struct jbd_journal *journal)
1169 uint32_t last = journal->last;
1170 struct jbd_trans *trans;
1171 if ((trans = TAILQ_FIRST(&journal->trans_queue))) {
1172 TAILQ_REMOVE(&journal->trans_queue, trans, trans_node);
1174 trans->trans_id = journal->alloc_trans_id;
1175 rc = jbd_journal_prepare(journal, trans);
1179 rc = jbd_journal_prepare_revoke(journal, trans);
1183 rc = jbd_trans_write_commit_block(trans);
1187 journal->alloc_trans_id++;
1188 if (TAILQ_EMPTY(&journal->cp_queue)) {
1189 if (trans->data_cnt) {
1190 TAILQ_INSERT_TAIL(&journal->cp_queue, trans,
1192 jbd_journal_cp_trans(journal, trans);
1194 journal->start = trans->start_iblock +
1195 trans->alloc_blocks;
1196 journal->trans_id = trans->trans_id + 1;
1197 jbd_journal_write_sb(journal);
1198 jbd_write_sb(journal->jbd_fs);
1199 jbd_journal_free_trans(journal, trans, false);
1202 TAILQ_INSERT_TAIL(&journal->cp_queue, trans,
1207 journal->last = last;
1208 jbd_journal_free_trans(journal, trans, true);