path: root/fs/ocfs2/resize.c
author    Tao Ma <tao.ma@oracle.com>  2007-12-18 10:47:25 +0300
committer Mark Fasheh <mark.fasheh@oracle.com>  2008-01-26 02:04:24 +0300
commit    7909f2bf835376a20d6dbf853eb459a27566eba2 (patch)
tree      d256570ad048e27741970517929bc3c6d4c2b656 /fs/ocfs2/resize.c
parent    d659072f736837e56b6433d58e5315ad1d4d5ccf (diff)
download  linux-7909f2bf835376a20d6dbf853eb459a27566eba2.tar.xz
[PATCH 2/2] ocfs2: Implement group add for online resize
This patch adds the ability for a userspace program to request that a properly formatted cluster group be added to the main allocation bitmap for an Ocfs2 file system. The request is made via an ioctl, OCFS2_IOC_GROUP_ADD. At a high level this is similar to ext3, but we use a different ioctl because the structure which has to be passed through is different.

During an online resize, tunefs.ocfs2 will format any new cluster groups which must be added to complete the resize, and call OCFS2_IOC_GROUP_ADD on each one. The kernel verifies that the core cluster group information is valid and then does the work of linking it into the global allocation bitmap.

Signed-off-by: Tao Ma <tao.ma@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
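For illustration only (this sketch is not part of the patch or of tunefs.ocfs2): a userspace caller fills in a struct ocfs2_new_group_input describing a group it has already formatted on disk, then passes it to the kernel through OCFS2_IOC_GROUP_ADD on a file descriptor opened within the mounted volume. The struct layout and ioctl number below are copied to match fs/ocfs2/ocfs2_fs.h at the time of this commit; the mount point, field values, and overall flow are hypothetical.

/*
 * Illustrative sketch only -- not part of this patch or of tunefs.ocfs2.
 * The struct layout and ioctl number mirror fs/ocfs2/ocfs2_fs.h at the
 * time of this commit; the mount point and field values are made up.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct ocfs2_new_group_input {
	uint64_t group;		/* block number of the new group descriptor */
	uint32_t clusters;	/* total clusters covered by the new group */
	uint32_t frees;		/* free clusters in the new group */
	uint16_t chain;		/* chain the group should be linked into */
	uint16_t reserved1;
	uint32_t reserved2;
};

#define OCFS2_IOC_GROUP_ADD	_IOW('o', 2, struct ocfs2_new_group_input)

int main(void)
{
	struct ocfs2_new_group_input input;
	int fd, ret;

	/* Hypothetical values; the real tool computes them while it
	 * formats the new group descriptor on disk, before the ioctl. */
	memset(&input, 0, sizeof(input));
	input.group    = 1269760;	/* blkno of the formatted descriptor */
	input.clusters = 32256;		/* cl_cpg of this volume */
	input.frees    = 32255;		/* one bit used by the descriptor itself */
	input.chain    = 5;		/* chain derived from the group's offset */

	/* Any descriptor on the mounted volume will do; the path is made up. */
	fd = open("/mnt/ocfs2", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = ioctl(fd, OCFS2_IOC_GROUP_ADD, &input);
	if (ret < 0)
		perror("OCFS2_IOC_GROUP_ADD");

	close(fd);
	return ret < 0 ? 1 : 0;
}

Note the ordering the commit message describes: the group must be fully formatted on disk before the ioctl is issued, since the kernel only validates the descriptor and links it into the chosen chain of the global bitmap.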
Diffstat (limited to 'fs/ocfs2/resize.c')
-rw-r--r--  fs/ocfs2/resize.c  245
1 file changed, 244 insertions(+), 1 deletion(-)
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index 848f7293f4fc..7791309bb258 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -356,7 +356,7 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
}
mlog(0, "extend the last group at %llu, new clusters = %d\n",
- le64_to_cpu(group->bg_blkno), new_clusters);
+ (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);
handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
if (IS_ERR(handle)) {
@@ -396,3 +396,246 @@ out:
mlog_exit_void();
return ret;
}
+
+static int ocfs2_check_new_group(struct inode *inode,
+ struct ocfs2_dinode *di,
+ struct ocfs2_new_group_input *input,
+ struct buffer_head *group_bh)
+{
+ int ret;
+ struct ocfs2_group_desc *gd;
+ u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);
+ unsigned int max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) *
+ le16_to_cpu(di->id2.i_chain.cl_bpc);
+
+
+ gd = (struct ocfs2_group_desc *)group_bh->b_data;
+
+ ret = -EIO;
+ if (!OCFS2_IS_VALID_GROUP_DESC(gd))
+ mlog(ML_ERROR, "Group descriptor # %llu isn't valid.\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno));
+ else if (di->i_blkno != gd->bg_parent_dinode)
+ mlog(ML_ERROR, "Group descriptor # %llu has bad parent "
+ "pointer (%llu, expected %llu)\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
+ (unsigned long long)le64_to_cpu(di->i_blkno));
+ else if (le16_to_cpu(gd->bg_bits) > max_bits)
+ mlog(ML_ERROR, "Group descriptor # %llu has bit count of %u\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_bits));
+ else if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits))
+ mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
+ "claims that %u are free\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_bits),
+ le16_to_cpu(gd->bg_free_bits_count));
+ else if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size)))
+ mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
+ "max bitmap bits of %u\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_bits),
+ 8 * le16_to_cpu(gd->bg_size));
+ else if (le16_to_cpu(gd->bg_chain) != input->chain)
+ mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
+ "while input has %u set.\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_chain), input->chain);
+ else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
+ mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
+ "input has %u clusters set\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_bits), input->clusters);
+ else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
+ mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
+ "but it should have %u set\n",
+ (unsigned long long)le64_to_cpu(gd->bg_blkno),
+ le16_to_cpu(gd->bg_bits),
+ input->frees * cl_bpc);
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int ocfs2_verify_group_and_input(struct inode *inode,
+ struct ocfs2_dinode *di,
+ struct ocfs2_new_group_input *input,
+ struct buffer_head *group_bh)
+{
+ u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
+ u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
+ u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
+ u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
+ u32 total_clusters = le32_to_cpu(di->i_clusters);
+ int ret = -EINVAL;
+
+ if (cluster < total_clusters)
+ mlog(ML_ERROR, "add a group which is in the current volume.\n");
+ else if (input->chain >= cl_count)
+ mlog(ML_ERROR, "input chain exceeds the limit.\n");
+ else if (next_free != cl_count && next_free != input->chain)
+ mlog(ML_ERROR,
+ "the add group should be in chain %u\n", next_free);
+ else if (total_clusters + input->clusters < total_clusters)
+ mlog(ML_ERROR, "add group's clusters overflow.\n");
+ else if (input->clusters > cl_cpg)
+ mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n");
+ else if (input->frees > input->clusters)
+ mlog(ML_ERROR, "the free cluster exceeds the total clusters\n");
+ else if (total_clusters % cl_cpg != 0)
+ mlog(ML_ERROR,
+ "the last group isn't full. Use group extend first.\n");
+ else if (input->group != ocfs2_which_cluster_group(inode, cluster))
+ mlog(ML_ERROR, "group blkno is invalid\n");
+ else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
+ mlog(ML_ERROR, "group descriptor check failed.\n");
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/* Add a new group descriptor to global_bitmap. */
+int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
+{
+ int ret;
+ handle_t *handle;
+ struct buffer_head *main_bm_bh = NULL;
+ struct inode *main_bm_inode = NULL;
+ struct ocfs2_dinode *fe = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *group_bh = NULL;
+ struct ocfs2_group_desc *group = NULL;
+ struct ocfs2_chain_list *cl;
+ struct ocfs2_chain_rec *cr;
+ u16 cl_bpc;
+
+ mlog_entry_void();
+
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ return -EROFS;
+
+ main_bm_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!main_bm_inode) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ mutex_lock(&main_bm_inode->i_mutex);
+
+ ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_mutex;
+ }
+
+ fe = (struct ocfs2_dinode *)main_bm_bh->b_data;
+
+ if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
+ ocfs2_group_bitmap_size(osb->sb) * 8) {
+ mlog(ML_ERROR, "The disk is too old and small."
+ " Force to do offline resize.");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = ocfs2_read_block(osb, input->group, &group_bh, 0, NULL);
+ if (ret < 0) {
+ mlog(ML_ERROR, "Can't read the group descriptor # %llu "
+ "from the device.", (unsigned long long)input->group);
+ goto out_unlock;
+ }
+
+ ocfs2_set_new_buffer_uptodate(inode, group_bh);
+
+ ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ mlog(0, "Add a new group %llu in chain = %u, length = %u\n",
+ (unsigned long long)input->group, input->chain, input->clusters);
+
+ handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
+ if (IS_ERR(handle)) {
+ mlog_errno(PTR_ERR(handle));
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
+ cl = &fe->id2.i_chain;
+ cr = &cl->cl_recs[input->chain];
+
+ ret = ocfs2_journal_access(handle, main_bm_inode, group_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ group = (struct ocfs2_group_desc *)group_bh->b_data;
+ group->bg_next_group = cr->c_blkno;
+
+ ret = ocfs2_journal_dirty(handle, group_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ ret = ocfs2_journal_access(handle, main_bm_inode, main_bm_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
+ le16_add_cpu(&cl->cl_next_free_rec, 1);
+ memset(cr, 0, sizeof(struct ocfs2_chain_rec));
+ }
+
+ cr->c_blkno = le64_to_cpu(input->group);
+ le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
+ le32_add_cpu(&cr->c_free, input->frees * cl_bpc);
+
+ le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters *cl_bpc);
+ le32_add_cpu(&fe->id1.bitmap1.i_used,
+ (input->clusters - input->frees) * cl_bpc);
+ le32_add_cpu(&fe->i_clusters, input->clusters);
+
+ ocfs2_journal_dirty(handle, main_bm_bh);
+
+ spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
+ OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+ le64_add_cpu(&fe->i_size, input->clusters << osb->s_clustersize_bits);
+ spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
+ i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));
+
+ ocfs2_update_super_and_backups(main_bm_inode, input->clusters);
+
+out_commit:
+ ocfs2_commit_trans(osb, handle);
+out_unlock:
+ if (group_bh)
+ brelse(group_bh);
+
+ if (main_bm_bh)
+ brelse(main_bm_bh);
+
+ ocfs2_inode_unlock(main_bm_inode, 1);
+
+out_mutex:
+ mutex_unlock(&main_bm_inode->i_mutex);
+ iput(main_bm_inode);
+
+out:
+ mlog_exit_void();
+ return ret;
+}