author	Timothy Pearson <tpearson@raptorengineering.com>	2017-08-23 14:45:25 -0500
committer	Timothy Pearson <tpearson@raptorengineering.com>	2017-08-23 14:45:25 -0500
commit	fcbb27b0ec6dcbc5a5108cb8fb19eae64593d204 (patch)
tree	22962a4387943edc841c72a4e636a068c66d58fd /fs/ocfs2
Initial import of modified Linux 2.6.28 tree
Original upstream URL:
git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git (branch linux-2.6.28.y)
Diffstat (limited to 'fs/ocfs2')
104 files changed, 65342 insertions, 0 deletions
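
The imported alloc.c in the diff below is built around a small operations vtable, struct ocfs2_extent_tree_operations, so that one body of btree-manipulation code can serve several on-disk root structures (the dinode, an xattr value root, and an xattr tree root each supply their own ops table). As a reader aid, here is a minimal user-space sketch of that vtable pattern; the names (demo_tree, demo_dinode, and so on) are hypothetical stand-ins, not the kernel structures.

/*
 * Minimal user-space sketch of the operations-vtable pattern that
 * alloc.c uses for its extent trees.  Hypothetical, simplified names:
 * a generic "tree" object whose root-specific behaviour is supplied
 * through a table of function pointers, mirroring how
 * ocfs2_extent_tree carries et_ops and an opaque et_object.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_tree;

struct demo_tree_ops {
	void (*set_last_eb_blk)(struct demo_tree *t, uint64_t blkno);
	uint64_t (*get_last_eb_blk)(struct demo_tree *t);
};

struct demo_tree {
	const struct demo_tree_ops *ops;
	void *object;		/* backend-specific root structure */
};

/* One hypothetical backend: a "dinode"-like root. */
struct demo_dinode {
	uint64_t last_eb_blk;
};

static void demo_dinode_set(struct demo_tree *t, uint64_t blkno)
{
	((struct demo_dinode *)t->object)->last_eb_blk = blkno;
}

static uint64_t demo_dinode_get(struct demo_tree *t)
{
	return ((struct demo_dinode *)t->object)->last_eb_blk;
}

static const struct demo_tree_ops demo_dinode_ops = {
	.set_last_eb_blk	= demo_dinode_set,
	.get_last_eb_blk	= demo_dinode_get,
};

int main(void)
{
	struct demo_dinode di = { 0 };
	struct demo_tree t = { .ops = &demo_dinode_ops, .object = &di };

	/* Callers only ever go through the ops table. */
	t.ops->set_last_eb_blk(&t, 1234);
	printf("last_eb_blk = %llu\n",
	       (unsigned long long)t.ops->get_last_eb_blk(&t));
	return 0;
}

Adding a new on-disk extent-tree root in ocfs2 then amounts to writing one ops table plus an ocfs2_init_<thingy>_extent_tree() helper, exactly as the comment at the top of alloc.c describes.
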
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile new file mode 100644 index 0000000..589dcdf --- /dev/null +++ b/fs/ocfs2/Makefile @@ -0,0 +1,46 @@ +EXTRA_CFLAGS += -Ifs/ocfs2 + +EXTRA_CFLAGS += -DCATCH_BH_JBD_RACES + +obj-$(CONFIG_OCFS2_FS) += \ + ocfs2.o \ + ocfs2_stackglue.o + +obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_stack_o2cb.o +obj-$(CONFIG_OCFS2_FS_USERSPACE_CLUSTER) += ocfs2_stack_user.o + +ocfs2-objs := \ + alloc.o \ + aops.o \ + buffer_head_io.o \ + dcache.o \ + dir.o \ + dlmglue.o \ + export.o \ + extent_map.o \ + file.o \ + heartbeat.o \ + inode.o \ + ioctl.o \ + journal.o \ + localalloc.o \ + locks.o \ + mmap.o \ + namei.o \ + resize.o \ + slot_map.o \ + suballoc.o \ + super.o \ + symlink.o \ + sysfile.o \ + uptodate.o \ + ver.o \ + xattr.o + +ocfs2_stackglue-objs := stackglue.o +ocfs2_stack_o2cb-objs := stack_o2cb.o +ocfs2_stack_user-objs := stack_user.o + +# cluster/ is always needed when OCFS2_FS for masklog support +obj-$(CONFIG_OCFS2_FS) += cluster/ +obj-$(CONFIG_OCFS2_FS_O2CB) += dlm/ diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c new file mode 100644 index 0000000..0cc2deb --- /dev/null +++ b/fs/ocfs2/alloc.c @@ -0,0 +1,7104 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * alloc.c + * + * Extent allocs and frees + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/swap.h> + +#define MLOG_MASK_PREFIX ML_DISK_ALLOC +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "aops.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "inode.h" +#include "journal.h" +#include "localalloc.h" +#include "suballoc.h" +#include "sysfile.h" +#include "file.h" +#include "super.h" +#include "uptodate.h" + +#include "buffer_head_io.h" + + +/* + * Operations for a specific extent tree type. + * + * To implement an on-disk btree (extent tree) type in ocfs2, add + * an ocfs2_extent_tree_operations structure and the matching + * ocfs2_init_<thingy>_extent_tree() function. That's pretty much it + * for the allocation portion of the extent tree. + */ +struct ocfs2_extent_tree_operations { + /* + * last_eb_blk is the block number of the right most leaf extent + * block. Most on-disk structures containing an extent tree store + * this value for fast access. The ->eo_set_last_eb_blk() and + * ->eo_get_last_eb_blk() operations access this value. They are + * both required. + */ + void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et, + u64 blkno); + u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et); + + /* + * The on-disk structure usually keeps track of how many total + * clusters are stored in this extent tree. This function updates + * that value. 
new_clusters is the delta, and must be + * added to the total. Required. + */ + void (*eo_update_clusters)(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 new_clusters); + + /* + * If ->eo_insert_check() exists, it is called before rec is + * inserted into the extent tree. It is optional. + */ + int (*eo_insert_check)(struct inode *inode, + struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec); + int (*eo_sanity_check)(struct inode *inode, struct ocfs2_extent_tree *et); + + /* + * -------------------------------------------------------------- + * The remaining are internal to ocfs2_extent_tree and don't have + * accessor functions + */ + + /* + * ->eo_fill_root_el() takes et->et_object and sets et->et_root_el. + * It is required. + */ + void (*eo_fill_root_el)(struct ocfs2_extent_tree *et); + + /* + * ->eo_fill_max_leaf_clusters sets et->et_max_leaf_clusters if + * it exists. If it does not, et->et_max_leaf_clusters is set + * to 0 (unlimited). Optional. + */ + void (*eo_fill_max_leaf_clusters)(struct inode *inode, + struct ocfs2_extent_tree *et); +}; + + +/* + * Pre-declare ocfs2_dinode_et_ops so we can use it as a sanity check + * in the methods. + */ +static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et); +static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et, + u64 blkno); +static void ocfs2_dinode_update_clusters(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 clusters); +static int ocfs2_dinode_insert_check(struct inode *inode, + struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec); +static int ocfs2_dinode_sanity_check(struct inode *inode, + struct ocfs2_extent_tree *et); +static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et); +static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = { + .eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk, + .eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk, + .eo_update_clusters = ocfs2_dinode_update_clusters, + .eo_insert_check = ocfs2_dinode_insert_check, + .eo_sanity_check = ocfs2_dinode_sanity_check, + .eo_fill_root_el = ocfs2_dinode_fill_root_el, +}; + +static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et, + u64 blkno) +{ + struct ocfs2_dinode *di = et->et_object; + + BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); + di->i_last_eb_blk = cpu_to_le64(blkno); +} + +static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et) +{ + struct ocfs2_dinode *di = et->et_object; + + BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); + return le64_to_cpu(di->i_last_eb_blk); +} + +static void ocfs2_dinode_update_clusters(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 clusters) +{ + struct ocfs2_dinode *di = et->et_object; + + le32_add_cpu(&di->i_clusters, clusters); + spin_lock(&OCFS2_I(inode)->ip_lock); + OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters); + spin_unlock(&OCFS2_I(inode)->ip_lock); +} + +static int ocfs2_dinode_insert_check(struct inode *inode, + struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL); + mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) && + (OCFS2_I(inode)->ip_clusters != rec->e_cpos), + "Device %s, asking for sparse allocation: inode %llu, " + "cpos %u, clusters %u\n", + osb->dev_str, + (unsigned long long)OCFS2_I(inode)->ip_blkno, + rec->e_cpos, + OCFS2_I(inode)->ip_clusters); + + return 0; +} + +static int ocfs2_dinode_sanity_check(struct inode *inode, + struct ocfs2_extent_tree 
*et) +{ + int ret = 0; + struct ocfs2_dinode *di; + + BUG_ON(et->et_ops != &ocfs2_dinode_et_ops); + + di = et->et_object; + if (!OCFS2_IS_VALID_DINODE(di)) { + ret = -EIO; + ocfs2_error(inode->i_sb, + "Inode %llu has invalid path root", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + } + + return ret; +} + +static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et) +{ + struct ocfs2_dinode *di = et->et_object; + + et->et_root_el = &di->id2.i_list; +} + + +static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et) +{ + struct ocfs2_xattr_value_root *xv = et->et_object; + + et->et_root_el = &xv->xr_list; +} + +static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et, + u64 blkno) +{ + struct ocfs2_xattr_value_root *xv = + (struct ocfs2_xattr_value_root *)et->et_object; + + xv->xr_last_eb_blk = cpu_to_le64(blkno); +} + +static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et) +{ + struct ocfs2_xattr_value_root *xv = + (struct ocfs2_xattr_value_root *) et->et_object; + + return le64_to_cpu(xv->xr_last_eb_blk); +} + +static void ocfs2_xattr_value_update_clusters(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 clusters) +{ + struct ocfs2_xattr_value_root *xv = + (struct ocfs2_xattr_value_root *)et->et_object; + + le32_add_cpu(&xv->xr_clusters, clusters); +} + +static struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = { + .eo_set_last_eb_blk = ocfs2_xattr_value_set_last_eb_blk, + .eo_get_last_eb_blk = ocfs2_xattr_value_get_last_eb_blk, + .eo_update_clusters = ocfs2_xattr_value_update_clusters, + .eo_fill_root_el = ocfs2_xattr_value_fill_root_el, +}; + +static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et) +{ + struct ocfs2_xattr_block *xb = et->et_object; + + et->et_root_el = &xb->xb_attrs.xb_root.xt_list; +} + +static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct inode *inode, + struct ocfs2_extent_tree *et) +{ + et->et_max_leaf_clusters = + ocfs2_clusters_for_bytes(inode->i_sb, + OCFS2_MAX_XATTR_TREE_LEAF_SIZE); +} + +static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et, + u64 blkno) +{ + struct ocfs2_xattr_block *xb = et->et_object; + struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; + + xt->xt_last_eb_blk = cpu_to_le64(blkno); +} + +static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et) +{ + struct ocfs2_xattr_block *xb = et->et_object; + struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; + + return le64_to_cpu(xt->xt_last_eb_blk); +} + +static void ocfs2_xattr_tree_update_clusters(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 clusters) +{ + struct ocfs2_xattr_block *xb = et->et_object; + + le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters); +} + +static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = { + .eo_set_last_eb_blk = ocfs2_xattr_tree_set_last_eb_blk, + .eo_get_last_eb_blk = ocfs2_xattr_tree_get_last_eb_blk, + .eo_update_clusters = ocfs2_xattr_tree_update_clusters, + .eo_fill_root_el = ocfs2_xattr_tree_fill_root_el, + .eo_fill_max_leaf_clusters = ocfs2_xattr_tree_fill_max_leaf_clusters, +}; + +static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et, + struct inode *inode, + struct buffer_head *bh, + void *obj, + struct ocfs2_extent_tree_operations *ops) +{ + et->et_ops = ops; + et->et_root_bh = bh; + if (!obj) + obj = (void *)bh->b_data; + et->et_object = obj; + + et->et_ops->eo_fill_root_el(et); + if (!et->et_ops->eo_fill_max_leaf_clusters) + et->et_max_leaf_clusters = 0; + else 
+ et->et_ops->eo_fill_max_leaf_clusters(inode, et); +} + +void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et, + struct inode *inode, + struct buffer_head *bh) +{ + __ocfs2_init_extent_tree(et, inode, bh, NULL, &ocfs2_dinode_et_ops); +} + +void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et, + struct inode *inode, + struct buffer_head *bh) +{ + __ocfs2_init_extent_tree(et, inode, bh, NULL, + &ocfs2_xattr_tree_et_ops); +} + +void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, + struct inode *inode, + struct buffer_head *bh, + struct ocfs2_xattr_value_root *xv) +{ + __ocfs2_init_extent_tree(et, inode, bh, xv, + &ocfs2_xattr_value_et_ops); +} + +static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et, + u64 new_last_eb_blk) +{ + et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk); +} + +static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et) +{ + return et->et_ops->eo_get_last_eb_blk(et); +} + +static inline void ocfs2_et_update_clusters(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 clusters) +{ + et->et_ops->eo_update_clusters(inode, et, clusters); +} + +static inline int ocfs2_et_insert_check(struct inode *inode, + struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec) +{ + int ret = 0; + + if (et->et_ops->eo_insert_check) + ret = et->et_ops->eo_insert_check(inode, et, rec); + return ret; +} + +static inline int ocfs2_et_sanity_check(struct inode *inode, + struct ocfs2_extent_tree *et) +{ + int ret = 0; + + if (et->et_ops->eo_sanity_check) + ret = et->et_ops->eo_sanity_check(inode, et); + return ret; +} + +static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc); +static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, + struct ocfs2_extent_block *eb); + +/* + * Structures which describe a path through a btree, and functions to + * manipulate them. + * + * The idea here is to be as generic as possible with the tree + * manipulation code. + */ +struct ocfs2_path_item { + struct buffer_head *bh; + struct ocfs2_extent_list *el; +}; + +#define OCFS2_MAX_PATH_DEPTH 5 + +struct ocfs2_path { + int p_tree_depth; + struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH]; +}; + +#define path_root_bh(_path) ((_path)->p_node[0].bh) +#define path_root_el(_path) ((_path)->p_node[0].el) +#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh) +#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el) +#define path_num_items(_path) ((_path)->p_tree_depth + 1) + +/* + * Reset the actual path elements so that we can re-use the structure + * to build another path. Generally, this involves freeing the buffer + * heads. + */ +static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root) +{ + int i, start = 0, depth = 0; + struct ocfs2_path_item *node; + + if (keep_root) + start = 1; + + for(i = start; i < path_num_items(path); i++) { + node = &path->p_node[i]; + + brelse(node->bh); + node->bh = NULL; + node->el = NULL; + } + + /* + * Tree depth may change during truncate, or insert. If we're + * keeping the root extent list, then make sure that our path + * structure reflects the proper depth. + */ + if (keep_root) + depth = le16_to_cpu(path_root_el(path)->l_tree_depth); + + path->p_tree_depth = depth; +} + +static void ocfs2_free_path(struct ocfs2_path *path) +{ + if (path) { + ocfs2_reinit_path(path, 0); + kfree(path); + } +} + +/* + * All the elements of src into dest. After this call, src could be freed + * without affecting dest. 
+ * + * Both paths should have the same root. Any non-root elements of dest + * will be freed. + */ +static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src) +{ + int i; + + BUG_ON(path_root_bh(dest) != path_root_bh(src)); + BUG_ON(path_root_el(dest) != path_root_el(src)); + + ocfs2_reinit_path(dest, 1); + + for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) { + dest->p_node[i].bh = src->p_node[i].bh; + dest->p_node[i].el = src->p_node[i].el; + + if (dest->p_node[i].bh) + get_bh(dest->p_node[i].bh); + } +} + +/* + * Make the *dest path the same as src and re-initialize src path to + * have a root only. + */ +static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src) +{ + int i; + + BUG_ON(path_root_bh(dest) != path_root_bh(src)); + + for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) { + brelse(dest->p_node[i].bh); + + dest->p_node[i].bh = src->p_node[i].bh; + dest->p_node[i].el = src->p_node[i].el; + + src->p_node[i].bh = NULL; + src->p_node[i].el = NULL; + } +} + +/* + * Insert an extent block at given index. + * + * This will not take an additional reference on eb_bh. + */ +static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index, + struct buffer_head *eb_bh) +{ + struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data; + + /* + * Right now, no root bh is an extent block, so this helps + * catch code errors with dinode trees. The assertion can be + * safely removed if we ever need to insert extent block + * structures at the root. + */ + BUG_ON(index == 0); + + path->p_node[index].bh = eb_bh; + path->p_node[index].el = &eb->h_list; +} + +static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh, + struct ocfs2_extent_list *root_el) +{ + struct ocfs2_path *path; + + BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH); + + path = kzalloc(sizeof(*path), GFP_NOFS); + if (path) { + path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth); + get_bh(root_bh); + path_root_bh(path) = root_bh; + path_root_el(path) = root_el; + } + + return path; +} + +/* + * Convenience function to journal all components in a path. + */ +static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle, + struct ocfs2_path *path) +{ + int i, ret = 0; + + if (!path) + goto out; + + for(i = 0; i < path_num_items(path); i++) { + ret = ocfs2_journal_access(handle, inode, path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + +out: + return ret; +} + +/* + * Return the index of the extent record which contains cluster #v_cluster. + * -1 is returned if it was not found. + * + * Should work fine on interior and exterior nodes. + */ +int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster) +{ + int ret = -1; + int i; + struct ocfs2_extent_rec *rec; + u32 rec_end, rec_start, clusters; + + for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { + rec = &el->l_recs[i]; + + rec_start = le32_to_cpu(rec->e_cpos); + clusters = ocfs2_rec_clusters(el, rec); + + rec_end = rec_start + clusters; + + if (v_cluster >= rec_start && v_cluster < rec_end) { + ret = i; + break; + } + } + + return ret; +} + +enum ocfs2_contig_type { + CONTIG_NONE = 0, + CONTIG_LEFT, + CONTIG_RIGHT, + CONTIG_LEFTRIGHT, +}; + + +/* + * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and + * ocfs2_extent_contig only work properly against leaf nodes! 
+ */ +static int ocfs2_block_extent_contig(struct super_block *sb, + struct ocfs2_extent_rec *ext, + u64 blkno) +{ + u64 blk_end = le64_to_cpu(ext->e_blkno); + + blk_end += ocfs2_clusters_to_blocks(sb, + le16_to_cpu(ext->e_leaf_clusters)); + + return blkno == blk_end; +} + +static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left, + struct ocfs2_extent_rec *right) +{ + u32 left_range; + + left_range = le32_to_cpu(left->e_cpos) + + le16_to_cpu(left->e_leaf_clusters); + + return (left_range == le32_to_cpu(right->e_cpos)); +} + +static enum ocfs2_contig_type + ocfs2_extent_contig(struct inode *inode, + struct ocfs2_extent_rec *ext, + struct ocfs2_extent_rec *insert_rec) +{ + u64 blkno = le64_to_cpu(insert_rec->e_blkno); + + /* + * Refuse to coalesce extent records with different flag + * fields - we don't want to mix unwritten extents with user + * data. + */ + if (ext->e_flags != insert_rec->e_flags) + return CONTIG_NONE; + + if (ocfs2_extents_adjacent(ext, insert_rec) && + ocfs2_block_extent_contig(inode->i_sb, ext, blkno)) + return CONTIG_RIGHT; + + blkno = le64_to_cpu(ext->e_blkno); + if (ocfs2_extents_adjacent(insert_rec, ext) && + ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno)) + return CONTIG_LEFT; + + return CONTIG_NONE; +} + +/* + * NOTE: We can have pretty much any combination of contiguousness and + * appending. + * + * The usefulness of APPEND_TAIL is more in that it lets us know that + * we'll have to update the path to that leaf. + */ +enum ocfs2_append_type { + APPEND_NONE = 0, + APPEND_TAIL, +}; + +enum ocfs2_split_type { + SPLIT_NONE = 0, + SPLIT_LEFT, + SPLIT_RIGHT, +}; + +struct ocfs2_insert_type { + enum ocfs2_split_type ins_split; + enum ocfs2_append_type ins_appending; + enum ocfs2_contig_type ins_contig; + int ins_contig_index; + int ins_tree_depth; +}; + +struct ocfs2_merge_ctxt { + enum ocfs2_contig_type c_contig_type; + int c_has_empty_extent; + int c_split_covers_rec; +}; + +/* + * How many free extents have we got before we need more meta data? 
+ */ +int ocfs2_num_free_extents(struct ocfs2_super *osb, + struct inode *inode, + struct ocfs2_extent_tree *et) +{ + int retval; + struct ocfs2_extent_list *el = NULL; + struct ocfs2_extent_block *eb; + struct buffer_head *eb_bh = NULL; + u64 last_eb_blk = 0; + + mlog_entry_void(); + + el = et->et_root_el; + last_eb_blk = ocfs2_et_get_last_eb_blk(et); + + if (last_eb_blk) { + retval = ocfs2_read_block(inode, last_eb_blk, + &eb_bh); + if (retval < 0) { + mlog_errno(retval); + goto bail; + } + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + } + + BUG_ON(el->l_tree_depth != 0); + + retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec); +bail: + brelse(eb_bh); + + mlog_exit(retval); + return retval; +} + +/* expects array to already be allocated + * + * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and + * l_count for you + */ +static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, + handle_t *handle, + struct inode *inode, + int wanted, + struct ocfs2_alloc_context *meta_ac, + struct buffer_head *bhs[]) +{ + int count, status, i; + u16 suballoc_bit_start; + u32 num_got; + u64 first_blkno; + struct ocfs2_extent_block *eb; + + mlog_entry_void(); + + count = 0; + while (count < wanted) { + status = ocfs2_claim_metadata(osb, + handle, + meta_ac, + wanted - count, + &suballoc_bit_start, + &num_got, + &first_blkno); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + for(i = count; i < (num_got + count); i++) { + bhs[i] = sb_getblk(osb->sb, first_blkno); + if (bhs[i] == NULL) { + status = -EIO; + mlog_errno(status); + goto bail; + } + ocfs2_set_new_buffer_uptodate(inode, bhs[i]); + + status = ocfs2_journal_access(handle, inode, bhs[i], + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + memset(bhs[i]->b_data, 0, osb->sb->s_blocksize); + eb = (struct ocfs2_extent_block *) bhs[i]->b_data; + /* Ok, setup the minimal stuff here. */ + strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); + eb->h_blkno = cpu_to_le64(first_blkno); + eb->h_fs_generation = cpu_to_le32(osb->fs_generation); + eb->h_suballoc_slot = cpu_to_le16(osb->slot_num); + eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); + eb->h_list.l_count = + cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb)); + + suballoc_bit_start++; + first_blkno++; + + /* We'll also be dirtied by the caller, so + * this isn't absolutely necessary. */ + status = ocfs2_journal_dirty(handle, bhs[i]); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + count += num_got; + } + + status = 0; +bail: + if (status < 0) { + for(i = 0; i < wanted; i++) { + brelse(bhs[i]); + bhs[i] = NULL; + } + } + mlog_exit(status); + return status; +} + +/* + * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth(). + * + * Returns the sum of the rightmost extent rec logical offset and + * cluster count. + * + * ocfs2_add_branch() uses this to determine what logical cluster + * value should be populated into the leftmost new branch records. + * + * ocfs2_shift_tree_depth() uses this to determine the # clusters + * value for the new topmost tree record. + */ +static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el) +{ + int i; + + i = le16_to_cpu(el->l_next_free_rec) - 1; + + return le32_to_cpu(el->l_recs[i].e_cpos) + + ocfs2_rec_clusters(el, &el->l_recs[i]); +} + +/* + * Add an entire tree branch to our inode. eb_bh is the extent block + * to start at, if we don't want to start the branch at the dinode + * structure. 
+ * + * last_eb_bh is required as we have to update it's next_leaf pointer + * for the new last extent block. + * + * the new branch will be 'empty' in the sense that every block will + * contain a single record with cluster count == 0. + */ +static int ocfs2_add_branch(struct ocfs2_super *osb, + handle_t *handle, + struct inode *inode, + struct ocfs2_extent_tree *et, + struct buffer_head *eb_bh, + struct buffer_head **last_eb_bh, + struct ocfs2_alloc_context *meta_ac) +{ + int status, new_blocks, i; + u64 next_blkno, new_last_eb_blk; + struct buffer_head *bh; + struct buffer_head **new_eb_bhs = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *eb_el; + struct ocfs2_extent_list *el; + u32 new_cpos; + + mlog_entry_void(); + + BUG_ON(!last_eb_bh || !*last_eb_bh); + + if (eb_bh) { + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + } else + el = et->et_root_el; + + /* we never add a branch to a leaf. */ + BUG_ON(!el->l_tree_depth); + + new_blocks = le16_to_cpu(el->l_tree_depth); + + /* allocate the number of new eb blocks we need */ + new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *), + GFP_KERNEL); + if (!new_eb_bhs) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks, + meta_ac, new_eb_bhs); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data; + new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list); + + /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be + * linked with the rest of the tree. + * conversly, new_eb_bhs[0] is the new bottommost leaf. + * + * when we leave the loop, new_last_eb_blk will point to the + * newest leaf, and next_blkno will point to the topmost extent + * block. */ + next_blkno = new_last_eb_blk = 0; + for(i = 0; i < new_blocks; i++) { + bh = new_eb_bhs[i]; + eb = (struct ocfs2_extent_block *) bh->b_data; + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); + status = -EIO; + goto bail; + } + eb_el = &eb->h_list; + + status = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + eb->h_next_leaf_blk = 0; + eb_el->l_tree_depth = cpu_to_le16(i); + eb_el->l_next_free_rec = cpu_to_le16(1); + /* + * This actually counts as an empty extent as + * c_clusters == 0 + */ + eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos); + eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno); + /* + * eb_el isn't always an interior node, but even leaf + * nodes want a zero'd flags and reserved field so + * this gets the whole 32 bits regardless of use. + */ + eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0); + if (!eb_el->l_tree_depth) + new_last_eb_blk = le64_to_cpu(eb->h_blkno); + + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + next_blkno = le64_to_cpu(eb->h_blkno); + } + + /* This is a bit hairy. We want to update up to three blocks + * here without leaving any of them in an inconsistent state + * in case of error. We don't have to worry about + * journal_dirty erroring as it won't unless we've aborted the + * handle (in which case we would never be here) so reserving + * the write with journal_access is all we need to do. 
*/ + status = ocfs2_journal_access(handle, inode, *last_eb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + status = ocfs2_journal_access(handle, inode, et->et_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + if (eb_bh) { + status = ocfs2_journal_access(handle, inode, eb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + /* Link the new branch into the rest of the tree (el will + * either be on the root_bh, or the extent block passed in. */ + i = le16_to_cpu(el->l_next_free_rec); + el->l_recs[i].e_blkno = cpu_to_le64(next_blkno); + el->l_recs[i].e_cpos = cpu_to_le32(new_cpos); + el->l_recs[i].e_int_clusters = 0; + le16_add_cpu(&el->l_next_free_rec, 1); + + /* fe needs a new last extent block pointer, as does the + * next_leaf on the previously last-extent-block. */ + ocfs2_et_set_last_eb_blk(et, new_last_eb_blk); + + eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; + eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk); + + status = ocfs2_journal_dirty(handle, *last_eb_bh); + if (status < 0) + mlog_errno(status); + status = ocfs2_journal_dirty(handle, et->et_root_bh); + if (status < 0) + mlog_errno(status); + if (eb_bh) { + status = ocfs2_journal_dirty(handle, eb_bh); + if (status < 0) + mlog_errno(status); + } + + /* + * Some callers want to track the rightmost leaf so pass it + * back here. + */ + brelse(*last_eb_bh); + get_bh(new_eb_bhs[0]); + *last_eb_bh = new_eb_bhs[0]; + + status = 0; +bail: + if (new_eb_bhs) { + for (i = 0; i < new_blocks; i++) + brelse(new_eb_bhs[i]); + kfree(new_eb_bhs); + } + + mlog_exit(status); + return status; +} + +/* + * adds another level to the allocation tree. + * returns back the new extent block so you can add a branch to it + * after this call. 
+ */ +static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, + handle_t *handle, + struct inode *inode, + struct ocfs2_extent_tree *et, + struct ocfs2_alloc_context *meta_ac, + struct buffer_head **ret_new_eb_bh) +{ + int status, i; + u32 new_clusters; + struct buffer_head *new_eb_bh = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *root_el; + struct ocfs2_extent_list *eb_el; + + mlog_entry_void(); + + status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac, + &new_eb_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + eb = (struct ocfs2_extent_block *) new_eb_bh->b_data; + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); + status = -EIO; + goto bail; + } + + eb_el = &eb->h_list; + root_el = et->et_root_el; + + status = ocfs2_journal_access(handle, inode, new_eb_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + /* copy the root extent list data into the new extent block */ + eb_el->l_tree_depth = root_el->l_tree_depth; + eb_el->l_next_free_rec = root_el->l_next_free_rec; + for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++) + eb_el->l_recs[i] = root_el->l_recs[i]; + + status = ocfs2_journal_dirty(handle, new_eb_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_journal_access(handle, inode, et->et_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + new_clusters = ocfs2_sum_rightmost_rec(eb_el); + + /* update root_bh now */ + le16_add_cpu(&root_el->l_tree_depth, 1); + root_el->l_recs[0].e_cpos = 0; + root_el->l_recs[0].e_blkno = eb->h_blkno; + root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters); + for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++) + memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec)); + root_el->l_next_free_rec = cpu_to_le16(1); + + /* If this is our 1st tree depth shift, then last_eb_blk + * becomes the allocated extent block */ + if (root_el->l_tree_depth == cpu_to_le16(1)) + ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); + + status = ocfs2_journal_dirty(handle, et->et_root_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + *ret_new_eb_bh = new_eb_bh; + new_eb_bh = NULL; + status = 0; +bail: + brelse(new_eb_bh); + + mlog_exit(status); + return status; +} + +/* + * Should only be called when there is no space left in any of the + * leaf nodes. What we want to do is find the lowest tree depth + * non-leaf extent block with room for new records. There are three + * valid results of this search: + * + * 1) a lowest extent block is found, then we pass it back in + * *lowest_eb_bh and return '0' + * + * 2) the search fails to find anything, but the root_el has room. We + * pass NULL back in *lowest_eb_bh, but still return '0' + * + * 3) the search fails to find anything AND the root_el is full, in + * which case we return > 0 + * + * return status < 0 indicates an error. 
+ */ +static int ocfs2_find_branch_target(struct ocfs2_super *osb, + struct inode *inode, + struct ocfs2_extent_tree *et, + struct buffer_head **target_bh) +{ + int status = 0, i; + u64 blkno; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *el; + struct buffer_head *bh = NULL; + struct buffer_head *lowest_bh = NULL; + + mlog_entry_void(); + + *target_bh = NULL; + + el = et->et_root_el; + + while(le16_to_cpu(el->l_tree_depth) > 1) { + if (le16_to_cpu(el->l_next_free_rec) == 0) { + ocfs2_error(inode->i_sb, "Dinode %llu has empty " + "extent list (next_free_rec == 0)", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + status = -EIO; + goto bail; + } + i = le16_to_cpu(el->l_next_free_rec) - 1; + blkno = le64_to_cpu(el->l_recs[i].e_blkno); + if (!blkno) { + ocfs2_error(inode->i_sb, "Dinode %llu has extent " + "list where extent # %d has no physical " + "block start", + (unsigned long long)OCFS2_I(inode)->ip_blkno, i); + status = -EIO; + goto bail; + } + + brelse(bh); + bh = NULL; + + status = ocfs2_read_block(inode, blkno, &bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + eb = (struct ocfs2_extent_block *) bh->b_data; + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); + status = -EIO; + goto bail; + } + el = &eb->h_list; + + if (le16_to_cpu(el->l_next_free_rec) < + le16_to_cpu(el->l_count)) { + brelse(lowest_bh); + lowest_bh = bh; + get_bh(lowest_bh); + } + } + + /* If we didn't find one and the fe doesn't have any room, + * then return '1' */ + el = et->et_root_el; + if (!lowest_bh && (el->l_next_free_rec == el->l_count)) + status = 1; + + *target_bh = lowest_bh; +bail: + brelse(bh); + + mlog_exit(status); + return status; +} + +/* + * Grow a b-tree so that it has more records. + * + * We might shift the tree depth in which case existing paths should + * be considered invalid. + * + * Tree depth after the grow is returned via *final_depth. + * + * *last_eb_bh will be updated by ocfs2_add_branch(). + */ +static int ocfs2_grow_tree(struct inode *inode, handle_t *handle, + struct ocfs2_extent_tree *et, int *final_depth, + struct buffer_head **last_eb_bh, + struct ocfs2_alloc_context *meta_ac) +{ + int ret, shift; + struct ocfs2_extent_list *el = et->et_root_el; + int depth = le16_to_cpu(el->l_tree_depth); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *bh = NULL; + + BUG_ON(meta_ac == NULL); + + shift = ocfs2_find_branch_target(osb, inode, et, &bh); + if (shift < 0) { + ret = shift; + mlog_errno(ret); + goto out; + } + + /* We traveled all the way to the bottom of the allocation tree + * and didn't find room for any more extents - we need to add + * another tree level */ + if (shift) { + BUG_ON(bh); + mlog(0, "need to shift tree depth (current = %d)\n", depth); + + /* ocfs2_shift_tree_depth will return us a buffer with + * the new extent block (so we can pass that to + * ocfs2_add_branch). */ + ret = ocfs2_shift_tree_depth(osb, handle, inode, et, + meta_ac, &bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + depth++; + if (depth == 1) { + /* + * Special case: we have room now if we shifted from + * tree_depth 0, so no more work needs to be done. + * + * We won't be calling add_branch, so pass + * back *last_eb_bh as the new leaf. At depth + * zero, it should always be null so there's + * no reason to brelse. + */ + BUG_ON(*last_eb_bh); + get_bh(bh); + *last_eb_bh = bh; + goto out; + } + } + + /* call ocfs2_add_branch to add the final part of the tree with + * the new data. 
*/ + mlog(0, "add branch. bh = %p\n", bh); + ret = ocfs2_add_branch(osb, handle, inode, et, bh, last_eb_bh, + meta_ac); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + +out: + if (final_depth) + *final_depth = depth; + brelse(bh); + return ret; +} + +/* + * This function will discard the rightmost extent record. + */ +static void ocfs2_shift_records_right(struct ocfs2_extent_list *el) +{ + int next_free = le16_to_cpu(el->l_next_free_rec); + int count = le16_to_cpu(el->l_count); + unsigned int num_bytes; + + BUG_ON(!next_free); + /* This will cause us to go off the end of our extent list. */ + BUG_ON(next_free >= count); + + num_bytes = sizeof(struct ocfs2_extent_rec) * next_free; + + memmove(&el->l_recs[1], &el->l_recs[0], num_bytes); +} + +static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el, + struct ocfs2_extent_rec *insert_rec) +{ + int i, insert_index, next_free, has_empty, num_bytes; + u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos); + struct ocfs2_extent_rec *rec; + + next_free = le16_to_cpu(el->l_next_free_rec); + has_empty = ocfs2_is_empty_extent(&el->l_recs[0]); + + BUG_ON(!next_free); + + /* The tree code before us didn't allow enough room in the leaf. */ + BUG_ON(el->l_next_free_rec == el->l_count && !has_empty); + + /* + * The easiest way to approach this is to just remove the + * empty extent and temporarily decrement next_free. + */ + if (has_empty) { + /* + * If next_free was 1 (only an empty extent), this + * loop won't execute, which is fine. We still want + * the decrement above to happen. + */ + for(i = 0; i < (next_free - 1); i++) + el->l_recs[i] = el->l_recs[i+1]; + + next_free--; + } + + /* + * Figure out what the new record index should be. + */ + for(i = 0; i < next_free; i++) { + rec = &el->l_recs[i]; + + if (insert_cpos < le32_to_cpu(rec->e_cpos)) + break; + } + insert_index = i; + + mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n", + insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count)); + + BUG_ON(insert_index < 0); + BUG_ON(insert_index >= le16_to_cpu(el->l_count)); + BUG_ON(insert_index > next_free); + + /* + * No need to memmove if we're just adding to the tail. + */ + if (insert_index != next_free) { + BUG_ON(next_free >= le16_to_cpu(el->l_count)); + + num_bytes = next_free - insert_index; + num_bytes *= sizeof(struct ocfs2_extent_rec); + memmove(&el->l_recs[insert_index + 1], + &el->l_recs[insert_index], + num_bytes); + } + + /* + * Either we had an empty extent, and need to re-increment or + * there was no empty extent on a non full rightmost leaf node, + * in which case we still need to increment. + */ + next_free++; + el->l_next_free_rec = cpu_to_le16(next_free); + /* + * Make sure none of the math above just messed up our tree. + */ + BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)); + + el->l_recs[insert_index] = *insert_rec; + +} + +static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el) +{ + int size, num_recs = le16_to_cpu(el->l_next_free_rec); + + BUG_ON(num_recs == 0); + + if (ocfs2_is_empty_extent(&el->l_recs[0])) { + num_recs--; + size = num_recs * sizeof(struct ocfs2_extent_rec); + memmove(&el->l_recs[0], &el->l_recs[1], size); + memset(&el->l_recs[num_recs], 0, + sizeof(struct ocfs2_extent_rec)); + el->l_next_free_rec = cpu_to_le16(num_recs); + } +} + +/* + * Create an empty extent record . + * + * l_next_free_rec may be updated. + * + * If an empty extent already exists do nothing. 
+ */ +static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el) +{ + int next_free = le16_to_cpu(el->l_next_free_rec); + + BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); + + if (next_free == 0) + goto set_and_inc; + + if (ocfs2_is_empty_extent(&el->l_recs[0])) + return; + + mlog_bug_on_msg(el->l_count == el->l_next_free_rec, + "Asked to create an empty extent in a full list:\n" + "count = %u, tree depth = %u", + le16_to_cpu(el->l_count), + le16_to_cpu(el->l_tree_depth)); + + ocfs2_shift_records_right(el); + +set_and_inc: + le16_add_cpu(&el->l_next_free_rec, 1); + memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); +} + +/* + * For a rotation which involves two leaf nodes, the "root node" is + * the lowest level tree node which contains a path to both leafs. This + * resulting set of information can be used to form a complete "subtree" + * + * This function is passed two full paths from the dinode down to a + * pair of adjacent leaves. It's task is to figure out which path + * index contains the subtree root - this can be the root index itself + * in a worst-case rotation. + * + * The array index of the subtree root is passed back. + */ +static int ocfs2_find_subtree_root(struct inode *inode, + struct ocfs2_path *left, + struct ocfs2_path *right) +{ + int i = 0; + + /* + * Check that the caller passed in two paths from the same tree. + */ + BUG_ON(path_root_bh(left) != path_root_bh(right)); + + do { + i++; + + /* + * The caller didn't pass two adjacent paths. + */ + mlog_bug_on_msg(i > left->p_tree_depth, + "Inode %lu, left depth %u, right depth %u\n" + "left leaf blk %llu, right leaf blk %llu\n", + inode->i_ino, left->p_tree_depth, + right->p_tree_depth, + (unsigned long long)path_leaf_bh(left)->b_blocknr, + (unsigned long long)path_leaf_bh(right)->b_blocknr); + } while (left->p_node[i].bh->b_blocknr == + right->p_node[i].bh->b_blocknr); + + return i - 1; +} + +typedef void (path_insert_t)(void *, struct buffer_head *); + +/* + * Traverse a btree path in search of cpos, starting at root_el. + * + * This code can be called with a cpos larger than the tree, in which + * case it will return the rightmost path. + */ +static int __ocfs2_find_path(struct inode *inode, + struct ocfs2_extent_list *root_el, u32 cpos, + path_insert_t *func, void *data) +{ + int i, ret = 0; + u32 range; + u64 blkno; + struct buffer_head *bh = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *el; + struct ocfs2_extent_rec *rec; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + el = root_el; + while (el->l_tree_depth) { + if (le16_to_cpu(el->l_next_free_rec) == 0) { + ocfs2_error(inode->i_sb, + "Inode %llu has empty extent list at " + "depth %u\n", + (unsigned long long)oi->ip_blkno, + le16_to_cpu(el->l_tree_depth)); + ret = -EROFS; + goto out; + + } + + for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) { + rec = &el->l_recs[i]; + + /* + * In the case that cpos is off the allocation + * tree, this should just wind up returning the + * rightmost record. 
+ */ + range = le32_to_cpu(rec->e_cpos) + + ocfs2_rec_clusters(el, rec); + if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range) + break; + } + + blkno = le64_to_cpu(el->l_recs[i].e_blkno); + if (blkno == 0) { + ocfs2_error(inode->i_sb, + "Inode %llu has bad blkno in extent list " + "at depth %u (index %d)\n", + (unsigned long long)oi->ip_blkno, + le16_to_cpu(el->l_tree_depth), i); + ret = -EROFS; + goto out; + } + + brelse(bh); + bh = NULL; + ret = ocfs2_read_block(inode, blkno, &bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) bh->b_data; + el = &eb->h_list; + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); + ret = -EIO; + goto out; + } + + if (le16_to_cpu(el->l_next_free_rec) > + le16_to_cpu(el->l_count)) { + ocfs2_error(inode->i_sb, + "Inode %llu has bad count in extent list " + "at block %llu (next free=%u, count=%u)\n", + (unsigned long long)oi->ip_blkno, + (unsigned long long)bh->b_blocknr, + le16_to_cpu(el->l_next_free_rec), + le16_to_cpu(el->l_count)); + ret = -EROFS; + goto out; + } + + if (func) + func(data, bh); + } + +out: + /* + * Catch any trailing bh that the loop didn't handle. + */ + brelse(bh); + + return ret; +} + +/* + * Given an initialized path (that is, it has a valid root extent + * list), this function will traverse the btree in search of the path + * which would contain cpos. + * + * The path traveled is recorded in the path structure. + * + * Note that this will not do any comparisons on leaf node extent + * records, so it will work fine in the case that we just added a tree + * branch. + */ +struct find_path_data { + int index; + struct ocfs2_path *path; +}; +static void find_path_ins(void *data, struct buffer_head *bh) +{ + struct find_path_data *fp = data; + + get_bh(bh); + ocfs2_path_insert_eb(fp->path, fp->index, bh); + fp->index++; +} +static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path, + u32 cpos) +{ + struct find_path_data data; + + data.index = 1; + data.path = path; + return __ocfs2_find_path(inode, path_root_el(path), cpos, + find_path_ins, &data); +} + +static void find_leaf_ins(void *data, struct buffer_head *bh) +{ + struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data; + struct ocfs2_extent_list *el = &eb->h_list; + struct buffer_head **ret = data; + + /* We want to retain only the leaf block. */ + if (le16_to_cpu(el->l_tree_depth) == 0) { + get_bh(bh); + *ret = bh; + } +} +/* + * Find the leaf block in the tree which would contain cpos. No + * checking of the actual leaf is done. + * + * Some paths want to call this instead of allocating a path structure + * and calling ocfs2_find_path(). + * + * This function doesn't handle non btree extent lists. + */ +int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el, + u32 cpos, struct buffer_head **leaf_bh) +{ + int ret; + struct buffer_head *bh = NULL; + + ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + *leaf_bh = bh; +out: + return ret; +} + +/* + * Adjust the adjacent records (left_rec, right_rec) involved in a rotation. + * + * Basically, we've moved stuff around at the bottom of the tree and + * we need to fix up the extent records above the changes to reflect + * the new changes. + * + * left_rec: the record on the left. 
+ * left_child_el: is the child list pointed to by left_rec + * right_rec: the record to the right of left_rec + * right_child_el: is the child list pointed to by right_rec + * + * By definition, this only works on interior nodes. + */ +static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec, + struct ocfs2_extent_list *left_child_el, + struct ocfs2_extent_rec *right_rec, + struct ocfs2_extent_list *right_child_el) +{ + u32 left_clusters, right_end; + + /* + * Interior nodes never have holes. Their cpos is the cpos of + * the leftmost record in their child list. Their cluster + * count covers the full theoretical range of their child list + * - the range between their cpos and the cpos of the record + * immediately to their right. + */ + left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos); + if (ocfs2_is_empty_extent(&right_child_el->l_recs[0])) { + BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1); + left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos); + } + left_clusters -= le32_to_cpu(left_rec->e_cpos); + left_rec->e_int_clusters = cpu_to_le32(left_clusters); + + /* + * Calculate the rightmost cluster count boundary before + * moving cpos - we will need to adjust clusters after + * updating e_cpos to keep the same highest cluster count. + */ + right_end = le32_to_cpu(right_rec->e_cpos); + right_end += le32_to_cpu(right_rec->e_int_clusters); + + right_rec->e_cpos = left_rec->e_cpos; + le32_add_cpu(&right_rec->e_cpos, left_clusters); + + right_end -= le32_to_cpu(right_rec->e_cpos); + right_rec->e_int_clusters = cpu_to_le32(right_end); +} + +/* + * Adjust the adjacent root node records involved in a + * rotation. left_el_blkno is passed in as a key so that we can easily + * find it's index in the root list. + */ +static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el, + struct ocfs2_extent_list *left_el, + struct ocfs2_extent_list *right_el, + u64 left_el_blkno) +{ + int i; + + BUG_ON(le16_to_cpu(root_el->l_tree_depth) <= + le16_to_cpu(left_el->l_tree_depth)); + + for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) { + if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno) + break; + } + + /* + * The path walking code should have never returned a root and + * two paths which are not adjacent. + */ + BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1)); + + ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el, + &root_el->l_recs[i + 1], right_el); +} + +/* + * We've changed a leaf block (in right_path) and need to reflect that + * change back up the subtree. + * + * This happens in multiple places: + * - When we've moved an extent record from the left path leaf to the right + * path leaf to make room for an empty extent in the left path leaf. + * - When our insert into the right path leaf is at the leftmost edge + * and requires an update of the path immediately to it's left. This + * can occur at the end of some types of rotation and appending inserts. + * - When we've adjusted the last extent record in the left path leaf and the + * 1st extent record in the right path leaf during cross extent block merge. 
+ */ +static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle, + struct ocfs2_path *left_path, + struct ocfs2_path *right_path, + int subtree_index) +{ + int ret, i, idx; + struct ocfs2_extent_list *el, *left_el, *right_el; + struct ocfs2_extent_rec *left_rec, *right_rec; + struct buffer_head *root_bh = left_path->p_node[subtree_index].bh; + + /* + * Update the counts and position values within all the + * interior nodes to reflect the leaf rotation we just did. + * + * The root node is handled below the loop. + * + * We begin the loop with right_el and left_el pointing to the + * leaf lists and work our way up. + * + * NOTE: within this loop, left_el and right_el always refer + * to the *child* lists. + */ + left_el = path_leaf_el(left_path); + right_el = path_leaf_el(right_path); + for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) { + mlog(0, "Adjust records at index %u\n", i); + + /* + * One nice property of knowing that all of these + * nodes are below the root is that we only deal with + * the leftmost right node record and the rightmost + * left node record. + */ + el = left_path->p_node[i].el; + idx = le16_to_cpu(left_el->l_next_free_rec) - 1; + left_rec = &el->l_recs[idx]; + + el = right_path->p_node[i].el; + right_rec = &el->l_recs[0]; + + ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec, + right_el); + + ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh); + if (ret) + mlog_errno(ret); + + ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh); + if (ret) + mlog_errno(ret); + + /* + * Setup our list pointers now so that the current + * parents become children in the next iteration. + */ + left_el = left_path->p_node[i].el; + right_el = right_path->p_node[i].el; + } + + /* + * At the root node, adjust the two adjacent records which + * begin our path to the leaves. + */ + + el = left_path->p_node[subtree_index].el; + left_el = left_path->p_node[subtree_index + 1].el; + right_el = right_path->p_node[subtree_index + 1].el; + + ocfs2_adjust_root_records(el, left_el, right_el, + left_path->p_node[subtree_index + 1].bh->b_blocknr); + + root_bh = left_path->p_node[subtree_index].bh; + + ret = ocfs2_journal_dirty(handle, root_bh); + if (ret) + mlog_errno(ret); +} + +static int ocfs2_rotate_subtree_right(struct inode *inode, + handle_t *handle, + struct ocfs2_path *left_path, + struct ocfs2_path *right_path, + int subtree_index) +{ + int ret, i; + struct buffer_head *right_leaf_bh; + struct buffer_head *left_leaf_bh = NULL; + struct buffer_head *root_bh; + struct ocfs2_extent_list *right_el, *left_el; + struct ocfs2_extent_rec move_rec; + + left_leaf_bh = path_leaf_bh(left_path); + left_el = path_leaf_el(left_path); + + if (left_el->l_next_free_rec != left_el->l_count) { + ocfs2_error(inode->i_sb, + "Inode %llu has non-full interior leaf node %llu" + "(next free = %u)", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)left_leaf_bh->b_blocknr, + le16_to_cpu(left_el->l_next_free_rec)); + return -EROFS; + } + + /* + * This extent block may already have an empty record, so we + * return early if so. 
+ */ + if (ocfs2_is_empty_extent(&left_el->l_recs[0])) + return 0; + + root_bh = left_path->p_node[subtree_index].bh; + BUG_ON(root_bh != right_path->p_node[subtree_index].bh); + + ret = ocfs2_journal_access(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + for(i = subtree_index + 1; i < path_num_items(right_path); i++) { + ret = ocfs2_journal_access(handle, inode, + right_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, + left_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + right_leaf_bh = path_leaf_bh(right_path); + right_el = path_leaf_el(right_path); + + /* This is a code error, not a disk corruption. */ + mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails " + "because rightmost leaf block %llu is empty\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)right_leaf_bh->b_blocknr); + + ocfs2_create_empty_extent(right_el); + + ret = ocfs2_journal_dirty(handle, right_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* Do the copy now. */ + i = le16_to_cpu(left_el->l_next_free_rec) - 1; + move_rec = left_el->l_recs[i]; + right_el->l_recs[0] = move_rec; + + /* + * Clear out the record we just copied and shift everything + * over, leaving an empty extent in the left leaf. + * + * We temporarily subtract from next_free_rec so that the + * shift will lose the tail record (which is now defunct). + */ + le16_add_cpu(&left_el->l_next_free_rec, -1); + ocfs2_shift_records_right(left_el); + memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); + le16_add_cpu(&left_el->l_next_free_rec, 1); + + ret = ocfs2_journal_dirty(handle, left_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_complete_edge_insert(inode, handle, left_path, right_path, + subtree_index); + +out: + return ret; +} + +/* + * Given a full path, determine what cpos value would return us a path + * containing the leaf immediately to the left of the current one. + * + * Will return zero if the path passed in is already the leftmost path. + */ +static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb, + struct ocfs2_path *path, u32 *cpos) +{ + int i, j, ret = 0; + u64 blkno; + struct ocfs2_extent_list *el; + + BUG_ON(path->p_tree_depth == 0); + + *cpos = 0; + + blkno = path_leaf_bh(path)->b_blocknr; + + /* Start at the tree node just above the leaf and work our way up. */ + i = path->p_tree_depth - 1; + while (i >= 0) { + el = path->p_node[i].el; + + /* + * Find the extent record just before the one in our + * path. + */ + for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) { + if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) { + if (j == 0) { + if (i == 0) { + /* + * We've determined that the + * path specified is already + * the leftmost one - return a + * cpos of zero. + */ + goto out; + } + /* + * The leftmost record points to our + * leaf - we need to travel up the + * tree one level. + */ + goto next_node; + } + + *cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos); + *cpos = *cpos + ocfs2_rec_clusters(el, + &el->l_recs[j - 1]); + *cpos = *cpos - 1; + goto out; + } + } + + /* + * If we got here, we never found a valid node where + * the tree indicated one should be. 
+ */ + ocfs2_error(sb, + "Invalid extent tree at extent block %llu\n", + (unsigned long long)blkno); + ret = -EROFS; + goto out; + +next_node: + blkno = path->p_node[i].bh->b_blocknr; + i--; + } + +out: + return ret; +} + +/* + * Extend the transaction by enough credits to complete the rotation, + * and still leave at least the original number of credits allocated + * to this transaction. + */ +static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth, + int op_credits, + struct ocfs2_path *path) +{ + int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits; + + if (handle->h_buffer_credits < credits) + return ocfs2_extend_trans(handle, credits); + + return 0; +} + +/* + * Trap the case where we're inserting into the theoretical range past + * the _actual_ left leaf range. Otherwise, we'll rotate a record + * whose cpos is less than ours into the right leaf. + * + * It's only necessary to look at the rightmost record of the left + * leaf because the logic that calls us should ensure that the + * theoretical ranges in the path components above the leaves are + * correct. + */ +static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path, + u32 insert_cpos) +{ + struct ocfs2_extent_list *left_el; + struct ocfs2_extent_rec *rec; + int next_free; + + left_el = path_leaf_el(left_path); + next_free = le16_to_cpu(left_el->l_next_free_rec); + rec = &left_el->l_recs[next_free - 1]; + + if (insert_cpos > le32_to_cpu(rec->e_cpos)) + return 1; + return 0; +} + +static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos) +{ + int next_free = le16_to_cpu(el->l_next_free_rec); + unsigned int range; + struct ocfs2_extent_rec *rec; + + if (next_free == 0) + return 0; + + rec = &el->l_recs[0]; + if (ocfs2_is_empty_extent(rec)) { + /* Empty list. */ + if (next_free == 1) + return 0; + rec = &el->l_recs[1]; + } + + range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); + if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range) + return 1; + return 0; +} + +/* + * Rotate all the records in a btree right one record, starting at insert_cpos. + * + * The path to the rightmost leaf should be passed in. + * + * The array is assumed to be large enough to hold an entire path (tree depth). + * + * Upon succesful return from this function: + * + * - The 'right_path' array will contain a path to the leaf block + * whose range contains e_cpos. + * - That leaf block will have a single empty extent in list index 0. + * - In the case that the rotation requires a post-insert update, + * *ret_left_path will contain a valid path which can be passed to + * ocfs2_insert_path(). + */ +static int ocfs2_rotate_tree_right(struct inode *inode, + handle_t *handle, + enum ocfs2_split_type split, + u32 insert_cpos, + struct ocfs2_path *right_path, + struct ocfs2_path **ret_left_path) +{ + int ret, start, orig_credits = handle->h_buffer_credits; + u32 cpos; + struct ocfs2_path *left_path = NULL; + + *ret_left_path = NULL; + + left_path = ocfs2_new_path(path_root_bh(right_path), + path_root_el(right_path)); + if (!left_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos); + + /* + * What we want to do here is: + * + * 1) Start with the rightmost path. + * + * 2) Determine a path to the leaf block directly to the left + * of that leaf. 
+ * + * 3) Determine the 'subtree root' - the lowest level tree node + * which contains a path to both leaves. + * + * 4) Rotate the subtree. + * + * 5) Find the next subtree by considering the left path to be + * the new right path. + * + * The check at the top of this while loop also accepts + * insert_cpos == cpos because cpos is only a _theoretical_ + * value to get us the left path - insert_cpos might very well + * be filling that hole. + * + * Stop at a cpos of '0' because we either started at the + * leftmost branch (i.e., a tree with one branch and a + * rotation inside of it), or we've gone as far as we can in + * rotating subtrees. + */ + while (cpos && insert_cpos <= cpos) { + mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n", + insert_cpos, cpos); + + ret = ocfs2_find_path(inode, left_path, cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog_bug_on_msg(path_leaf_bh(left_path) == + path_leaf_bh(right_path), + "Inode %lu: error during insert of %u " + "(left path cpos %u) results in two identical " + "paths ending at %llu\n", + inode->i_ino, insert_cpos, cpos, + (unsigned long long) + path_leaf_bh(left_path)->b_blocknr); + + if (split == SPLIT_NONE && + ocfs2_rotate_requires_path_adjustment(left_path, + insert_cpos)) { + + /* + * We've rotated the tree as much as we + * should. The rest is up to + * ocfs2_insert_path() to complete, after the + * record insertion. We indicate this + * situation by returning the left path. + * + * The reason we don't adjust the records here + * before the record insert is that an error + * later might break the rule where a parent + * record e_cpos will reflect the actual + * e_cpos of the 1st nonempty record of the + * child list. + */ + *ret_left_path = left_path; + goto out_ret_path; + } + + start = ocfs2_find_subtree_root(inode, left_path, right_path); + + mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", + start, + (unsigned long long) right_path->p_node[start].bh->b_blocknr, + right_path->p_tree_depth); + + ret = ocfs2_extend_rotate_transaction(handle, start, + orig_credits, right_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_rotate_subtree_right(inode, handle, left_path, + right_path, start); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (split != SPLIT_NONE && + ocfs2_leftmost_rec_contains(path_leaf_el(right_path), + insert_cpos)) { + /* + * A rotate moves the rightmost left leaf + * record over to the leftmost right leaf + * slot. If we're doing an extent split + * instead of a real insert, then we have to + * check that the extent to be split wasn't + * just moved over. If it was, then we can + * exit here, passing left_path back - + * ocfs2_split_extent() is smart enough to + * search both leaves. + */ + *ret_left_path = left_path; + goto out_ret_path; + } + + /* + * There is no need to re-read the next right path + * as we know that it'll be our current left + * path. Optimize by copying values instead. + */ + ocfs2_mv_path(right_path, left_path); + + ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, + &cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + } + +out: + ocfs2_free_path(left_path); + +out_ret_path: + return ret; +} + +static void ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, + struct ocfs2_path *path) +{ + int i, idx; + struct ocfs2_extent_rec *rec; + struct ocfs2_extent_list *el; + struct ocfs2_extent_block *eb; + u32 range; + + /* Path should always be rightmost. 
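+ *
+ * A worked example with invented numbers: if the rightmost leaf's
+ * last record starts at cpos 100 and covers 28 clusters, range below
+ * becomes 128, and an interior record with e_cpos 64 is rewritten to
+ * e_int_clusters = 128 - 64 = 64, so every ancestor record ends
+ * exactly at the tree's rightmost allocated cluster.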
*/ + eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; + BUG_ON(eb->h_next_leaf_blk != 0ULL); + + el = &eb->h_list; + BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0); + idx = le16_to_cpu(el->l_next_free_rec) - 1; + rec = &el->l_recs[idx]; + range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); + + for (i = 0; i < path->p_tree_depth; i++) { + el = path->p_node[i].el; + idx = le16_to_cpu(el->l_next_free_rec) - 1; + rec = &el->l_recs[idx]; + + rec->e_int_clusters = cpu_to_le32(range); + le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos)); + + ocfs2_journal_dirty(handle, path->p_node[i].bh); + } +} + +static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_path *path, int unlink_start) +{ + int ret, i; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *el; + struct buffer_head *bh; + + for(i = unlink_start; i < path_num_items(path); i++) { + bh = path->p_node[i].bh; + + eb = (struct ocfs2_extent_block *)bh->b_data; + /* + * Not all nodes might have had their final count + * decremented by the caller - handle this here. + */ + el = &eb->h_list; + if (le16_to_cpu(el->l_next_free_rec) > 1) { + mlog(ML_ERROR, + "Inode %llu, attempted to remove extent block " + "%llu with %u records\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)le64_to_cpu(eb->h_blkno), + le16_to_cpu(el->l_next_free_rec)); + + ocfs2_journal_dirty(handle, bh); + ocfs2_remove_from_cache(inode, bh); + continue; + } + + el->l_next_free_rec = 0; + memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); + + ocfs2_journal_dirty(handle, bh); + + ret = ocfs2_cache_extent_block_free(dealloc, eb); + if (ret) + mlog_errno(ret); + + ocfs2_remove_from_cache(inode, bh); + } +} + +static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle, + struct ocfs2_path *left_path, + struct ocfs2_path *right_path, + int subtree_index, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int i; + struct buffer_head *root_bh = left_path->p_node[subtree_index].bh; + struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el; + struct ocfs2_extent_list *el; + struct ocfs2_extent_block *eb; + + el = path_leaf_el(left_path); + + eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data; + + for(i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++) + if (root_el->l_recs[i].e_blkno == eb->h_blkno) + break; + + BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec)); + + memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec)); + le16_add_cpu(&root_el->l_next_free_rec, -1); + + eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; + eb->h_next_leaf_blk = 0; + + ocfs2_journal_dirty(handle, root_bh); + ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); + + ocfs2_unlink_path(inode, handle, dealloc, right_path, + subtree_index + 1); +} + +static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, + struct ocfs2_path *left_path, + struct ocfs2_path *right_path, + int subtree_index, + struct ocfs2_cached_dealloc_ctxt *dealloc, + int *deleted, + struct ocfs2_extent_tree *et) +{ + int ret, i, del_right_subtree = 0, right_has_empty = 0; + struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path); + struct ocfs2_extent_list *right_leaf_el, *left_leaf_el; + struct ocfs2_extent_block *eb; + + *deleted = 0; + + right_leaf_el = path_leaf_el(right_path); + left_leaf_el = path_leaf_el(left_path); + root_bh = left_path->p_node[subtree_index].bh; + 
BUG_ON(root_bh != right_path->p_node[subtree_index].bh); + + if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0])) + return 0; + + eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data; + if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) { + /* + * It's legal for us to proceed if the right leaf is + * the rightmost one and it has an empty extent. There + * are two cases to handle - whether the leaf will be + * empty after removal or not. If the leaf isn't empty + * then just remove the empty extent up front. The + * next block will handle empty leaves by flagging + * them for unlink. + * + * Non rightmost leaves will throw -EAGAIN and the + * caller can manually move the subtree and retry. + */ + + if (eb->h_next_leaf_blk != 0ULL) + return -EAGAIN; + + if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) { + ret = ocfs2_journal_access(handle, inode, + path_leaf_bh(right_path), + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_remove_empty_extent(right_leaf_el); + } else + right_has_empty = 1; + } + + if (eb->h_next_leaf_blk == 0ULL && + le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) { + /* + * We have to update i_last_eb_blk during the meta + * data delete. + */ + ret = ocfs2_journal_access(handle, inode, et_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + del_right_subtree = 1; + } + + /* + * Getting here with an empty extent in the right path implies + * that it's the rightmost path and will be deleted. + */ + BUG_ON(right_has_empty && !del_right_subtree); + + ret = ocfs2_journal_access(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + for(i = subtree_index + 1; i < path_num_items(right_path); i++) { + ret = ocfs2_journal_access(handle, inode, + right_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, + left_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + if (!right_has_empty) { + /* + * Only do this if we're moving a real + * record. Otherwise, the action is delayed until + * after removal of the right path in which case we + * can do a simple shift to remove the empty extent. + */ + ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]); + memset(&right_leaf_el->l_recs[0], 0, + sizeof(struct ocfs2_extent_rec)); + } + if (eb->h_next_leaf_blk == 0ULL) { + /* + * Move recs over to get rid of empty extent, decrease + * next_free. This is allowed to remove the last + * extent in our leaf (setting l_next_free_rec to + * zero) - the delete code below won't care. + */ + ocfs2_remove_empty_extent(right_leaf_el); + } + + ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); + if (ret) + mlog_errno(ret); + ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path)); + if (ret) + mlog_errno(ret); + + if (del_right_subtree) { + ocfs2_unlink_subtree(inode, handle, left_path, right_path, + subtree_index, dealloc); + ocfs2_update_edge_lengths(inode, handle, left_path); + + eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; + ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); + + /* + * Removal of the extent in the left leaf was skipped + * above so we could delete the right path + * 1st. 
+ */ + if (right_has_empty) + ocfs2_remove_empty_extent(left_leaf_el); + + ret = ocfs2_journal_dirty(handle, et_root_bh); + if (ret) + mlog_errno(ret); + + *deleted = 1; + } else + ocfs2_complete_edge_insert(inode, handle, left_path, right_path, + subtree_index); + +out: + return ret; +} + +/* + * Given a full path, determine what cpos value would return us a path + * containing the leaf immediately to the right of the current one. + * + * Will return zero if the path passed in is already the rightmost path. + * + * This looks similar, but is subtly different to + * ocfs2_find_cpos_for_left_leaf(). + */ +static int ocfs2_find_cpos_for_right_leaf(struct super_block *sb, + struct ocfs2_path *path, u32 *cpos) +{ + int i, j, ret = 0; + u64 blkno; + struct ocfs2_extent_list *el; + + *cpos = 0; + + if (path->p_tree_depth == 0) + return 0; + + blkno = path_leaf_bh(path)->b_blocknr; + + /* Start at the tree node just above the leaf and work our way up. */ + i = path->p_tree_depth - 1; + while (i >= 0) { + int next_free; + + el = path->p_node[i].el; + + /* + * Find the extent record just after the one in our + * path. + */ + next_free = le16_to_cpu(el->l_next_free_rec); + for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) { + if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) { + if (j == (next_free - 1)) { + if (i == 0) { + /* + * We've determined that the + * path specified is already + * the rightmost one - return a + * cpos of zero. + */ + goto out; + } + /* + * The rightmost record points to our + * leaf - we need to travel up the + * tree one level. + */ + goto next_node; + } + + *cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos); + goto out; + } + } + + /* + * If we got here, we never found a valid node where + * the tree indicated one should be. + */ + ocfs2_error(sb, + "Invalid extent tree at extent block %llu\n", + (unsigned long long)blkno); + ret = -EROFS; + goto out; + +next_node: + blkno = path->p_node[i].bh->b_blocknr; + i--; + } + +out: + return ret; +} + +static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode, + handle_t *handle, + struct buffer_head *bh, + struct ocfs2_extent_list *el) +{ + int ret; + + if (!ocfs2_is_empty_extent(&el->l_recs[0])) + return 0; + + ret = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_remove_empty_extent(el); + + ret = ocfs2_journal_dirty(handle, bh); + if (ret) + mlog_errno(ret); + +out: + return ret; +} + +static int __ocfs2_rotate_tree_left(struct inode *inode, + handle_t *handle, int orig_credits, + struct ocfs2_path *path, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_path **empty_extent_path, + struct ocfs2_extent_tree *et) +{ + int ret, subtree_root, deleted; + u32 right_cpos; + struct ocfs2_path *left_path = NULL; + struct ocfs2_path *right_path = NULL; + + BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0]))); + + *empty_extent_path = NULL; + + ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, path, + &right_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + left_path = ocfs2_new_path(path_root_bh(path), + path_root_el(path)); + if (!left_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ocfs2_cp_path(left_path, path); + + right_path = ocfs2_new_path(path_root_bh(path), + path_root_el(path)); + if (!right_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + while (right_cpos) { + ret = ocfs2_find_path(inode, right_path, right_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + subtree_root = 
ocfs2_find_subtree_root(inode, left_path, + right_path); + + mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", + subtree_root, + (unsigned long long) + right_path->p_node[subtree_root].bh->b_blocknr, + right_path->p_tree_depth); + + ret = ocfs2_extend_rotate_transaction(handle, subtree_root, + orig_credits, left_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Caller might still want to make changes to the + * tree root, so re-add it to the journal here. + */ + ret = ocfs2_journal_access(handle, inode, + path_root_bh(left_path), + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_rotate_subtree_left(inode, handle, left_path, + right_path, subtree_root, + dealloc, &deleted, et); + if (ret == -EAGAIN) { + /* + * The rotation has to temporarily stop due to + * the right subtree having an empty + * extent. Pass it back to the caller for a + * fixup. + */ + *empty_extent_path = right_path; + right_path = NULL; + goto out; + } + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * The subtree rotate might have removed records on + * the rightmost edge. If so, then rotation is + * complete. + */ + if (deleted) + break; + + ocfs2_mv_path(left_path, right_path); + + ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, + &right_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + } + +out: + ocfs2_free_path(right_path); + ocfs2_free_path(left_path); + + return ret; +} + +static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, + struct ocfs2_path *path, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_extent_tree *et) +{ + int ret, subtree_index; + u32 cpos; + struct ocfs2_path *left_path = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *el; + + + ret = ocfs2_et_sanity_check(inode, et); + if (ret) + goto out; + /* + * There's two ways we handle this depending on + * whether path is the only existing one. + */ + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_path(inode, handle, path); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (cpos) { + /* + * We have a path to the left of this one - it needs + * an update too. + */ + left_path = ocfs2_new_path(path_root_bh(path), + path_root_el(path)); + if (!left_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(inode, left_path, cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_path(inode, handle, left_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + subtree_index = ocfs2_find_subtree_root(inode, left_path, path); + + ocfs2_unlink_subtree(inode, handle, left_path, path, + subtree_index, dealloc); + ocfs2_update_edge_lengths(inode, handle, left_path); + + eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; + ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); + } else { + /* + * 'path' is also the leftmost path which + * means it must be the only one. This gets + * handled differently because we want to + * revert the inode back to having extents + * in-line. 
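+ *
+ * (Illustration only: a depth-1 tree whose single leaf just
+ * lost its last record collapses to l_tree_depth = 0,
+ * l_next_free_rec = 0 and a zeroed l_recs[0], which is
+ * exactly what the code below writes into the root list.)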
+ */ + ocfs2_unlink_path(inode, handle, dealloc, path, 1); + + el = et->et_root_el; + el->l_tree_depth = 0; + el->l_next_free_rec = 0; + memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); + + ocfs2_et_set_last_eb_blk(et, 0); + } + + ocfs2_journal_dirty(handle, path_root_bh(path)); + +out: + ocfs2_free_path(left_path); + return ret; +} + +/* + * Left rotation of btree records. + * + * In many ways, this is (unsurprisingly) the opposite of right + * rotation. We start at some non-rightmost path containing an empty + * extent in the leaf block. The code works its way to the rightmost + * path by rotating records to the left in every subtree. + * + * This is used by any code which reduces the number of extent records + * in a leaf. After removal, an empty record should be placed in the + * leftmost list position. + * + * This won't handle a length update of the rightmost path records if + * the rightmost tree leaf record is removed so the caller is + * responsible for detecting and correcting that. + */ +static int ocfs2_rotate_tree_left(struct inode *inode, handle_t *handle, + struct ocfs2_path *path, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_extent_tree *et) +{ + int ret, orig_credits = handle->h_buffer_credits; + struct ocfs2_path *tmp_path = NULL, *restart_path = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *el; + + el = path_leaf_el(path); + if (!ocfs2_is_empty_extent(&el->l_recs[0])) + return 0; + + if (path->p_tree_depth == 0) { +rightmost_no_delete: + /* + * Inline extents. This is trivially handled, so do + * it up front. + */ + ret = ocfs2_rotate_rightmost_leaf_left(inode, handle, + path_leaf_bh(path), + path_leaf_el(path)); + if (ret) + mlog_errno(ret); + goto out; + } + + /* + * Handle rightmost branch now. There's several cases: + * 1) simple rotation leaving records in there. That's trivial. + * 2) rotation requiring a branch delete - there's no more + * records left. Two cases of this: + * a) There are branches to the left. + * b) This is also the leftmost (the only) branch. + * + * 1) is handled via ocfs2_rotate_rightmost_leaf_left() + * 2a) we need the left branch so that we can update it with the unlink + * 2b) we need to bring the inode back to inline extents. + */ + + eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; + el = &eb->h_list; + if (eb->h_next_leaf_blk == 0) { + /* + * This gets a bit tricky if we're going to delete the + * rightmost path. Get the other cases out of the way + * 1st. + */ + if (le16_to_cpu(el->l_next_free_rec) > 1) + goto rightmost_no_delete; + + if (le16_to_cpu(el->l_next_free_rec) == 0) { + ret = -EIO; + ocfs2_error(inode->i_sb, + "Inode %llu has empty extent block at %llu", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)le64_to_cpu(eb->h_blkno)); + goto out; + } + + /* + * XXX: The caller can not trust "path" any more after + * this as it will have been deleted. What do we do? + * + * In theory the rotate-for-merge code will never get + * here because it'll always ask for a rotate in a + * nonempty list. + */ + + ret = ocfs2_remove_rightmost_path(inode, handle, path, + dealloc, et); + if (ret) + mlog_errno(ret); + goto out; + } + + /* + * Now we can loop, remembering the path we get from -EAGAIN + * and restarting from there. 
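+ *
+ * Hypothetical walk-through: rotating leaf A left hits a right
+ * subtree whose leaf B still carries an empty extent of its own, so
+ * __ocfs2_rotate_tree_left() hands B's path back via -EAGAIN; we
+ * restart the rotation from B's path and repeat until no path
+ * comes back.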
+ */ +try_rotate: + ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, path, + dealloc, &restart_path, et); + if (ret && ret != -EAGAIN) { + mlog_errno(ret); + goto out; + } + + while (ret == -EAGAIN) { + tmp_path = restart_path; + restart_path = NULL; + + ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, + tmp_path, dealloc, + &restart_path, et); + if (ret && ret != -EAGAIN) { + mlog_errno(ret); + goto out; + } + + ocfs2_free_path(tmp_path); + tmp_path = NULL; + + if (ret == 0) + goto try_rotate; + } + +out: + ocfs2_free_path(tmp_path); + ocfs2_free_path(restart_path); + return ret; +} + +static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el, + int index) +{ + struct ocfs2_extent_rec *rec = &el->l_recs[index]; + unsigned int size; + + if (rec->e_leaf_clusters == 0) { + /* + * We consumed all of the merged-from record. An empty + * extent cannot exist anywhere but the 1st array + * position, so move things over if the merged-from + * record doesn't occupy that position. + * + * This creates a new empty extent so the caller + * should be smart enough to have removed any existing + * ones. + */ + if (index > 0) { + BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0])); + size = index * sizeof(struct ocfs2_extent_rec); + memmove(&el->l_recs[1], &el->l_recs[0], size); + } + + /* + * Always memset - the caller doesn't check whether it + * created an empty extent, so there could be junk in + * the other fields. + */ + memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec)); + } +} + +static int ocfs2_get_right_path(struct inode *inode, + struct ocfs2_path *left_path, + struct ocfs2_path **ret_right_path) +{ + int ret; + u32 right_cpos; + struct ocfs2_path *right_path = NULL; + struct ocfs2_extent_list *left_el; + + *ret_right_path = NULL; + + /* This function shouldn't be called for non-trees. */ + BUG_ON(left_path->p_tree_depth == 0); + + left_el = path_leaf_el(left_path); + BUG_ON(left_el->l_next_free_rec != left_el->l_count); + + ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, + &right_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* This function shouldn't be called for the rightmost leaf. */ + BUG_ON(right_cpos == 0); + + right_path = ocfs2_new_path(path_root_bh(left_path), + path_root_el(left_path)); + if (!right_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(inode, right_path, right_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + *ret_right_path = right_path; +out: + if (ret) + ocfs2_free_path(right_path); + return ret; +} + +/* + * Remove split_rec clusters from the record at index and merge them + * onto the beginning of the record "next" to it. + * For index < l_count - 1, the next means the extent rec at index + 1. + * For index == l_count - 1, the "next" means the 1st extent rec of the + * next extent block. 
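+ *
+ * Worked example (numbers invented): with a left record (cpos 0,
+ * 16 clusters), a right record (cpos 16, 16 clusters) and a
+ * split_rec covering the last 4 clusters of the left record, the
+ * merge leaves the left record at 12 clusters and the right record
+ * at (cpos 12, 20 clusters), with e_blkno pulled back by 4
+ * clusters' worth of blocks.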
+ */ +static int ocfs2_merge_rec_right(struct inode *inode, + struct ocfs2_path *left_path, + handle_t *handle, + struct ocfs2_extent_rec *split_rec, + int index) +{ + int ret, next_free, i; + unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters); + struct ocfs2_extent_rec *left_rec; + struct ocfs2_extent_rec *right_rec; + struct ocfs2_extent_list *right_el; + struct ocfs2_path *right_path = NULL; + int subtree_index = 0; + struct ocfs2_extent_list *el = path_leaf_el(left_path); + struct buffer_head *bh = path_leaf_bh(left_path); + struct buffer_head *root_bh = NULL; + + BUG_ON(index >= le16_to_cpu(el->l_next_free_rec)); + left_rec = &el->l_recs[index]; + + if (index == le16_to_cpu(el->l_next_free_rec) - 1 && + le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) { + /* we meet with a cross extent block merge. */ + ret = ocfs2_get_right_path(inode, left_path, &right_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + right_el = path_leaf_el(right_path); + next_free = le16_to_cpu(right_el->l_next_free_rec); + BUG_ON(next_free <= 0); + right_rec = &right_el->l_recs[0]; + if (ocfs2_is_empty_extent(right_rec)) { + BUG_ON(next_free <= 1); + right_rec = &right_el->l_recs[1]; + } + + BUG_ON(le32_to_cpu(left_rec->e_cpos) + + le16_to_cpu(left_rec->e_leaf_clusters) != + le32_to_cpu(right_rec->e_cpos)); + + subtree_index = ocfs2_find_subtree_root(inode, + left_path, right_path); + + ret = ocfs2_extend_rotate_transaction(handle, subtree_index, + handle->h_buffer_credits, + right_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + root_bh = left_path->p_node[subtree_index].bh; + BUG_ON(root_bh != right_path->p_node[subtree_index].bh); + + ret = ocfs2_journal_access(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + for (i = subtree_index + 1; + i < path_num_items(right_path); i++) { + ret = ocfs2_journal_access(handle, inode, + right_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, + left_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + } else { + BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1); + right_rec = &el->l_recs[index + 1]; + } + + ret = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters); + + le32_add_cpu(&right_rec->e_cpos, -split_clusters); + le64_add_cpu(&right_rec->e_blkno, + -ocfs2_clusters_to_blocks(inode->i_sb, split_clusters)); + le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters); + + ocfs2_cleanup_merge(el, index); + + ret = ocfs2_journal_dirty(handle, bh); + if (ret) + mlog_errno(ret); + + if (right_path) { + ret = ocfs2_journal_dirty(handle, path_leaf_bh(right_path)); + if (ret) + mlog_errno(ret); + + ocfs2_complete_edge_insert(inode, handle, left_path, + right_path, subtree_index); + } +out: + if (right_path) + ocfs2_free_path(right_path); + return ret; +} + +static int ocfs2_get_left_path(struct inode *inode, + struct ocfs2_path *right_path, + struct ocfs2_path **ret_left_path) +{ + int ret; + u32 left_cpos; + struct ocfs2_path *left_path = NULL; + + *ret_left_path = NULL; + + /* This function shouldn't be called for non-trees. 
*/ + BUG_ON(right_path->p_tree_depth == 0); + + ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, + right_path, &left_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* This function shouldn't be called for the leftmost leaf. */ + BUG_ON(left_cpos == 0); + + left_path = ocfs2_new_path(path_root_bh(right_path), + path_root_el(right_path)); + if (!left_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(inode, left_path, left_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + *ret_left_path = left_path; +out: + if (ret) + ocfs2_free_path(left_path); + return ret; +} + +/* + * Remove split_rec clusters from the record at index and merge them + * onto the tail of the record "before" it. + * For index > 0, the "before" means the extent rec at index - 1. + * + * For index == 0, the "before" means the last record of the previous + * extent block. And there is also a situation that we may need to + * remove the rightmost leaf extent block in the right_path and change + * the right path to indicate the new rightmost path. + */ +static int ocfs2_merge_rec_left(struct inode *inode, + struct ocfs2_path *right_path, + handle_t *handle, + struct ocfs2_extent_rec *split_rec, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_extent_tree *et, + int index) +{ + int ret, i, subtree_index = 0, has_empty_extent = 0; + unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters); + struct ocfs2_extent_rec *left_rec; + struct ocfs2_extent_rec *right_rec; + struct ocfs2_extent_list *el = path_leaf_el(right_path); + struct buffer_head *bh = path_leaf_bh(right_path); + struct buffer_head *root_bh = NULL; + struct ocfs2_path *left_path = NULL; + struct ocfs2_extent_list *left_el; + + BUG_ON(index < 0); + + right_rec = &el->l_recs[index]; + if (index == 0) { + /* we meet with a cross extent block merge. */ + ret = ocfs2_get_left_path(inode, right_path, &left_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + left_el = path_leaf_el(left_path); + BUG_ON(le16_to_cpu(left_el->l_next_free_rec) != + le16_to_cpu(left_el->l_count)); + + left_rec = &left_el->l_recs[ + le16_to_cpu(left_el->l_next_free_rec) - 1]; + BUG_ON(le32_to_cpu(left_rec->e_cpos) + + le16_to_cpu(left_rec->e_leaf_clusters) != + le32_to_cpu(split_rec->e_cpos)); + + subtree_index = ocfs2_find_subtree_root(inode, + left_path, right_path); + + ret = ocfs2_extend_rotate_transaction(handle, subtree_index, + handle->h_buffer_credits, + left_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + root_bh = left_path->p_node[subtree_index].bh; + BUG_ON(root_bh != right_path->p_node[subtree_index].bh); + + ret = ocfs2_journal_access(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + for (i = subtree_index + 1; + i < path_num_items(right_path); i++) { + ret = ocfs2_journal_access(handle, inode, + right_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, + left_path->p_node[i].bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + } + } else { + left_rec = &el->l_recs[index - 1]; + if (ocfs2_is_empty_extent(&el->l_recs[0])) + has_empty_extent = 1; + } + + ret = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (has_empty_extent && index == 1) { + /* + * The easy case - we can just plop the record right in. 
+ */
+ *left_rec = *split_rec;
+
+ has_empty_extent = 0;
+ } else
+ le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
+
+ le32_add_cpu(&right_rec->e_cpos, split_clusters);
+ le64_add_cpu(&right_rec->e_blkno,
+ ocfs2_clusters_to_blocks(inode->i_sb, split_clusters));
+ le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters);
+
+ ocfs2_cleanup_merge(el, index);
+
+ ret = ocfs2_journal_dirty(handle, bh);
+ if (ret)
+ mlog_errno(ret);
+
+ if (left_path) {
+ ret = ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
+ if (ret)
+ mlog_errno(ret);
+
+ /*
+ * In the situation that the right_rec is empty and the extent
+ * block is empty also, ocfs2_complete_edge_insert can't handle
+ * it and we need to delete the right extent block.
+ */
+ if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
+ le16_to_cpu(el->l_next_free_rec) == 1) {
+
+ ret = ocfs2_remove_rightmost_path(inode, handle,
+ right_path,
+ dealloc, et);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* Now the rightmost extent block has been deleted.
+ * So we use the new rightmost path.
+ */
+ ocfs2_mv_path(right_path, left_path);
+ left_path = NULL;
+ } else
+ ocfs2_complete_edge_insert(inode, handle, left_path,
+ right_path, subtree_index);
+ }
+out:
+ if (left_path)
+ ocfs2_free_path(left_path);
+ return ret;
+}
+
+static int ocfs2_try_to_merge_extent(struct inode *inode,
+ handle_t *handle,
+ struct ocfs2_path *path,
+ int split_index,
+ struct ocfs2_extent_rec *split_rec,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ struct ocfs2_merge_ctxt *ctxt,
+ struct ocfs2_extent_tree *et)
+
+{
+ int ret = 0;
+ struct ocfs2_extent_list *el = path_leaf_el(path);
+ struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
+
+ BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
+
+ if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
+ /*
+ * The merge code will need to create an empty
+ * extent to take the place of the newly
+ * emptied slot. Remove any pre-existing empty
+ * extents - having more than one in a leaf is
+ * illegal.
+ */
+ ret = ocfs2_rotate_tree_left(inode, handle, path,
+ dealloc, et);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ split_index--;
+ rec = &el->l_recs[split_index];
+ }
+
+ if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
+ /*
+ * Left-right contig implies this.
+ */
+ BUG_ON(!ctxt->c_split_covers_rec);
+
+ /*
+ * Since the leftright insert always covers the entire
+ * extent, this call will delete the insert record
+ * entirely, resulting in an empty extent record added to
+ * the extent block.
+ *
+ * Since the adding of an empty extent shifts
+ * everything back to the right, there's no need to
+ * update split_index here.
+ *
+ * When the split_index is zero, we need to merge it to the
+ * previous extent block. It is more efficient and easier
+ * if we do merge_right first and merge_left later.
+ */
+ ret = ocfs2_merge_rec_right(inode, path,
+ handle, split_rec,
+ split_index);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * We can only get this from logic error above.
+ */
+ BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
+
+ /* The merge left us with an empty extent, remove it. */
+ ret = ocfs2_rotate_tree_left(inode, handle, path,
+ dealloc, et);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ rec = &el->l_recs[split_index];
+
+ /*
+ * Note that we don't pass split_rec here on purpose -
+ * we've merged it into the rec already.
+ */ + ret = ocfs2_merge_rec_left(inode, path, + handle, rec, + dealloc, et, + split_index); + + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_rotate_tree_left(inode, handle, path, + dealloc, et); + /* + * Error from this last rotate is not critical, so + * print but don't bubble it up. + */ + if (ret) + mlog_errno(ret); + ret = 0; + } else { + /* + * Merge a record to the left or right. + * + * 'contig_type' is relative to the existing record, + * so for example, if we're "right contig", it's to + * the record on the left (hence the left merge). + */ + if (ctxt->c_contig_type == CONTIG_RIGHT) { + ret = ocfs2_merge_rec_left(inode, + path, + handle, split_rec, + dealloc, et, + split_index); + if (ret) { + mlog_errno(ret); + goto out; + } + } else { + ret = ocfs2_merge_rec_right(inode, + path, + handle, split_rec, + split_index); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + if (ctxt->c_split_covers_rec) { + /* + * The merge may have left an empty extent in + * our leaf. Try to rotate it away. + */ + ret = ocfs2_rotate_tree_left(inode, handle, path, + dealloc, et); + if (ret) + mlog_errno(ret); + ret = 0; + } + } + +out: + return ret; +} + +static void ocfs2_subtract_from_rec(struct super_block *sb, + enum ocfs2_split_type split, + struct ocfs2_extent_rec *rec, + struct ocfs2_extent_rec *split_rec) +{ + u64 len_blocks; + + len_blocks = ocfs2_clusters_to_blocks(sb, + le16_to_cpu(split_rec->e_leaf_clusters)); + + if (split == SPLIT_LEFT) { + /* + * Region is on the left edge of the existing + * record. + */ + le32_add_cpu(&rec->e_cpos, + le16_to_cpu(split_rec->e_leaf_clusters)); + le64_add_cpu(&rec->e_blkno, len_blocks); + le16_add_cpu(&rec->e_leaf_clusters, + -le16_to_cpu(split_rec->e_leaf_clusters)); + } else { + /* + * Region is on the right edge of the existing + * record. + */ + le16_add_cpu(&rec->e_leaf_clusters, + -le16_to_cpu(split_rec->e_leaf_clusters)); + } +} + +/* + * Do the final bits of extent record insertion at the target leaf + * list. If this leaf is part of an allocation tree, it is assumed + * that the tree above has been prepared. + */ +static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec, + struct ocfs2_extent_list *el, + struct ocfs2_insert_type *insert, + struct inode *inode) +{ + int i = insert->ins_contig_index; + unsigned int range; + struct ocfs2_extent_rec *rec; + + BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); + + if (insert->ins_split != SPLIT_NONE) { + i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos)); + BUG_ON(i == -1); + rec = &el->l_recs[i]; + ocfs2_subtract_from_rec(inode->i_sb, insert->ins_split, rec, + insert_rec); + goto rotate; + } + + /* + * Contiguous insert - either left or right. + */ + if (insert->ins_contig != CONTIG_NONE) { + rec = &el->l_recs[i]; + if (insert->ins_contig == CONTIG_LEFT) { + rec->e_blkno = insert_rec->e_blkno; + rec->e_cpos = insert_rec->e_cpos; + } + le16_add_cpu(&rec->e_leaf_clusters, + le16_to_cpu(insert_rec->e_leaf_clusters)); + return; + } + + /* + * Handle insert into an empty leaf. + */ + if (le16_to_cpu(el->l_next_free_rec) == 0 || + ((le16_to_cpu(el->l_next_free_rec) == 1) && + ocfs2_is_empty_extent(&el->l_recs[0]))) { + el->l_recs[0] = *insert_rec; + el->l_next_free_rec = cpu_to_le16(1); + return; + } + + /* + * Appending insert. 
+ */ + if (insert->ins_appending == APPEND_TAIL) { + i = le16_to_cpu(el->l_next_free_rec) - 1; + rec = &el->l_recs[i]; + range = le32_to_cpu(rec->e_cpos) + + le16_to_cpu(rec->e_leaf_clusters); + BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range); + + mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >= + le16_to_cpu(el->l_count), + "inode %lu, depth %u, count %u, next free %u, " + "rec.cpos %u, rec.clusters %u, " + "insert.cpos %u, insert.clusters %u\n", + inode->i_ino, + le16_to_cpu(el->l_tree_depth), + le16_to_cpu(el->l_count), + le16_to_cpu(el->l_next_free_rec), + le32_to_cpu(el->l_recs[i].e_cpos), + le16_to_cpu(el->l_recs[i].e_leaf_clusters), + le32_to_cpu(insert_rec->e_cpos), + le16_to_cpu(insert_rec->e_leaf_clusters)); + i++; + el->l_recs[i] = *insert_rec; + le16_add_cpu(&el->l_next_free_rec, 1); + return; + } + +rotate: + /* + * Ok, we have to rotate. + * + * At this point, it is safe to assume that inserting into an + * empty leaf and appending to a leaf have both been handled + * above. + * + * This leaf needs to have space, either by the empty 1st + * extent record, or by virtue of an l_next_rec < l_count. + */ + ocfs2_rotate_leaf(el, insert_rec); +} + +static void ocfs2_adjust_rightmost_records(struct inode *inode, + handle_t *handle, + struct ocfs2_path *path, + struct ocfs2_extent_rec *insert_rec) +{ + int ret, i, next_free; + struct buffer_head *bh; + struct ocfs2_extent_list *el; + struct ocfs2_extent_rec *rec; + + /* + * Update everything except the leaf block. + */ + for (i = 0; i < path->p_tree_depth; i++) { + bh = path->p_node[i].bh; + el = path->p_node[i].el; + + next_free = le16_to_cpu(el->l_next_free_rec); + if (next_free == 0) { + ocfs2_error(inode->i_sb, + "Dinode %llu has a bad extent list", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + ret = -EIO; + return; + } + + rec = &el->l_recs[next_free - 1]; + + rec->e_int_clusters = insert_rec->e_cpos; + le32_add_cpu(&rec->e_int_clusters, + le16_to_cpu(insert_rec->e_leaf_clusters)); + le32_add_cpu(&rec->e_int_clusters, + -le32_to_cpu(rec->e_cpos)); + + ret = ocfs2_journal_dirty(handle, bh); + if (ret) + mlog_errno(ret); + + } +} + +static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, + struct ocfs2_extent_rec *insert_rec, + struct ocfs2_path *right_path, + struct ocfs2_path **ret_left_path) +{ + int ret, next_free; + struct ocfs2_extent_list *el; + struct ocfs2_path *left_path = NULL; + + *ret_left_path = NULL; + + /* + * This shouldn't happen for non-trees. The extent rec cluster + * count manipulation below only works for interior nodes. + */ + BUG_ON(right_path->p_tree_depth == 0); + + /* + * If our appending insert is at the leftmost edge of a leaf, + * then we might need to update the rightmost records of the + * neighboring path. + */ + el = path_leaf_el(right_path); + next_free = le16_to_cpu(el->l_next_free_rec); + if (next_free == 0 || + (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) { + u32 left_cpos; + + ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, + &left_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog(0, "Append may need a left path update. cpos: %u, " + "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos), + left_cpos); + + /* + * No need to worry if the append is already in the + * leftmost leaf. 
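+ *
+ * (ocfs2_find_cpos_for_left_leaf() signals that case with
+ * left_cpos == 0, which is why the block below only builds
+ * a left path for a nonzero left_cpos.)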
+ */
+ if (left_cpos) {
+ left_path = ocfs2_new_path(path_root_bh(right_path),
+ path_root_el(right_path));
+ if (!left_path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_find_path(inode, left_path, left_cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * ocfs2_insert_path() will pass the left_path to the
+ * journal for us.
+ */
+ }
+ }
+
+ ret = ocfs2_journal_access_path(inode, handle, right_path);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ocfs2_adjust_rightmost_records(inode, handle, right_path, insert_rec);
+
+ *ret_left_path = left_path;
+ ret = 0;
+out:
+ if (ret != 0)
+ ocfs2_free_path(left_path);
+
+ return ret;
+}
+
+static void ocfs2_split_record(struct inode *inode,
+ struct ocfs2_path *left_path,
+ struct ocfs2_path *right_path,
+ struct ocfs2_extent_rec *split_rec,
+ enum ocfs2_split_type split)
+{
+ int index;
+ u32 cpos = le32_to_cpu(split_rec->e_cpos);
+ struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
+ struct ocfs2_extent_rec *rec, *tmprec;
+
+ right_el = path_leaf_el(right_path);
+ if (left_path)
+ left_el = path_leaf_el(left_path);
+
+ el = right_el;
+ insert_el = right_el;
+ index = ocfs2_search_extent_list(el, cpos);
+ if (index != -1) {
+ if (index == 0 && left_path) {
+ BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
+
+ /*
+ * This typically means that the record
+ * started in the left path but moved to the
+ * right as a result of rotation. We either
+ * move the existing record to the left, or we
+ * do the later insert there.
+ *
+ * In this case, the left path should always
+ * exist as the rotate code will have passed
+ * it back for a post-insert update.
+ */
+
+ if (split == SPLIT_LEFT) {
+ /*
+ * It's a left split. Since we know
+ * that the rotate code gave us an
+ * empty extent in the left path, we
+ * can just do the insert there.
+ */
+ insert_el = left_el;
+ } else {
+ /*
+ * Right split - we have to move the
+ * existing record over to the left
+ * leaf. The insert will be into the
+ * newly created empty extent in the
+ * right leaf.
+ */
+ tmprec = &right_el->l_recs[index];
+ ocfs2_rotate_leaf(left_el, tmprec);
+ el = left_el;
+
+ memset(tmprec, 0, sizeof(*tmprec));
+ index = ocfs2_search_extent_list(left_el, cpos);
+ BUG_ON(index == -1);
+ }
+ }
+ } else {
+ BUG_ON(!left_path);
+ BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0]));
+ /*
+ * Left path is easy - we can just allow the insert to
+ * happen.
+ */
+ el = left_el;
+ insert_el = left_el;
+ index = ocfs2_search_extent_list(el, cpos);
+ BUG_ON(index == -1);
+ }
+
+ rec = &el->l_recs[index];
+ ocfs2_subtract_from_rec(inode->i_sb, split, rec, split_rec);
+ ocfs2_rotate_leaf(insert_el, split_rec);
+}
+
+/*
+ * This function only does inserts on an allocation b-tree. For tree
+ * depth = 0, ocfs2_insert_at_leaf() is called directly.
+ *
+ * right_path is the path we want to do the actual insert
+ * in. left_path should only be passed in if we need to update that
+ * portion of the tree after an edge insert.
+ */
+static int ocfs2_insert_path(struct inode *inode,
+ handle_t *handle,
+ struct ocfs2_path *left_path,
+ struct ocfs2_path *right_path,
+ struct ocfs2_extent_rec *insert_rec,
+ struct ocfs2_insert_type *insert)
+{
+ int ret, subtree_index;
+ struct buffer_head *leaf_bh = path_leaf_bh(right_path);
+
+ if (left_path) {
+ int credits = handle->h_buffer_credits;
+
+ /*
+ * There's a chance that left_path got passed back to
+ * us without being accounted for in the
+ * journal. Extend our transaction here to be sure we
+ * can change those blocks.
+ */
+ credits += left_path->p_tree_depth;
+
+ ret = ocfs2_extend_trans(handle, credits);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_path(inode, handle, left_path);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ /*
+ * Pass both paths to the journal. The majority of inserts
+ * will be touching all components anyway.
+ */
+ ret = ocfs2_journal_access_path(inode, handle, right_path);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (insert->ins_split != SPLIT_NONE) {
+ /*
+ * We could call ocfs2_insert_at_leaf() for some types
+ * of splits, but it's easier to just let one separate
+ * function sort it all out.
+ */
+ ocfs2_split_record(inode, left_path, right_path,
+ insert_rec, insert->ins_split);
+
+ /*
+ * Split might have modified either leaf and we don't
+ * have a guarantee that the later edge insert will
+ * dirty this for us.
+ */
+ if (left_path)
+ ret = ocfs2_journal_dirty(handle,
+ path_leaf_bh(left_path));
+ if (ret)
+ mlog_errno(ret);
+ } else
+ ocfs2_insert_at_leaf(insert_rec, path_leaf_el(right_path),
+ insert, inode);
+
+ ret = ocfs2_journal_dirty(handle, leaf_bh);
+ if (ret)
+ mlog_errno(ret);
+
+ if (left_path) {
+ /*
+ * The rotate code has indicated that we need to fix
+ * up portions of the tree after the insert.
+ *
+ * XXX: Should we extend the transaction here?
+ */
+ subtree_index = ocfs2_find_subtree_root(inode, left_path,
+ right_path);
+ ocfs2_complete_edge_insert(inode, handle, left_path,
+ right_path, subtree_index);
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static int ocfs2_do_insert_extent(struct inode *inode,
+ handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ struct ocfs2_extent_rec *insert_rec,
+ struct ocfs2_insert_type *type)
+{
+ int ret, rotate = 0;
+ u32 cpos;
+ struct ocfs2_path *right_path = NULL;
+ struct ocfs2_path *left_path = NULL;
+ struct ocfs2_extent_list *el;
+
+ el = et->et_root_el;
+
+ ret = ocfs2_journal_access(handle, inode, et->et_root_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (le16_to_cpu(el->l_tree_depth) == 0) {
+ ocfs2_insert_at_leaf(insert_rec, el, type, inode);
+ goto out_update_clusters;
+ }
+
+ right_path = ocfs2_new_path(et->et_root_bh, et->et_root_el);
+ if (!right_path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Determine the path to start with. Rotations need the
+ * rightmost path, everything else can go directly to the
+ * target leaf.
+ */
+ cpos = le32_to_cpu(insert_rec->e_cpos);
+ if (type->ins_appending == APPEND_NONE &&
+ type->ins_contig == CONTIG_NONE) {
+ rotate = 1;
+ cpos = UINT_MAX;
+ }
+
+ ret = ocfs2_find_path(inode, right_path, cpos);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * Rotations and appends need special treatment - they modify
+ * parts of the tree above them.
+ *
+ * Both might pass back a path immediately to the left of the
+ * one being inserted to. This will cause
+ * ocfs2_insert_path() to modify the rightmost records of
+ * left_path to account for an edge insert.
+ *
+ * XXX: When modifying this code, keep in mind that an insert
+ * can wind up skipping both of these two special cases...
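+ *
+ * Walk-through with made-up values: an insert_rec at cpos 500 that
+ * is neither contiguous nor appending takes the rotate branch just
+ * below (cpos was forced to UINT_MAX above, so right_path is the
+ * rightmost path), while a tail append without left contiguity goes
+ * through ocfs2_append_rec_to_path() instead.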
+ */ + if (rotate) { + ret = ocfs2_rotate_tree_right(inode, handle, type->ins_split, + le32_to_cpu(insert_rec->e_cpos), + right_path, &left_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * ocfs2_rotate_tree_right() might have extended the + * transaction without re-journaling our tree root. + */ + ret = ocfs2_journal_access(handle, inode, et->et_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + } else if (type->ins_appending == APPEND_TAIL + && type->ins_contig != CONTIG_LEFT) { + ret = ocfs2_append_rec_to_path(inode, handle, insert_rec, + right_path, &left_path); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + ret = ocfs2_insert_path(inode, handle, left_path, right_path, + insert_rec, type); + if (ret) { + mlog_errno(ret); + goto out; + } + +out_update_clusters: + if (type->ins_split == SPLIT_NONE) + ocfs2_et_update_clusters(inode, et, + le16_to_cpu(insert_rec->e_leaf_clusters)); + + ret = ocfs2_journal_dirty(handle, et->et_root_bh); + if (ret) + mlog_errno(ret); + +out: + ocfs2_free_path(left_path); + ocfs2_free_path(right_path); + + return ret; +} + +static enum ocfs2_contig_type +ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, + struct ocfs2_extent_list *el, int index, + struct ocfs2_extent_rec *split_rec) +{ + int status; + enum ocfs2_contig_type ret = CONTIG_NONE; + u32 left_cpos, right_cpos; + struct ocfs2_extent_rec *rec = NULL; + struct ocfs2_extent_list *new_el; + struct ocfs2_path *left_path = NULL, *right_path = NULL; + struct buffer_head *bh; + struct ocfs2_extent_block *eb; + + if (index > 0) { + rec = &el->l_recs[index - 1]; + } else if (path->p_tree_depth > 0) { + status = ocfs2_find_cpos_for_left_leaf(inode->i_sb, + path, &left_cpos); + if (status) + goto out; + + if (left_cpos != 0) { + left_path = ocfs2_new_path(path_root_bh(path), + path_root_el(path)); + if (!left_path) + goto out; + + status = ocfs2_find_path(inode, left_path, left_cpos); + if (status) + goto out; + + new_el = path_leaf_el(left_path); + + if (le16_to_cpu(new_el->l_next_free_rec) != + le16_to_cpu(new_el->l_count)) { + bh = path_leaf_bh(left_path); + eb = (struct ocfs2_extent_block *)bh->b_data; + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, + eb); + goto out; + } + rec = &new_el->l_recs[ + le16_to_cpu(new_el->l_next_free_rec) - 1]; + } + } + + /* + * We're careful to check for an empty extent record here - + * the merge code will know what to do if it sees one. 
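+ *
+ * Example with invented extents: a neighbouring record of (cpos 0,
+ * 8 clusters) followed block-contiguously by a split_rec at cpos 8
+ * yields CONTIG_RIGHT here; if the record after split_rec lines up
+ * as well, the second check below upgrades the result to
+ * CONTIG_LEFTRIGHT.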
+ */
+ if (rec) {
+ if (index == 1 && ocfs2_is_empty_extent(rec)) {
+ if (split_rec->e_cpos == el->l_recs[index].e_cpos)
+ ret = CONTIG_RIGHT;
+ } else {
+ ret = ocfs2_extent_contig(inode, rec, split_rec);
+ }
+ }
+
+ rec = NULL;
+ if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
+ rec = &el->l_recs[index + 1];
+ else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
+ path->p_tree_depth > 0) {
+ status = ocfs2_find_cpos_for_right_leaf(inode->i_sb,
+ path, &right_cpos);
+ if (status)
+ goto out;
+
+ if (right_cpos == 0)
+ goto out;
+
+ right_path = ocfs2_new_path(path_root_bh(path),
+ path_root_el(path));
+ if (!right_path)
+ goto out;
+
+ status = ocfs2_find_path(inode, right_path, right_cpos);
+ if (status)
+ goto out;
+
+ new_el = path_leaf_el(right_path);
+ rec = &new_el->l_recs[0];
+ if (ocfs2_is_empty_extent(rec)) {
+ if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
+ bh = path_leaf_bh(right_path);
+ eb = (struct ocfs2_extent_block *)bh->b_data;
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
+ eb);
+ goto out;
+ }
+ rec = &new_el->l_recs[1];
+ }
+ }
+
+ if (rec) {
+ enum ocfs2_contig_type contig_type;
+
+ contig_type = ocfs2_extent_contig(inode, rec, split_rec);
+
+ if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
+ ret = CONTIG_LEFTRIGHT;
+ else if (ret == CONTIG_NONE)
+ ret = contig_type;
+ }
+
+out:
+ if (left_path)
+ ocfs2_free_path(left_path);
+ if (right_path)
+ ocfs2_free_path(right_path);
+
+ return ret;
+}
+
+static void ocfs2_figure_contig_type(struct inode *inode,
+ struct ocfs2_insert_type *insert,
+ struct ocfs2_extent_list *el,
+ struct ocfs2_extent_rec *insert_rec,
+ struct ocfs2_extent_tree *et)
+{
+ int i;
+ enum ocfs2_contig_type contig_type = CONTIG_NONE;
+
+ BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
+
+ for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
+ contig_type = ocfs2_extent_contig(inode, &el->l_recs[i],
+ insert_rec);
+ if (contig_type != CONTIG_NONE) {
+ insert->ins_contig_index = i;
+ break;
+ }
+ }
+ insert->ins_contig = contig_type;
+
+ if (insert->ins_contig != CONTIG_NONE) {
+ struct ocfs2_extent_rec *rec =
+ &el->l_recs[insert->ins_contig_index];
+ unsigned int len = le16_to_cpu(rec->e_leaf_clusters) +
+ le16_to_cpu(insert_rec->e_leaf_clusters);
+
+ /*
+ * Caller might want us to limit the size of extents, don't
+ * calculate contiguousness if we might exceed that limit.
+ */
+ if (et->et_max_leaf_clusters &&
+ (len > et->et_max_leaf_clusters))
+ insert->ins_contig = CONTIG_NONE;
+ }
+}
+
+/*
+ * This should only be called against the rightmost leaf extent list.
+ *
+ * ocfs2_figure_appending_type() will figure out whether we'll have to
+ * insert at the tail of the rightmost leaf.
+ *
+ * This should also work against the root extent list for trees with 0
+ * depth. If we consider the root extent list to be the rightmost leaf node
+ * then the logic here makes sense.
+ */
+static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
+ struct ocfs2_extent_list *el,
+ struct ocfs2_extent_rec *insert_rec)
+{
+ int i;
+ u32 cpos = le32_to_cpu(insert_rec->e_cpos);
+ struct ocfs2_extent_rec *rec;
+
+ insert->ins_appending = APPEND_NONE;
+
+ BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
+
+ if (!el->l_next_free_rec)
+ goto set_tail_append;
+
+ if (ocfs2_is_empty_extent(&el->l_recs[0])) {
+ /* Were all records empty?
*/
+ if (le16_to_cpu(el->l_next_free_rec) == 1)
+ goto set_tail_append;
+ }
+
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ rec = &el->l_recs[i];
+
+ if (cpos >=
+ (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
+ goto set_tail_append;
+
+ return;
+
+set_tail_append:
+ insert->ins_appending = APPEND_TAIL;
+}
+
+/*
+ * Helper function called at the beginning of an insert.
+ *
+ * This computes a few things that are commonly used in the process of
+ * inserting into the btree:
+ * - Whether the new extent is contiguous with an existing one.
+ * - The current tree depth.
+ * - Whether the insert is an appending one.
+ * - The total # of free records in the tree.
+ *
+ * All of the information is stored on the ocfs2_insert_type
+ * structure.
+ */
+static int ocfs2_figure_insert_type(struct inode *inode,
+ struct ocfs2_extent_tree *et,
+ struct buffer_head **last_eb_bh,
+ struct ocfs2_extent_rec *insert_rec,
+ int *free_records,
+ struct ocfs2_insert_type *insert)
+{
+ int ret;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *el;
+ struct ocfs2_path *path = NULL;
+ struct buffer_head *bh = NULL;
+
+ insert->ins_split = SPLIT_NONE;
+
+ el = et->et_root_el;
+ insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);
+
+ if (el->l_tree_depth) {
+ /*
+ * If we have tree depth, we read in the
+ * rightmost extent block ahead of time as
+ * ocfs2_figure_insert_type() and ocfs2_add_branch()
+ * may want it later.
+ */
+ ret = ocfs2_read_block(inode, ocfs2_et_get_last_eb_blk(et), &bh);
+ if (ret) {
+ mlog_exit(ret);
+ goto out;
+ }
+ eb = (struct ocfs2_extent_block *) bh->b_data;
+ el = &eb->h_list;
+ }
+
+ /*
+ * Unless we have a contiguous insert, we'll need to know if
+ * there is room left in our allocation tree for another
+ * extent record.
+ *
+ * XXX: This test is simplistic, we can search for empty
+ * extent records too.
+ */
+ *free_records = le16_to_cpu(el->l_count) -
+ le16_to_cpu(el->l_next_free_rec);
+
+ if (!insert->ins_tree_depth) {
+ ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
+ ocfs2_figure_appending_type(insert, el, insert_rec);
+ return 0;
+ }
+
+ path = ocfs2_new_path(et->et_root_bh, et->et_root_el);
+ if (!path) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * In the case that we're inserting past what the tree
+ * currently accounts for, ocfs2_find_path() will return for
+ * us the rightmost tree path. This is accounted for below in
+ * the appending code.
+ */
+ ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos));
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ el = path_leaf_el(path);
+
+ /*
+ * Now that we have the path, there are two things we want to determine:
+ * 1) Contiguousness (also set contig_index if this is so)
+ *
+ * 2) Are we doing an append? We can trivially break this up
+ * into two types of appends: simple record append, or a
+ * rotate inside the tail leaf.
+ */
+ ocfs2_figure_contig_type(inode, insert, el, insert_rec, et);
+
+ /*
+ * The insert code isn't quite ready to deal with all cases of
+ * left contiguousness. Specifically, if it's an insert into
+ * the 1st record in a leaf, it will require the adjustment of
+ * cluster count on the last record of the path directly to its
+ * left. For now, just catch that case and fool the layers
+ * above us. This works just fine for tree_depth == 0, which
+ * is why we allow that above.
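+ *
+ * (Concretely: a CONTIG_LEFT hit at leaf index 0 would also
+ * require stretching the last record of the leaf to the left,
+ * so it is downgraded to CONTIG_NONE just below and handled as
+ * a plain insert.)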
+ */
+ if (insert->ins_contig == CONTIG_LEFT &&
+ insert->ins_contig_index == 0)
+ insert->ins_contig = CONTIG_NONE;
+
+ /*
+ * Ok, so we can simply compare against last_eb to figure out
+ * whether the path doesn't exist. This will only happen in
+ * the case that we're doing a tail append, so maybe we can
+ * take advantage of that information somehow.
+ */
+ if (ocfs2_et_get_last_eb_blk(et) ==
+ path_leaf_bh(path)->b_blocknr) {
+ /*
+ * Ok, ocfs2_find_path() returned us the rightmost
+ * tree path. This might be an appending insert. There are
+ * two cases:
+ * 1) We're doing a true append at the tail:
+ * -This might even be off the end of the leaf
+ * 2) We're "appending" by rotating in the tail
+ */
+ ocfs2_figure_appending_type(insert, el, insert_rec);
+ }
+
+out:
+ ocfs2_free_path(path);
+
+ if (ret == 0)
+ *last_eb_bh = bh;
+ else
+ brelse(bh);
+ return ret;
+}
+
+/*
+ * Insert an extent into an inode btree.
+ *
+ * The caller needs to update fe->i_clusters
+ */
+int ocfs2_insert_extent(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct inode *inode,
+ struct ocfs2_extent_tree *et,
+ u32 cpos,
+ u64 start_blk,
+ u32 new_clusters,
+ u8 flags,
+ struct ocfs2_alloc_context *meta_ac)
+{
+ int status;
+ int uninitialized_var(free_records);
+ struct buffer_head *last_eb_bh = NULL;
+ struct ocfs2_insert_type insert = {0, };
+ struct ocfs2_extent_rec rec;
+
+ mlog(0, "add %u clusters at position %u to inode %llu\n",
+ new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ memset(&rec, 0, sizeof(rec));
+ rec.e_cpos = cpu_to_le32(cpos);
+ rec.e_blkno = cpu_to_le64(start_blk);
+ rec.e_leaf_clusters = cpu_to_le16(new_clusters);
+ rec.e_flags = flags;
+ status = ocfs2_et_insert_check(inode, et, &rec);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_figure_insert_type(inode, et, &last_eb_bh, &rec,
+ &free_records, &insert);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "Insert.appending: %u, Insert.Contig: %u, "
+ "Insert.contig_index: %d, Insert.free_records: %d, "
+ "Insert.tree_depth: %d\n",
+ insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
+ free_records, insert.ins_tree_depth);
+
+ if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
+ status = ocfs2_grow_tree(inode, handle, et,
+ &insert.ins_tree_depth, &last_eb_bh,
+ meta_ac);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ /* Finally, we can add clusters. This might rotate the tree for us. */
+ status = ocfs2_do_insert_extent(inode, handle, et, &rec, &insert);
+ if (status < 0)
+ mlog_errno(status);
+ else if (et->et_ops == &ocfs2_dinode_et_ops)
+ ocfs2_extent_map_insert_rec(inode, &rec);
+
+bail:
+ brelse(last_eb_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Allocate and add clusters into the extent b-tree.
+ * The new clusters (clusters_to_add) will be inserted at logical_offset.
+ * The extent b-tree's root is specified by et, and
+ * it is not limited to the file storage. Any extent tree can use this
+ * function if it implements the proper ocfs2_extent_tree.
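+ *
+ * Sketch of the expected calling pattern (hedged - locking and
+ * transaction setup are the caller's business and are elided here):
+ * call with *logical_offset at the current end of the allocation; on
+ * -EAGAIN with reason RESTART_TRANS, restart or extend the
+ * transaction and call again with the updated *logical_offset, while
+ * RESTART_META means more metadata blocks must be reserved first.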
+ */ +int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, + struct inode *inode, + u32 *logical_offset, + u32 clusters_to_add, + int mark_unwritten, + struct ocfs2_extent_tree *et, + handle_t *handle, + struct ocfs2_alloc_context *data_ac, + struct ocfs2_alloc_context *meta_ac, + enum ocfs2_alloc_restarted *reason_ret) +{ + int status = 0; + int free_extents; + enum ocfs2_alloc_restarted reason = RESTART_NONE; + u32 bit_off, num_bits; + u64 block; + u8 flags = 0; + + BUG_ON(!clusters_to_add); + + if (mark_unwritten) + flags = OCFS2_EXT_UNWRITTEN; + + free_extents = ocfs2_num_free_extents(osb, inode, et); + if (free_extents < 0) { + status = free_extents; + mlog_errno(status); + goto leave; + } + + /* there are two cases which could cause us to EAGAIN in the + * we-need-more-metadata case: + * 1) we haven't reserved *any* + * 2) we are so fragmented, we've needed to add metadata too + * many times. */ + if (!free_extents && !meta_ac) { + mlog(0, "we haven't reserved any metadata!\n"); + status = -EAGAIN; + reason = RESTART_META; + goto leave; + } else if ((!free_extents) + && (ocfs2_alloc_context_bits_left(meta_ac) + < ocfs2_extend_meta_needed(et->et_root_el))) { + mlog(0, "filesystem is really fragmented...\n"); + status = -EAGAIN; + reason = RESTART_META; + goto leave; + } + + status = __ocfs2_claim_clusters(osb, handle, data_ac, 1, + clusters_to_add, &bit_off, &num_bits); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto leave; + } + + BUG_ON(num_bits > clusters_to_add); + + /* reserve our write early -- insert_extent may update the inode */ + status = ocfs2_journal_access(handle, inode, et->et_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + block = ocfs2_clusters_to_blocks(osb->sb, bit_off); + mlog(0, "Allocating %u clusters at block %u for inode %llu\n", + num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); + status = ocfs2_insert_extent(osb, handle, inode, et, + *logical_offset, block, + num_bits, flags, meta_ac); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_journal_dirty(handle, et->et_root_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + clusters_to_add -= num_bits; + *logical_offset += num_bits; + + if (clusters_to_add) { + mlog(0, "need to alloc once more, wanted = %u\n", + clusters_to_add); + status = -EAGAIN; + reason = RESTART_TRANS; + } + +leave: + mlog_exit(status); + if (reason_ret) + *reason_ret = reason; + return status; +} + +static void ocfs2_make_right_split_rec(struct super_block *sb, + struct ocfs2_extent_rec *split_rec, + u32 cpos, + struct ocfs2_extent_rec *rec) +{ + u32 rec_cpos = le32_to_cpu(rec->e_cpos); + u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters); + + memset(split_rec, 0, sizeof(struct ocfs2_extent_rec)); + + split_rec->e_cpos = cpu_to_le32(cpos); + split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos); + + split_rec->e_blkno = rec->e_blkno; + le64_add_cpu(&split_rec->e_blkno, + ocfs2_clusters_to_blocks(sb, cpos - rec_cpos)); + + split_rec->e_flags = rec->e_flags; +} + +static int ocfs2_split_and_insert(struct inode *inode, + handle_t *handle, + struct ocfs2_path *path, + struct ocfs2_extent_tree *et, + struct buffer_head **last_eb_bh, + int split_index, + struct ocfs2_extent_rec *orig_split_rec, + struct ocfs2_alloc_context *meta_ac) +{ + int ret = 0, depth; + unsigned int insert_range, rec_range, do_leftright = 0; + struct ocfs2_extent_rec tmprec; + struct ocfs2_extent_list 
*rightmost_el; + struct ocfs2_extent_rec rec; + struct ocfs2_extent_rec split_rec = *orig_split_rec; + struct ocfs2_insert_type insert; + struct ocfs2_extent_block *eb; + +leftright: + /* + * Store a copy of the record on the stack - it might move + * around as the tree is manipulated below. + */ + rec = path_leaf_el(path)->l_recs[split_index]; + + rightmost_el = et->et_root_el; + + depth = le16_to_cpu(rightmost_el->l_tree_depth); + if (depth) { + BUG_ON(!(*last_eb_bh)); + eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; + rightmost_el = &eb->h_list; + } + + if (le16_to_cpu(rightmost_el->l_next_free_rec) == + le16_to_cpu(rightmost_el->l_count)) { + ret = ocfs2_grow_tree(inode, handle, et, + &depth, last_eb_bh, meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + memset(&insert, 0, sizeof(struct ocfs2_insert_type)); + insert.ins_appending = APPEND_NONE; + insert.ins_contig = CONTIG_NONE; + insert.ins_tree_depth = depth; + + insert_range = le32_to_cpu(split_rec.e_cpos) + + le16_to_cpu(split_rec.e_leaf_clusters); + rec_range = le32_to_cpu(rec.e_cpos) + + le16_to_cpu(rec.e_leaf_clusters); + + if (split_rec.e_cpos == rec.e_cpos) { + insert.ins_split = SPLIT_LEFT; + } else if (insert_range == rec_range) { + insert.ins_split = SPLIT_RIGHT; + } else { + /* + * Left/right split. We fake this as a right split + * first and then make a second pass as a left split. + */ + insert.ins_split = SPLIT_RIGHT; + + ocfs2_make_right_split_rec(inode->i_sb, &tmprec, insert_range, + &rec); + + split_rec = tmprec; + + BUG_ON(do_leftright); + do_leftright = 1; + } + + ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (do_leftright == 1) { + u32 cpos; + struct ocfs2_extent_list *el; + + do_leftright++; + split_rec = *orig_split_rec; + + ocfs2_reinit_path(path, 1); + + cpos = le32_to_cpu(split_rec.e_cpos); + ret = ocfs2_find_path(inode, path, cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + el = path_leaf_el(path); + split_index = ocfs2_search_extent_list(el, cpos); + goto leftright; + } +out: + + return ret; +} + +/* + * Mark part or all of the extent record at split_index in the leaf + * pointed to by path as written. This removes the unwritten + * extent flag. + * + * Care is taken to handle contiguousness so as to not grow the tree. + * + * meta_ac is not strictly necessary - we only truly need it if growth + * of the tree is required. All other cases will degrade into a less + * optimal tree layout. + * + * last_eb_bh should be the rightmost leaf block for any extent + * btree. Since a split may grow the tree or a merge might shrink it, + * the caller cannot trust the contents of that buffer after this call. + * + * This code is optimized for readability - several passes might be + * made over certain portions of the tree. All of those blocks will + * have been brought into cache (and pinned via the journal), so the + * extra overhead is not expressed in terms of disk reads. 
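+ *
+ * As an illustration, marking clusters 10-14 written inside an
+ * unwritten record covering clusters 0-19 takes two passes: the
+ * first splits off clusters 15-19, the second separates the newly
+ * written clusters from 0-9, leaving three records where there
+ * was one.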
+ */ +static int __ocfs2_mark_extent_written(struct inode *inode, + struct ocfs2_extent_tree *et, + handle_t *handle, + struct ocfs2_path *path, + int split_index, + struct ocfs2_extent_rec *split_rec, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret = 0; + struct ocfs2_extent_list *el = path_leaf_el(path); + struct buffer_head *last_eb_bh = NULL; + struct ocfs2_extent_rec *rec = &el->l_recs[split_index]; + struct ocfs2_merge_ctxt ctxt; + struct ocfs2_extent_list *rightmost_el; + + if (!(rec->e_flags & OCFS2_EXT_UNWRITTEN)) { + ret = -EIO; + mlog_errno(ret); + goto out; + } + + if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) || + ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) < + (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) { + ret = -EIO; + mlog_errno(ret); + goto out; + } + + ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el, + split_index, + split_rec); + + /* + * The core merge / split code wants to know how much room is + * left in this inodes allocation tree, so we pass the + * rightmost extent list. + */ + if (path->p_tree_depth) { + struct ocfs2_extent_block *eb; + + ret = ocfs2_read_block(inode, ocfs2_et_get_last_eb_blk(et), + &last_eb_bh); + if (ret) { + mlog_exit(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); + ret = -EROFS; + goto out; + } + + rightmost_el = &eb->h_list; + } else + rightmost_el = path_root_el(path); + + if (rec->e_cpos == split_rec->e_cpos && + rec->e_leaf_clusters == split_rec->e_leaf_clusters) + ctxt.c_split_covers_rec = 1; + else + ctxt.c_split_covers_rec = 0; + + ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); + + mlog(0, "index: %d, contig: %u, has_empty: %u, split_covers: %u\n", + split_index, ctxt.c_contig_type, ctxt.c_has_empty_extent, + ctxt.c_split_covers_rec); + + if (ctxt.c_contig_type == CONTIG_NONE) { + if (ctxt.c_split_covers_rec) + el->l_recs[split_index] = *split_rec; + else + ret = ocfs2_split_and_insert(inode, handle, path, et, + &last_eb_bh, split_index, + split_rec, meta_ac); + if (ret) + mlog_errno(ret); + } else { + ret = ocfs2_try_to_merge_extent(inode, handle, path, + split_index, split_rec, + dealloc, &ctxt, et); + if (ret) + mlog_errno(ret); + } + +out: + brelse(last_eb_bh); + return ret; +} + +/* + * Mark the already-existing extent at cpos as written for len clusters. + * + * If the existing extent is larger than the request, initiate a + * split. An attempt will be made at merging with adjacent extents. + * + * The caller is responsible for passing down meta_ac if we'll need it. 
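+ *
+ * phys is the physical cluster backing cpos; it seeds e_blkno of
+ * the split record handed down to __ocfs2_mark_extent_written().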
+ */ +int ocfs2_mark_extent_written(struct inode *inode, + struct ocfs2_extent_tree *et, + handle_t *handle, u32 cpos, u32 len, u32 phys, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret, index; + u64 start_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys); + struct ocfs2_extent_rec split_rec; + struct ocfs2_path *left_path = NULL; + struct ocfs2_extent_list *el; + + mlog(0, "Inode %lu cpos %u, len %u, phys %u (%llu)\n", + inode->i_ino, cpos, len, phys, (unsigned long long)start_blkno); + + if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { + ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " + "that are being written to, but the feature bit " + "is not set in the super block.", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + ret = -EROFS; + goto out; + } + + /* + * XXX: This should be fixed up so that we just re-insert the + * next extent records. + * + * XXX: This is a hack on the extent tree, maybe it should be + * an op? + */ + if (et->et_ops == &ocfs2_dinode_et_ops) + ocfs2_extent_map_trunc(inode, 0); + + left_path = ocfs2_new_path(et->et_root_bh, et->et_root_el); + if (!left_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(inode, left_path, cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + el = path_leaf_el(left_path); + + index = ocfs2_search_extent_list(el, cpos); + if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { + ocfs2_error(inode->i_sb, + "Inode %llu has an extent at cpos %u which can no " + "longer be found.\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); + ret = -EROFS; + goto out; + } + + memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec)); + split_rec.e_cpos = cpu_to_le32(cpos); + split_rec.e_leaf_clusters = cpu_to_le16(len); + split_rec.e_blkno = cpu_to_le64(start_blkno); + split_rec.e_flags = path_leaf_el(left_path)->l_recs[index].e_flags; + split_rec.e_flags &= ~OCFS2_EXT_UNWRITTEN; + + ret = __ocfs2_mark_extent_written(inode, et, handle, left_path, + index, &split_rec, meta_ac, + dealloc); + if (ret) + mlog_errno(ret); + +out: + ocfs2_free_path(left_path); + return ret; +} + +static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et, + handle_t *handle, struct ocfs2_path *path, + int index, u32 new_range, + struct ocfs2_alloc_context *meta_ac) +{ + int ret, depth, credits = handle->h_buffer_credits; + struct buffer_head *last_eb_bh = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *rightmost_el, *el; + struct ocfs2_extent_rec split_rec; + struct ocfs2_extent_rec *rec; + struct ocfs2_insert_type insert; + + /* + * Setup the record to split before we grow the tree. 
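+	 * new_range is the cpos at which the record is cut: everything
+	 * from new_range to the end of the record moves into the new
+	 * right-hand record.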
+ */ + el = path_leaf_el(path); + rec = &el->l_recs[index]; + ocfs2_make_right_split_rec(inode->i_sb, &split_rec, new_range, rec); + + depth = path->p_tree_depth; + if (depth > 0) { + ret = ocfs2_read_block(inode, ocfs2_et_get_last_eb_blk(et), + &last_eb_bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; + rightmost_el = &eb->h_list; + } else + rightmost_el = path_leaf_el(path); + + credits += path->p_tree_depth + + ocfs2_extend_meta_needed(et->et_root_el); + ret = ocfs2_extend_trans(handle, credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (le16_to_cpu(rightmost_el->l_next_free_rec) == + le16_to_cpu(rightmost_el->l_count)) { + ret = ocfs2_grow_tree(inode, handle, et, &depth, &last_eb_bh, + meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + memset(&insert, 0, sizeof(struct ocfs2_insert_type)); + insert.ins_appending = APPEND_NONE; + insert.ins_contig = CONTIG_NONE; + insert.ins_split = SPLIT_RIGHT; + insert.ins_tree_depth = depth; + + ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert); + if (ret) + mlog_errno(ret); + +out: + brelse(last_eb_bh); + return ret; +} + +static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, + struct ocfs2_path *path, int index, + struct ocfs2_cached_dealloc_ctxt *dealloc, + u32 cpos, u32 len, + struct ocfs2_extent_tree *et) +{ + int ret; + u32 left_cpos, rec_range, trunc_range; + int wants_rotate = 0, is_rightmost_tree_rec = 0; + struct super_block *sb = inode->i_sb; + struct ocfs2_path *left_path = NULL; + struct ocfs2_extent_list *el = path_leaf_el(path); + struct ocfs2_extent_rec *rec; + struct ocfs2_extent_block *eb; + + if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) { + ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et); + if (ret) { + mlog_errno(ret); + goto out; + } + + index--; + } + + if (index == (le16_to_cpu(el->l_next_free_rec) - 1) && + path->p_tree_depth) { + /* + * Check whether this is the rightmost tree record. If + * we remove all of this record or part of its right + * edge then an update of the record lengths above it + * will be required. + */ + eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; + if (eb->h_next_leaf_blk == 0) + is_rightmost_tree_rec = 1; + } + + rec = &el->l_recs[index]; + if (index == 0 && path->p_tree_depth && + le32_to_cpu(rec->e_cpos) == cpos) { + /* + * Changing the leftmost offset (via partial or whole + * record truncate) of an interior (or rightmost) path + * means we have to update the subtree that is formed + * by this leaf and the one to it's left. + * + * There are two cases we can skip: + * 1) Path is the leftmost one in our inode tree. + * 2) The leaf is rightmost and will be empty after + * we remove the extent record - the rotate code + * knows how to update the newly formed edge. 
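+		 *
+		 * Both show up in the checks below: case 1 leaves
+		 * left_cpos at zero, and in case 2 l_next_free_rec
+		 * is 1, since the record being removed is the only
+		 * live one in the leaf.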
+ */ + + ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, + &left_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) { + left_path = ocfs2_new_path(path_root_bh(path), + path_root_el(path)); + if (!left_path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(inode, left_path, left_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + } + } + + ret = ocfs2_extend_rotate_transaction(handle, 0, + handle->h_buffer_credits, + path); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_path(inode, handle, path); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_path(inode, handle, left_path); + if (ret) { + mlog_errno(ret); + goto out; + } + + rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); + trunc_range = cpos + len; + + if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) { + int next_free; + + memset(rec, 0, sizeof(*rec)); + ocfs2_cleanup_merge(el, index); + wants_rotate = 1; + + next_free = le16_to_cpu(el->l_next_free_rec); + if (is_rightmost_tree_rec && next_free > 1) { + /* + * We skip the edge update if this path will + * be deleted by the rotate code. + */ + rec = &el->l_recs[next_free - 1]; + ocfs2_adjust_rightmost_records(inode, handle, path, + rec); + } + } else if (le32_to_cpu(rec->e_cpos) == cpos) { + /* Remove leftmost portion of the record. */ + le32_add_cpu(&rec->e_cpos, len); + le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len)); + le16_add_cpu(&rec->e_leaf_clusters, -len); + } else if (rec_range == trunc_range) { + /* Remove rightmost portion of the record */ + le16_add_cpu(&rec->e_leaf_clusters, -len); + if (is_rightmost_tree_rec) + ocfs2_adjust_rightmost_records(inode, handle, path, rec); + } else { + /* Caller should have trapped this. 
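+		 * A range leaving allocation on both sides of the
+		 * record should have been turned into a right split
+		 * by ocfs2_remove_extent() before we got here.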
*/ + mlog(ML_ERROR, "Inode %llu: Invalid record truncate: (%u, %u) " + "(%u, %u)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, + le32_to_cpu(rec->e_cpos), + le16_to_cpu(rec->e_leaf_clusters), cpos, len); + BUG(); + } + + if (left_path) { + int subtree_index; + + subtree_index = ocfs2_find_subtree_root(inode, left_path, path); + ocfs2_complete_edge_insert(inode, handle, left_path, path, + subtree_index); + } + + ocfs2_journal_dirty(handle, path_leaf_bh(path)); + + ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et); + if (ret) { + mlog_errno(ret); + goto out; + } + +out: + ocfs2_free_path(left_path); + return ret; +} + +int ocfs2_remove_extent(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 cpos, u32 len, handle_t *handle, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret, index; + u32 rec_range, trunc_range; + struct ocfs2_extent_rec *rec; + struct ocfs2_extent_list *el; + struct ocfs2_path *path = NULL; + + ocfs2_extent_map_trunc(inode, 0); + + path = ocfs2_new_path(et->et_root_bh, et->et_root_el); + if (!path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(inode, path, cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + el = path_leaf_el(path); + index = ocfs2_search_extent_list(el, cpos); + if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { + ocfs2_error(inode->i_sb, + "Inode %llu has an extent at cpos %u which can no " + "longer be found.\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); + ret = -EROFS; + goto out; + } + + /* + * We have 3 cases of extent removal: + * 1) Range covers the entire extent rec + * 2) Range begins or ends on one edge of the extent rec + * 3) Range is in the middle of the extent rec (no shared edges) + * + * For case 1 we remove the extent rec and left rotate to + * fill the hole. + * + * For case 2 we just shrink the existing extent rec, with a + * tree update if the shrinking edge is also the edge of an + * extent block. + * + * For case 3 we do a right split to turn the extent rec into + * something case 2 can handle. + */ + rec = &el->l_recs[index]; + rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); + trunc_range = cpos + len; + + BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); + + mlog(0, "Inode %llu, remove (cpos %u, len %u). Existing index %d " + "(cpos %u, len %u)\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, index, + le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); + + if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { + ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, + cpos, len, et); + if (ret) { + mlog_errno(ret); + goto out; + } + } else { + ret = ocfs2_split_tree(inode, et, handle, path, index, + trunc_range, meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * The split could have manipulated the tree enough to + * move the record location, so we have to look for it again. + */ + ocfs2_reinit_path(path, 1); + + ret = ocfs2_find_path(inode, path, cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + el = path_leaf_el(path); + index = ocfs2_search_extent_list(el, cpos); + if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { + ocfs2_error(inode->i_sb, + "Inode %llu: split at cpos %u lost record.", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + cpos); + ret = -EROFS; + goto out; + } + + /* + * Double check our values here. If anything is fishy, + * it's easier to catch it at the top level. 
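+		 *
+		 * In particular, the record found at cpos must now end
+		 * exactly at trunc_range; anything else means the split
+		 * lost track of our clusters and is treated as
+		 * corruption.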
+ */ + rec = &el->l_recs[index]; + rec_range = le32_to_cpu(rec->e_cpos) + + ocfs2_rec_clusters(el, rec); + if (rec_range != trunc_range) { + ocfs2_error(inode->i_sb, + "Inode %llu: error after split at cpos %u" + "trunc len %u, existing record is (%u,%u)", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + cpos, len, le32_to_cpu(rec->e_cpos), + ocfs2_rec_clusters(el, rec)); + ret = -EROFS; + goto out; + } + + ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, + cpos, len, et); + if (ret) { + mlog_errno(ret); + goto out; + } + } + +out: + ocfs2_free_path(path); + return ret; +} + +int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb) +{ + struct buffer_head *tl_bh = osb->osb_tl_bh; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + + di = (struct ocfs2_dinode *) tl_bh->b_data; + tl = &di->id2.i_dealloc; + + mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count), + "slot %d, invalid truncate log parameters: used = " + "%u, count = %u\n", osb->slot_num, + le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count)); + return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count); +} + +static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl, + unsigned int new_start) +{ + unsigned int tail_index; + unsigned int current_tail; + + /* No records, nothing to coalesce */ + if (!le16_to_cpu(tl->tl_used)) + return 0; + + tail_index = le16_to_cpu(tl->tl_used) - 1; + current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start); + current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters); + + return current_tail == new_start; +} + +int ocfs2_truncate_log_append(struct ocfs2_super *osb, + handle_t *handle, + u64 start_blk, + unsigned int num_clusters) +{ + int status, index; + unsigned int start_cluster, tl_count; + struct inode *tl_inode = osb->osb_tl_inode; + struct buffer_head *tl_bh = osb->osb_tl_bh; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + + mlog_entry("start_blk = %llu, num_clusters = %u\n", + (unsigned long long)start_blk, num_clusters); + + BUG_ON(mutex_trylock(&tl_inode->i_mutex)); + + start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk); + + di = (struct ocfs2_dinode *) tl_bh->b_data; + tl = &di->id2.i_dealloc; + if (!OCFS2_IS_VALID_DINODE(di)) { + OCFS2_RO_ON_INVALID_DINODE(osb->sb, di); + status = -EIO; + goto bail; + } + + tl_count = le16_to_cpu(tl->tl_count); + mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || + tl_count == 0, + "Truncate record count on #%llu invalid " + "wanted %u, actual %u\n", + (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, + ocfs2_truncate_recs_per_inode(osb->sb), + le16_to_cpu(tl->tl_count)); + + /* Caller should have known to flush before calling us. */ + index = le16_to_cpu(tl->tl_used); + if (index >= tl_count) { + status = -ENOSPC; + mlog_errno(status); + goto bail; + } + + status = ocfs2_journal_access(handle, tl_inode, tl_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + mlog(0, "Log truncate of %u clusters starting at cluster %u to " + "%llu (index = %d)\n", num_clusters, start_cluster, + (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index); + + if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) { + /* + * Move index back to the record we are coalescing with. 
+ * ocfs2_truncate_log_can_coalesce() guarantees nonzero + */ + index--; + + num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters); + mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n", + index, le32_to_cpu(tl->tl_recs[index].t_start), + num_clusters); + } else { + tl->tl_recs[index].t_start = cpu_to_le32(start_cluster); + tl->tl_used = cpu_to_le16(index + 1); + } + tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters); + + status = ocfs2_journal_dirty(handle, tl_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, + handle_t *handle, + struct inode *data_alloc_inode, + struct buffer_head *data_alloc_bh) +{ + int status = 0; + int i; + unsigned int num_clusters; + u64 start_blk; + struct ocfs2_truncate_rec rec; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + struct inode *tl_inode = osb->osb_tl_inode; + struct buffer_head *tl_bh = osb->osb_tl_bh; + + mlog_entry_void(); + + di = (struct ocfs2_dinode *) tl_bh->b_data; + tl = &di->id2.i_dealloc; + i = le16_to_cpu(tl->tl_used) - 1; + while (i >= 0) { + /* Caller has given us at least enough credits to + * update the truncate log dinode */ + status = ocfs2_journal_access(handle, tl_inode, tl_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + tl->tl_used = cpu_to_le16(i); + + status = ocfs2_journal_dirty(handle, tl_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + /* TODO: Perhaps we can calculate the bulk of the + * credits up front rather than extending like + * this. */ + status = ocfs2_extend_trans(handle, + OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + rec = tl->tl_recs[i]; + start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb, + le32_to_cpu(rec.t_start)); + num_clusters = le32_to_cpu(rec.t_clusters); + + /* if start_blk is not set, we ignore the record as + * invalid. 
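+		 *
+		 * Either way the record is already off the log:
+		 * tl_used was shrunk above, so a crash at this point
+		 * can only leak the clusters, never free them twice.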
*/ + if (start_blk) { + mlog(0, "free record %d, start = %u, clusters = %u\n", + i, le32_to_cpu(rec.t_start), num_clusters); + + status = ocfs2_free_clusters(handle, data_alloc_inode, + data_alloc_bh, start_blk, + num_clusters); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + i--; + } + +bail: + mlog_exit(status); + return status; +} + +/* Expects you to already be holding tl_inode->i_mutex */ +int __ocfs2_flush_truncate_log(struct ocfs2_super *osb) +{ + int status; + unsigned int num_to_flush; + handle_t *handle; + struct inode *tl_inode = osb->osb_tl_inode; + struct inode *data_alloc_inode = NULL; + struct buffer_head *tl_bh = osb->osb_tl_bh; + struct buffer_head *data_alloc_bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + + mlog_entry_void(); + + BUG_ON(mutex_trylock(&tl_inode->i_mutex)); + + di = (struct ocfs2_dinode *) tl_bh->b_data; + tl = &di->id2.i_dealloc; + if (!OCFS2_IS_VALID_DINODE(di)) { + OCFS2_RO_ON_INVALID_DINODE(osb->sb, di); + status = -EIO; + goto out; + } + + num_to_flush = le16_to_cpu(tl->tl_used); + mlog(0, "Flush %u records from truncate log #%llu\n", + num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno); + if (!num_to_flush) { + status = 0; + goto out; + } + + data_alloc_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (!data_alloc_inode) { + status = -EINVAL; + mlog(ML_ERROR, "Could not get bitmap inode!\n"); + goto out; + } + + mutex_lock(&data_alloc_inode->i_mutex); + + status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1); + if (status < 0) { + mlog_errno(status); + goto out_mutex; + } + + handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out_unlock; + } + + status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode, + data_alloc_bh); + if (status < 0) + mlog_errno(status); + + ocfs2_commit_trans(osb, handle); + +out_unlock: + brelse(data_alloc_bh); + ocfs2_inode_unlock(data_alloc_inode, 1); + +out_mutex: + mutex_unlock(&data_alloc_inode->i_mutex); + iput(data_alloc_inode); + +out: + mlog_exit(status); + return status; +} + +int ocfs2_flush_truncate_log(struct ocfs2_super *osb) +{ + int status; + struct inode *tl_inode = osb->osb_tl_inode; + + mutex_lock(&tl_inode->i_mutex); + status = __ocfs2_flush_truncate_log(osb); + mutex_unlock(&tl_inode->i_mutex); + + return status; +} + +static void ocfs2_truncate_log_worker(struct work_struct *work) +{ + int status; + struct ocfs2_super *osb = + container_of(work, struct ocfs2_super, + osb_truncate_log_wq.work); + + mlog_entry_void(); + + status = ocfs2_flush_truncate_log(osb); + if (status < 0) + mlog_errno(status); + else + ocfs2_init_inode_steal_slot(osb); + + mlog_exit(status); +} + +#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ) +void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb, + int cancel) +{ + if (osb->osb_tl_inode) { + /* We want to push off log flushes while truncates are + * still running. 
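+		 * Passing cancel therefore re-arms the delayed work,
+		 * pushing the flush another
+		 * OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL into the future.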
*/ + if (cancel) + cancel_delayed_work(&osb->osb_truncate_log_wq); + + queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq, + OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL); + } +} + +static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, + int slot_num, + struct inode **tl_inode, + struct buffer_head **tl_bh) +{ + int status; + struct inode *inode = NULL; + struct buffer_head *bh = NULL; + + inode = ocfs2_get_system_file_inode(osb, + TRUNCATE_LOG_SYSTEM_INODE, + slot_num); + if (!inode) { + status = -EINVAL; + mlog(ML_ERROR, "Could not get load truncate log inode!\n"); + goto bail; + } + + status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &bh); + if (status < 0) { + iput(inode); + mlog_errno(status); + goto bail; + } + + *tl_inode = inode; + *tl_bh = bh; +bail: + mlog_exit(status); + return status; +} + +/* called during the 1st stage of node recovery. we stamp a clean + * truncate log and pass back a copy for processing later. if the + * truncate log does not require processing, a *tl_copy is set to + * NULL. */ +int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, + int slot_num, + struct ocfs2_dinode **tl_copy) +{ + int status; + struct inode *tl_inode = NULL; + struct buffer_head *tl_bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + + *tl_copy = NULL; + + mlog(0, "recover truncate log from slot %d\n", slot_num); + + status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + di = (struct ocfs2_dinode *) tl_bh->b_data; + tl = &di->id2.i_dealloc; + if (!OCFS2_IS_VALID_DINODE(di)) { + OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di); + status = -EIO; + goto bail; + } + + if (le16_to_cpu(tl->tl_used)) { + mlog(0, "We'll have %u logs to recover\n", + le16_to_cpu(tl->tl_used)); + + *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL); + if (!(*tl_copy)) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + /* Assuming the write-out below goes well, this copy + * will be passed back to recovery for processing. */ + memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size); + + /* All we need to do to clear the truncate log is set + * tl_used. 
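+		 * The stale records beyond it are harmless - nothing
+		 * ever reads past tl_used.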
*/ + tl->tl_used = 0; + + status = ocfs2_write_block(osb, tl_bh, tl_inode); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + +bail: + if (tl_inode) + iput(tl_inode); + brelse(tl_bh); + + if (status < 0 && (*tl_copy)) { + kfree(*tl_copy); + *tl_copy = NULL; + } + + mlog_exit(status); + return status; +} + +int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb, + struct ocfs2_dinode *tl_copy) +{ + int status = 0; + int i; + unsigned int clusters, num_recs, start_cluster; + u64 start_blk; + handle_t *handle; + struct inode *tl_inode = osb->osb_tl_inode; + struct ocfs2_truncate_log *tl; + + mlog_entry_void(); + + if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) { + mlog(ML_ERROR, "Asked to recover my own truncate log!\n"); + return -EINVAL; + } + + tl = &tl_copy->id2.i_dealloc; + num_recs = le16_to_cpu(tl->tl_used); + mlog(0, "cleanup %u records from %llu\n", num_recs, + (unsigned long long)le64_to_cpu(tl_copy->i_blkno)); + + mutex_lock(&tl_inode->i_mutex); + for(i = 0; i < num_recs; i++) { + if (ocfs2_truncate_log_needs_flush(osb)) { + status = __ocfs2_flush_truncate_log(osb); + if (status < 0) { + mlog_errno(status); + goto bail_up; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_up; + } + + clusters = le32_to_cpu(tl->tl_recs[i].t_clusters); + start_cluster = le32_to_cpu(tl->tl_recs[i].t_start); + start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster); + + status = ocfs2_truncate_log_append(osb, handle, + start_blk, clusters); + ocfs2_commit_trans(osb, handle); + if (status < 0) { + mlog_errno(status); + goto bail_up; + } + } + +bail_up: + mutex_unlock(&tl_inode->i_mutex); + + mlog_exit(status); + return status; +} + +void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb) +{ + int status; + struct inode *tl_inode = osb->osb_tl_inode; + + mlog_entry_void(); + + if (tl_inode) { + cancel_delayed_work(&osb->osb_truncate_log_wq); + flush_workqueue(ocfs2_wq); + + status = ocfs2_flush_truncate_log(osb); + if (status < 0) + mlog_errno(status); + + brelse(osb->osb_tl_bh); + iput(osb->osb_tl_inode); + } + + mlog_exit_void(); +} + +int ocfs2_truncate_log_init(struct ocfs2_super *osb) +{ + int status; + struct inode *tl_inode = NULL; + struct buffer_head *tl_bh = NULL; + + mlog_entry_void(); + + status = ocfs2_get_truncate_log_info(osb, + osb->slot_num, + &tl_inode, + &tl_bh); + if (status < 0) + mlog_errno(status); + + /* ocfs2_truncate_log_shutdown keys on the existence of + * osb->osb_tl_inode so we don't set any of the osb variables + * until we're sure all is well. */ + INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, + ocfs2_truncate_log_worker); + osb->osb_tl_bh = tl_bh; + osb->osb_tl_inode = tl_inode; + + mlog_exit(status); + return status; +} + +/* + * Delayed de-allocation of suballocator blocks. + * + * Some sets of block de-allocations might involve multiple suballocator inodes. + * + * The locking for this can get extremely complicated, especially when + * the suballocator inodes to delete from aren't known until deep + * within an unrelated codepath. + * + * ocfs2_extent_block structures are a good example of this - an inode + * btree could have been grown by any number of nodes each allocating + * out of their own suballoc inode. + * + * These structures allow the delay of block de-allocation until a + * later time, when locking of multiple cluster inodes won't cause + * deadlock. 
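+ *
+ * The bookkeeping is deliberately simple: the context carries one
+ * ocfs2_per_slot_free_list per (inode type, slot) pair, each of
+ * which chains the individual ocfs2_cached_block_free entries
+ * recording a suballocator block and bit.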
+ */ + +/* + * Describes a single block free from a suballocator + */ +struct ocfs2_cached_block_free { + struct ocfs2_cached_block_free *free_next; + u64 free_blk; + unsigned int free_bit; +}; + +struct ocfs2_per_slot_free_list { + struct ocfs2_per_slot_free_list *f_next_suballocator; + int f_inode_type; + int f_slot; + struct ocfs2_cached_block_free *f_first; +}; + +static int ocfs2_free_cached_items(struct ocfs2_super *osb, + int sysfile_type, + int slot, + struct ocfs2_cached_block_free *head) +{ + int ret; + u64 bg_blkno; + handle_t *handle; + struct inode *inode; + struct buffer_head *di_bh = NULL; + struct ocfs2_cached_block_free *tmp; + + inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot); + if (!inode) { + ret = -EINVAL; + mlog_errno(ret); + goto out; + } + + mutex_lock(&inode->i_mutex); + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret) { + mlog_errno(ret); + goto out_mutex; + } + + handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out_unlock; + } + + while (head) { + bg_blkno = ocfs2_which_suballoc_group(head->free_blk, + head->free_bit); + mlog(0, "Free bit: (bit %u, blkno %llu)\n", + head->free_bit, (unsigned long long)head->free_blk); + + ret = ocfs2_free_suballoc_bits(handle, inode, di_bh, + head->free_bit, bg_blkno, 1); + if (ret) { + mlog_errno(ret); + goto out_journal; + } + + ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE); + if (ret) { + mlog_errno(ret); + goto out_journal; + } + + tmp = head; + head = head->free_next; + kfree(tmp); + } + +out_journal: + ocfs2_commit_trans(osb, handle); + +out_unlock: + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); +out_mutex: + mutex_unlock(&inode->i_mutex); + iput(inode); +out: + while(head) { + /* Premature exit may have left some dangling items. 
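+		 * Free the list entries themselves - the suballocator
+		 * bits they describe simply stay allocated.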
*/
+		tmp = head;
+		head = head->free_next;
+		kfree(tmp);
+	}
+
+	return ret;
+}
+
+int ocfs2_run_deallocs(struct ocfs2_super *osb,
+		       struct ocfs2_cached_dealloc_ctxt *ctxt)
+{
+	int ret = 0, ret2;
+	struct ocfs2_per_slot_free_list *fl;
+
+	if (!ctxt)
+		return 0;
+
+	while (ctxt->c_first_suballocator) {
+		fl = ctxt->c_first_suballocator;
+
+		if (fl->f_first) {
+			mlog(0, "Free items: (type %u, slot %d)\n",
+			     fl->f_inode_type, fl->f_slot);
+			ret2 = ocfs2_free_cached_items(osb, fl->f_inode_type,
+						       fl->f_slot, fl->f_first);
+			if (ret2)
+				mlog_errno(ret2);
+			if (!ret)
+				ret = ret2;
+		}
+
+		ctxt->c_first_suballocator = fl->f_next_suballocator;
+		kfree(fl);
+	}
+
+	return ret;
+}
+
+static struct ocfs2_per_slot_free_list *
+ocfs2_find_per_slot_free_list(int type,
+			      int slot,
+			      struct ocfs2_cached_dealloc_ctxt *ctxt)
+{
+	struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
+
+	while (fl) {
+		if (fl->f_inode_type == type && fl->f_slot == slot)
+			return fl;
+
+		fl = fl->f_next_suballocator;
+	}
+
+	fl = kmalloc(sizeof(*fl), GFP_NOFS);
+	if (fl) {
+		fl->f_inode_type = type;
+		fl->f_slot = slot;
+		fl->f_first = NULL;
+		fl->f_next_suballocator = ctxt->c_first_suballocator;
+
+		ctxt->c_first_suballocator = fl;
+	}
+	return fl;
+}
+
+static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
+				     int type, int slot, u64 blkno,
+				     unsigned int bit)
+{
+	int ret;
+	struct ocfs2_per_slot_free_list *fl;
+	struct ocfs2_cached_block_free *item;
+
+	fl = ocfs2_find_per_slot_free_list(type, slot, ctxt);
+	if (fl == NULL) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	item = kmalloc(sizeof(*item), GFP_NOFS);
+	if (item == NULL) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	mlog(0, "Insert: (type %d, slot %u, bit %u, blk %llu)\n",
+	     type, slot, bit, (unsigned long long)blkno);
+
+	item->free_blk = blkno;
+	item->free_bit = bit;
+	item->free_next = fl->f_first;
+
+	fl->f_first = item;
+
+	ret = 0;
+out:
+	return ret;
+}
+
+static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
+					 struct ocfs2_extent_block *eb)
+{
+	return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
+					 le16_to_cpu(eb->h_suballoc_slot),
+					 le64_to_cpu(eb->h_blkno),
+					 le16_to_cpu(eb->h_suballoc_bit));
+}
+
+/* This function will figure out whether the current last extent
+ * block will be deleted, and if it will, what the new last extent
+ * block will be so we can update its h_next_leaf_blk field, as well
+ * as the dinode's i_last_eb_blk */
+static int ocfs2_find_new_last_ext_blk(struct inode *inode,
+				       unsigned int clusters_to_del,
+				       struct ocfs2_path *path,
+				       struct buffer_head **new_last_eb)
+{
+	int next_free, ret = 0;
+	u32 cpos;
+	struct ocfs2_extent_rec *rec;
+	struct ocfs2_extent_block *eb;
+	struct ocfs2_extent_list *el;
+	struct buffer_head *bh = NULL;
+
+	*new_last_eb = NULL;
+
+	/* we have no tree, so of course, no last_eb. */
+	if (!path->p_tree_depth)
+		goto out;
+
+	/* trunc to zero special case - this makes tree_depth = 0
+	 * regardless of what it is. */
+	if (OCFS2_I(inode)->ip_clusters == clusters_to_del)
+		goto out;
+
+	el = path_leaf_el(path);
+	BUG_ON(!el->l_next_free_rec);
+
+	/*
+	 * Make sure that this extent list will actually be empty
+	 * after we clear away the data. We can shortcut out if
+	 * there's more than one non-empty extent in the
+	 * list. Otherwise, a check of the remaining extent is
+	 * necessary.
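+	 *
+	 * For example, a leaf holding [empty, (cpos 100, 8 clusters)]
+	 * only empties out if all 8 of those clusters are being
+	 * deleted.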
+	 */
+	next_free = le16_to_cpu(el->l_next_free_rec);
+	rec = NULL;
+	if (ocfs2_is_empty_extent(&el->l_recs[0])) {
+		if (next_free > 2)
+			goto out;
+
+		/* We may have a valid extent in index 1, check it. */
+		if (next_free == 2)
+			rec = &el->l_recs[1];
+
+		/*
+		 * Fall through - no more nonempty extents, so we want
+		 * to delete this leaf.
+		 */
+	} else {
+		if (next_free > 1)
+			goto out;
+
+		rec = &el->l_recs[0];
+	}
+
+	if (rec) {
+		/*
+		 * Check if we'll only be trimming off the end of this
+		 * cluster.
+		 */
+		if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del)
+			goto out;
+	}
+
+	ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	eb = (struct ocfs2_extent_block *) bh->b_data;
+	el = &eb->h_list;
+	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+		OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+		ret = -EROFS;
+		goto out;
+	}
+
+	*new_last_eb = bh;
+	get_bh(*new_last_eb);
+	mlog(0, "returning block %llu, (cpos: %u)\n",
+	     (unsigned long long)le64_to_cpu(eb->h_blkno), cpos);
+out:
+	brelse(bh);
+
+	return ret;
+}
+
+/*
+ * Trim some clusters off the rightmost edge of a tree. Only called
+ * during truncate.
+ *
+ * The caller needs to:
+ *   - start journaling of each path component.
+ *   - compute and fully set up any new last ext block
+ */
+static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
+			   handle_t *handle, struct ocfs2_truncate_context *tc,
+			   u32 clusters_to_del, u64 *delete_start)
+{
+	int ret, i, index = path->p_tree_depth;
+	u32 new_edge = 0;
+	u64 deleted_eb = 0;
+	struct buffer_head *bh;
+	struct ocfs2_extent_list *el;
+	struct ocfs2_extent_rec *rec;
+
+	*delete_start = 0;
+
+	while (index >= 0) {
+		bh = path->p_node[index].bh;
+		el = path->p_node[index].el;
+
+		mlog(0, "traveling tree (index = %d, block = %llu)\n",
+		     index, (unsigned long long)bh->b_blocknr);
+
+		BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
+
+		if (index !=
+		    (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) {
+			ocfs2_error(inode->i_sb,
+				    "Inode %lu has invalid ext. block %llu",
+				    inode->i_ino,
+				    (unsigned long long)bh->b_blocknr);
+			ret = -EROFS;
+			goto out;
+		}
+
+find_tail_record:
+		i = le16_to_cpu(el->l_next_free_rec) - 1;
+		rec = &el->l_recs[i];
+
+		mlog(0, "Extent list before: record %d: (%u, %u, %llu), "
+		     "next = %u\n", i, le32_to_cpu(rec->e_cpos),
+		     ocfs2_rec_clusters(el, rec),
+		     (unsigned long long)le64_to_cpu(rec->e_blkno),
+		     le16_to_cpu(el->l_next_free_rec));
+
+		BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del);
+
+		if (le16_to_cpu(el->l_tree_depth) == 0) {
+			/*
+			 * If the leaf block contains a single empty
+			 * extent and no records, we can just remove
+			 * the block.
+			 */
+			if (i == 0 && ocfs2_is_empty_extent(rec)) {
+				memset(rec, 0,
+				       sizeof(struct ocfs2_extent_rec));
+				el->l_next_free_rec = cpu_to_le16(0);
+
+				goto delete;
+			}
+
+			/*
+			 * Remove any empty extents by shifting things
+			 * left. That should make life much easier on
+			 * the code below. This condition is rare
+			 * enough that we shouldn't see a performance
+			 * hit.
+			 */
+			if (ocfs2_is_empty_extent(&el->l_recs[0])) {
+				le16_add_cpu(&el->l_next_free_rec, -1);
+
+				for(i = 0;
+				    i < le16_to_cpu(el->l_next_free_rec); i++)
+					el->l_recs[i] = el->l_recs[i + 1];
+
+				memset(&el->l_recs[i], 0,
+				       sizeof(struct ocfs2_extent_rec));
+
+				/*
+				 * We've modified our extent list. The
+				 * simplest way to handle this change
+				 * is to begin the search from the
+				 * start again.
+				 */
+				goto find_tail_record;
+			}
+
+			le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del);
+
+			/*
+			 * We'll use "new_edge" on our way back up the
+			 * tree to know what our rightmost cpos is.
+			 */
+			new_edge = le16_to_cpu(rec->e_leaf_clusters);
+			new_edge += le32_to_cpu(rec->e_cpos);
+
+			/*
+			 * The caller will use this to delete data blocks.
+			 */
+			*delete_start = le64_to_cpu(rec->e_blkno)
+				+ ocfs2_clusters_to_blocks(inode->i_sb,
+					le16_to_cpu(rec->e_leaf_clusters));
+
+			/*
+			 * If it's now empty, remove this record.
+			 */
+			if (le16_to_cpu(rec->e_leaf_clusters) == 0) {
+				memset(rec, 0,
+				       sizeof(struct ocfs2_extent_rec));
+				le16_add_cpu(&el->l_next_free_rec, -1);
+			}
+		} else {
+			if (le64_to_cpu(rec->e_blkno) == deleted_eb) {
+				memset(rec, 0,
+				       sizeof(struct ocfs2_extent_rec));
+				le16_add_cpu(&el->l_next_free_rec, -1);
+
+				goto delete;
+			}
+
+			/* Can this actually happen? */
+			if (le16_to_cpu(el->l_next_free_rec) == 0)
+				goto delete;
+
+			/*
+			 * We never actually deleted any clusters
+			 * because our leaf was empty. There's no
+			 * reason to adjust the rightmost edge then.
+			 */
+			if (new_edge == 0)
+				goto delete;
+
+			rec->e_int_clusters = cpu_to_le32(new_edge);
+			le32_add_cpu(&rec->e_int_clusters,
+				     -le32_to_cpu(rec->e_cpos));
+
+			/*
+			 * A deleted child record should have been
+			 * caught above.
+			 */
+			BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0);
+		}
+
+delete:
+		ret = ocfs2_journal_dirty(handle, bh);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		mlog(0, "extent list container %llu, after: record %d: "
+		     "(%u, %u, %llu), next = %u.\n",
+		     (unsigned long long)bh->b_blocknr, i,
+		     le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec),
+		     (unsigned long long)le64_to_cpu(rec->e_blkno),
+		     le16_to_cpu(el->l_next_free_rec));
+
+		/*
+		 * We must be careful to only attempt delete of an
+		 * extent block (and not the root inode block).
+		 */
+		if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) {
+			struct ocfs2_extent_block *eb =
+				(struct ocfs2_extent_block *)bh->b_data;
+
+			/*
+			 * Save this for use when processing the
+			 * parent block.
+			 */
+			deleted_eb = le64_to_cpu(eb->h_blkno);
+
+			mlog(0, "deleting this extent block.\n");
+
+			ocfs2_remove_from_cache(inode, bh);
+
+			BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
+			BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
+			BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno));
+
+			ret = ocfs2_cache_extent_block_free(&tc->tc_dealloc, eb);
+			/* An error here is not fatal. */
+			if (ret < 0)
+				mlog_errno(ret);
+		} else {
+			deleted_eb = 0;
+		}
+
+		index--;
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
+static int ocfs2_do_truncate(struct ocfs2_super *osb,
+			     unsigned int clusters_to_del,
+			     struct inode *inode,
+			     struct buffer_head *fe_bh,
+			     handle_t *handle,
+			     struct ocfs2_truncate_context *tc,
+			     struct ocfs2_path *path)
+{
+	int status;
+	struct ocfs2_dinode *fe;
+	struct ocfs2_extent_block *last_eb = NULL;
+	struct ocfs2_extent_list *el;
+	struct buffer_head *last_eb_bh = NULL;
+	u64 delete_blk = 0;
+
+	fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+	status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del,
+					     path, &last_eb_bh);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	/*
+	 * Each component will be touched, so we might as well journal
+	 * here to avoid having to handle errors later.
+ */ + status = ocfs2_journal_access_path(inode, handle, path); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (last_eb_bh) { + status = ocfs2_journal_access(handle, inode, last_eb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; + } + + el = &(fe->id2.i_list); + + /* + * Lower levels depend on this never happening, but it's best + * to check it up here before changing the tree. + */ + if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) { + ocfs2_error(inode->i_sb, + "Inode %lu has an empty extent record, depth %u\n", + inode->i_ino, le16_to_cpu(el->l_tree_depth)); + status = -EROFS; + goto bail; + } + + spin_lock(&OCFS2_I(inode)->ip_lock); + OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - + clusters_to_del; + spin_unlock(&OCFS2_I(inode)->ip_lock); + le32_add_cpu(&fe->i_clusters, -clusters_to_del); + inode->i_blocks = ocfs2_inode_sector_count(inode); + + status = ocfs2_trim_tree(inode, path, handle, tc, + clusters_to_del, &delete_blk); + if (status) { + mlog_errno(status); + goto bail; + } + + if (le32_to_cpu(fe->i_clusters) == 0) { + /* trunc to zero is a special case. */ + el->l_tree_depth = 0; + fe->i_last_eb_blk = 0; + } else if (last_eb) + fe->i_last_eb_blk = last_eb->h_blkno; + + status = ocfs2_journal_dirty(handle, fe_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (last_eb) { + /* If there will be a new last extent block, then by + * definition, there cannot be any leaves to the right of + * him. */ + last_eb->h_next_leaf_blk = 0; + status = ocfs2_journal_dirty(handle, last_eb_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + if (delete_blk) { + status = ocfs2_truncate_log_append(osb, handle, delete_blk, + clusters_to_del); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + status = 0; +bail: + + mlog_exit(status); + return status; +} + +static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) +{ + set_buffer_uptodate(bh); + mark_buffer_dirty(bh); + return 0; +} + +static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, + unsigned int from, unsigned int to, + struct page *page, int zero, u64 *phys) +{ + int ret, partial = 0; + + ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); + if (ret) + mlog_errno(ret); + + if (zero) + zero_user_segment(page, from, to); + + /* + * Need to set the buffers we zero'd into uptodate + * here if they aren't - ocfs2_map_page_blocks() + * might've skipped some + */ + ret = walk_page_buffers(handle, page_buffers(page), + from, to, &partial, + ocfs2_zero_func); + if (ret < 0) + mlog_errno(ret); + else if (ocfs2_should_order_data(inode)) { + ret = ocfs2_jbd2_file_inode(handle, inode); +#ifdef CONFIG_OCFS2_COMPAT_JBD + ret = walk_page_buffers(handle, page_buffers(page), + from, to, &partial, + ocfs2_journal_dirty_data); +#endif + if (ret < 0) + mlog_errno(ret); + } + + if (!partial) + SetPageUptodate(page); + + flush_dcache_page(page); +} + +static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, + loff_t end, struct page **pages, + int numpages, u64 phys, handle_t *handle) +{ + int i; + struct page *page; + unsigned int from, to = PAGE_CACHE_SIZE; + struct super_block *sb = inode->i_sb; + + BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); + + if (numpages == 0) + goto out; + + to = PAGE_CACHE_SIZE; + for(i = 0; i < numpages; i++) { + page = pages[i]; + + from = start & (PAGE_CACHE_SIZE - 1); + if 
((end >> PAGE_CACHE_SHIFT) == page->index) + to = end & (PAGE_CACHE_SIZE - 1); + + BUG_ON(from > PAGE_CACHE_SIZE); + BUG_ON(to > PAGE_CACHE_SIZE); + + ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, + &phys); + + start = (page->index + 1) << PAGE_CACHE_SHIFT; + } +out: + if (pages) + ocfs2_unlock_and_free_pages(pages, numpages); +} + +static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, + struct page **pages, int *num) +{ + int numpages, ret = 0; + struct super_block *sb = inode->i_sb; + struct address_space *mapping = inode->i_mapping; + unsigned long index; + loff_t last_page_bytes; + + BUG_ON(start > end); + + BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != + (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); + + numpages = 0; + last_page_bytes = PAGE_ALIGN(end); + index = start >> PAGE_CACHE_SHIFT; + do { + pages[numpages] = grab_cache_page(mapping, index); + if (!pages[numpages]) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + numpages++; + index++; + } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); + +out: + if (ret != 0) { + if (pages) + ocfs2_unlock_and_free_pages(pages, numpages); + numpages = 0; + } + + *num = numpages; + + return ret; +} + +/* + * Zero the area past i_size but still within an allocated + * cluster. This avoids exposing nonzero data on subsequent file + * extends. + * + * We need to call this before i_size is updated on the inode because + * otherwise block_write_full_page() will skip writeout of pages past + * i_size. The new_i_size parameter is passed for this reason. + */ +int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, + u64 range_start, u64 range_end) +{ + int ret = 0, numpages; + struct page **pages = NULL; + u64 phys; + unsigned int ext_flags; + struct super_block *sb = inode->i_sb; + + /* + * File systems which don't support sparse files zero on every + * extend. + */ + if (!ocfs2_sparse_alloc(OCFS2_SB(sb))) + return 0; + + pages = kcalloc(ocfs2_pages_per_cluster(sb), + sizeof(struct page *), GFP_NOFS); + if (pages == NULL) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + if (range_start == range_end) + goto out; + + ret = ocfs2_extent_map_get_blocks(inode, + range_start >> sb->s_blocksize_bits, + &phys, NULL, &ext_flags); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Tail is a hole, or is marked unwritten. In either case, we + * can count on read and write to return/push zero's. + */ + if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) + goto out; + + ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, + &numpages); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, + numpages, phys, handle); + + /* + * Initiate writeout of the pages we zero'd here. We don't + * wait on them - the truncate_inode_pages() call later will + * do that for us. 
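+	 * SYNC_FILE_RANGE_WRITE only starts the I/O, it does not
+	 * wait for completion.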
+ */ + ret = do_sync_mapping_range(inode->i_mapping, range_start, + range_end - 1, SYNC_FILE_RANGE_WRITE); + if (ret) + mlog_errno(ret); + +out: + if (pages) + kfree(pages); + + return ret; +} + +static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode, + struct ocfs2_dinode *di) +{ + unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits; + unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); + + if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) + memset(&di->id2, 0, blocksize - + offsetof(struct ocfs2_dinode, id2) - + xattrsize); + else + memset(&di->id2, 0, blocksize - + offsetof(struct ocfs2_dinode, id2)); +} + +void ocfs2_dinode_new_extent_list(struct inode *inode, + struct ocfs2_dinode *di) +{ + ocfs2_zero_dinode_id2_with_xattr(inode, di); + di->id2.i_list.l_tree_depth = 0; + di->id2.i_list.l_next_free_rec = 0; + di->id2.i_list.l_count = cpu_to_le16( + ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di)); +} + +void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_inline_data *idata = &di->id2.i_data; + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + spin_unlock(&oi->ip_lock); + + /* + * We clear the entire i_data structure here so that all + * fields can be properly initialized. + */ + ocfs2_zero_dinode_id2_with_xattr(inode, di); + + idata->id_count = cpu_to_le16( + ocfs2_max_inline_data_with_xattr(inode->i_sb, di)); +} + +int ocfs2_convert_inline_data_to_extents(struct inode *inode, + struct buffer_head *di_bh) +{ + int ret, i, has_data, num_pages = 0; + handle_t *handle; + u64 uninitialized_var(block); + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_alloc_context *data_ac = NULL; + struct page **pages = NULL; + loff_t end = osb->s_clustersize; + struct ocfs2_extent_tree et; + + has_data = i_size_read(inode) ? 1 : 0; + + if (has_data) { + pages = kcalloc(ocfs2_pages_per_cluster(osb->sb), + sizeof(struct page *), GFP_NOFS); + if (pages == NULL) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_reserve_clusters(osb, 1, &data_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_INLINE_TO_EXTENTS_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out_unlock; + } + + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + if (has_data) { + u32 bit_off, num; + unsigned int page_end; + u64 phys; + + ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, + &num); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * Save two copies, one for insert, and one that can + * be changed by ocfs2_map_and_dirty_page() below. + */ + block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); + + /* + * Non sparse file systems zero on extend, so no need + * to do that now. + */ + if (!ocfs2_sparse_alloc(osb) && + PAGE_CACHE_SIZE < osb->s_clustersize) + end = PAGE_CACHE_SIZE; + + ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * This should populate the 1st page for us and mark + * it up to date. 
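+		 * The inline bytes must be copied out before
+		 * ocfs2_dinode_new_extent_list() reinitializes the
+		 * dinode's id2 area below.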
+ */ + ret = ocfs2_read_inline_data(inode, pages[0], di_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + page_end = PAGE_CACHE_SIZE; + if (PAGE_CACHE_SIZE > osb->s_clustersize) + page_end = osb->s_clustersize; + + for (i = 0; i < num_pages; i++) + ocfs2_map_and_dirty_page(inode, handle, 0, page_end, + pages[i], i > 0, &phys); + } + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + spin_unlock(&oi->ip_lock); + + ocfs2_dinode_new_extent_list(inode, di); + + ocfs2_journal_dirty(handle, di_bh); + + if (has_data) { + /* + * An error at this point should be extremely rare. If + * this proves to be false, we could always re-build + * the in-inode data from our pages. + */ + ocfs2_init_dinode_extent_tree(&et, inode, di_bh); + ret = ocfs2_insert_extent(osb, handle, inode, &et, + 0, block, 1, 0, NULL); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + inode->i_blocks = ocfs2_inode_sector_count(inode); + } + +out_commit: + ocfs2_commit_trans(osb, handle); + +out_unlock: + if (data_ac) + ocfs2_free_alloc_context(data_ac); + +out: + if (pages) { + ocfs2_unlock_and_free_pages(pages, num_pages); + kfree(pages); + } + + return ret; +} + +/* + * It is expected, that by the time you call this function, + * inode->i_size and fe->i_size have been adjusted. + * + * WARNING: This will kfree the truncate context + */ +int ocfs2_commit_truncate(struct ocfs2_super *osb, + struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_truncate_context *tc) +{ + int status, i, credits, tl_sem = 0; + u32 clusters_to_del, new_highest_cpos, range; + struct ocfs2_extent_list *el; + handle_t *handle = NULL; + struct inode *tl_inode = osb->osb_tl_inode; + struct ocfs2_path *path = NULL; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data; + + mlog_entry_void(); + + new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb, + i_size_read(inode)); + + path = ocfs2_new_path(fe_bh, &di->id2.i_list); + if (!path) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + ocfs2_extent_map_trunc(inode, new_highest_cpos); + +start: + /* + * Check that we still have allocation to delete. + */ + if (OCFS2_I(inode)->ip_clusters == 0) { + status = 0; + goto bail; + } + + /* + * Truncate always works against the rightmost tree branch. + */ + status = ocfs2_find_path(inode, path, UINT_MAX); + if (status) { + mlog_errno(status); + goto bail; + } + + mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n", + OCFS2_I(inode)->ip_clusters, path->p_tree_depth); + + /* + * By now, el will point to the extent list on the bottom most + * portion of this tree. Only the tail record is considered in + * each pass. 
+ * + * We handle the following cases, in order: + * - empty extent: delete the remaining branch + * - remove the entire record + * - remove a partial record + * - no record needs to be removed (truncate has completed) + */ + el = path_leaf_el(path); + if (le16_to_cpu(el->l_next_free_rec) == 0) { + ocfs2_error(inode->i_sb, + "Inode %llu has empty extent block at %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)path_leaf_bh(path)->b_blocknr); + status = -EROFS; + goto bail; + } + + i = le16_to_cpu(el->l_next_free_rec) - 1; + range = le32_to_cpu(el->l_recs[i].e_cpos) + + ocfs2_rec_clusters(el, &el->l_recs[i]); + if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) { + clusters_to_del = 0; + } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) { + clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]); + } else if (range > new_highest_cpos) { + clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) + + le32_to_cpu(el->l_recs[i].e_cpos)) - + new_highest_cpos; + } else { + status = 0; + goto bail; + } + + mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n", + clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr); + + mutex_lock(&tl_inode->i_mutex); + tl_sem = 1; + /* ocfs2_truncate_log_needs_flush guarantees us at least one + * record is free for use. If there isn't any, we flush to get + * an empty truncate log. */ + if (ocfs2_truncate_log_needs_flush(osb)) { + status = __ocfs2_flush_truncate_log(osb); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del, + (struct ocfs2_dinode *)fe_bh->b_data, + el); + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto bail; + } + + status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle, + tc, path); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + mutex_unlock(&tl_inode->i_mutex); + tl_sem = 0; + + ocfs2_commit_trans(osb, handle); + handle = NULL; + + ocfs2_reinit_path(path, 1); + + /* + * The check above will catch the case where we've truncated + * away all allocation. + */ + goto start; + +bail: + + ocfs2_schedule_truncate_log_flush(osb, 1); + + if (tl_sem) + mutex_unlock(&tl_inode->i_mutex); + + if (handle) + ocfs2_commit_trans(osb, handle); + + ocfs2_run_deallocs(osb, &tc->tc_dealloc); + + ocfs2_free_path(path); + + /* This will drop the ext_alloc cluster lock for us */ + ocfs2_free_truncate_context(tc); + + mlog_exit(status); + return status; +} + +/* + * Expects the inode to already be locked. 
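+ *
+ * An illustrative calling sequence, pairing this with
+ * ocfs2_commit_truncate():
+ *
+ *	struct ocfs2_truncate_context *tc = NULL;
+ *
+ *	status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
+ *	if (!status)
+ *		status = ocfs2_commit_truncate(osb, inode, fe_bh, tc);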
+ */
+int ocfs2_prepare_truncate(struct ocfs2_super *osb,
+			   struct inode *inode,
+			   struct buffer_head *fe_bh,
+			   struct ocfs2_truncate_context **tc)
+{
+	int status;
+	unsigned int new_i_clusters;
+	struct ocfs2_dinode *fe;
+	struct ocfs2_extent_block *eb;
+	struct buffer_head *last_eb_bh = NULL;
+
+	mlog_entry_void();
+
+	*tc = NULL;
+
+	new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
+						  i_size_read(inode));
+	fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+	mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, "
+	     "fe->i_size = %llu\n", le32_to_cpu(fe->i_clusters),
+	     new_i_clusters,
+	     (unsigned long long)le64_to_cpu(fe->i_size));
+
+	*tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
+	if (!(*tc)) {
+		status = -ENOMEM;
+		mlog_errno(status);
+		goto bail;
+	}
+	ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
+
+	if (fe->id2.i_list.l_tree_depth) {
+		status = ocfs2_read_block(inode, le64_to_cpu(fe->i_last_eb_blk),
+					  &last_eb_bh);
+		if (status < 0) {
+			mlog_errno(status);
+			goto bail;
+		}
+		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
+		if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+
+			brelse(last_eb_bh);
+			status = -EIO;
+			goto bail;
+		}
+	}
+
+	(*tc)->tc_last_eb_bh = last_eb_bh;
+
+	status = 0;
+bail:
+	if (status < 0) {
+		if (*tc)
+			ocfs2_free_truncate_context(*tc);
+		*tc = NULL;
+	}
+	mlog_exit_void();
+	return status;
+}
+
+/*
+ * 'start' is inclusive, 'end' is not.
+ */
+int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
+			  unsigned int start, unsigned int end, int trunc)
+{
+	int ret;
+	unsigned int numbytes;
+	handle_t *handle;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+	struct ocfs2_inline_data *idata = &di->id2.i_data;
+
+	if (end > i_size_read(inode))
+		end = i_size_read(inode);
+
+	BUG_ON(start >= end);
+
+	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
+	    !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
+	    !ocfs2_supports_inline_data(osb)) {
+		ocfs2_error(inode->i_sb,
+			    "Inline data flags for inode %llu don't agree! "
+			    "Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
+			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
+			    le16_to_cpu(di->i_dyn_features),
+			    OCFS2_I(inode)->ip_dyn_features,
+			    osb->s_feature_incompat);
+		ret = -EROFS;
+		goto out;
+	}
+
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_journal_access(handle, inode, di_bh,
+				   OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	numbytes = end - start;
+	memset(idata->id_data + start, 0, numbytes);
+
+	/*
+	 * No need to worry about the data page here - it's been
+	 * truncated already and inline data doesn't need it for
+	 * pushing zeros to disk, so we'll let readpage pick it up
+	 * later.
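+	 *
+	 * (Worked example: start = 100, end = 300 zeroes bytes 100..299
+	 * of the inline region, i.e. numbytes = 200; with trunc set,
+	 * i_size is then pulled back to 100.)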
+	 */
+	if (trunc) {
+		i_size_write(inode, start);
+		di->i_size = cpu_to_le64(start);
+	}
+
+	inode->i_blocks = ocfs2_inode_sector_count(inode);
+	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
+	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+
+	ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+	ocfs2_commit_trans(osb, handle);
+
+out:
+	return ret;
+}
+
+static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
+{
+	/*
+	 * The caller is responsible for completing deallocation
+	 * before freeing the context.
+	 */
+	if (tc->tc_dealloc.c_first_suballocator != NULL)
+		mlog(ML_NOTICE,
+		     "Truncate completion has non-empty dealloc context\n");
+
+	brelse(tc->tc_last_eb_bh);
+
+	kfree(tc);
+}
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
new file mode 100644
index 0000000..70257c8
--- /dev/null
+++ b/fs/ocfs2/alloc.h
@@ -0,0 +1,230 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * alloc.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_ALLOC_H
+#define OCFS2_ALLOC_H
+
+
+/*
+ * For an xattr tree leaf, we limit the leaf byte size to 64K.
+ */
+#define OCFS2_MAX_XATTR_TREE_LEAF_SIZE 65536
+
+/*
+ * ocfs2_extent_tree and ocfs2_extent_tree_operations are used to abstract
+ * the b-tree operations in ocfs2. They are no longer limited to
+ * ocfs2_dinode: any on-disk structure which needs to allocate clusters
+ * can store them in a b-tree simply by implementing its own
+ * ocfs2_extent_tree initializer and operations.
+ *
+ * ocfs2_extent_tree is the first-class object for extent tree
+ * manipulation. Callers of the alloc.c code need to fill it via one of
+ * the ocfs2_init_*_extent_tree() operations below.
+ *
+ * ocfs2_extent_tree describes the root of the b-tree: it must carry a
+ * root ocfs2_extent_list and a root_bh so that the b-tree functions
+ * can operate on them.
+ * ocfs2_extent_tree_operations abstracts the operations we perform on
+ * the root of an extent b-tree.
+ */
+struct ocfs2_extent_tree_operations;
+struct ocfs2_extent_tree {
+	struct ocfs2_extent_tree_operations	*et_ops;
+	struct buffer_head			*et_root_bh;
+	struct ocfs2_extent_list		*et_root_el;
+	void					*et_object;
+	unsigned int				et_max_leaf_clusters;
+};
+
+/*
+ * ocfs2_init_*_extent_tree() will fill an ocfs2_extent_tree from the
+ * specified object buffer.
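+ *
+ * An illustrative sequence (this mirrors the inline-to-extents
+ * conversion in alloc.c):
+ *
+ *	struct ocfs2_extent_tree et;
+ *
+ *	ocfs2_init_dinode_extent_tree(&et, inode, di_bh);
+ *	ret = ocfs2_insert_extent(osb, handle, inode, &et,
+ *				  cpos, blkno, 1, 0, meta_ac);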
+ */
+void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
+				   struct inode *inode,
+				   struct buffer_head *bh);
+void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
+				       struct inode *inode,
+				       struct buffer_head *bh);
+void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
+					struct inode *inode,
+					struct buffer_head *bh,
+					struct ocfs2_xattr_value_root *xv);
+
+struct ocfs2_alloc_context;
+int ocfs2_insert_extent(struct ocfs2_super *osb,
+			handle_t *handle,
+			struct inode *inode,
+			struct ocfs2_extent_tree *et,
+			u32 cpos,
+			u64 start_blk,
+			u32 new_clusters,
+			u8 flags,
+			struct ocfs2_alloc_context *meta_ac);
+
+enum ocfs2_alloc_restarted {
+	RESTART_NONE = 0,
+	RESTART_TRANS,
+	RESTART_META
+};
+int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb,
+				struct inode *inode,
+				u32 *logical_offset,
+				u32 clusters_to_add,
+				int mark_unwritten,
+				struct ocfs2_extent_tree *et,
+				handle_t *handle,
+				struct ocfs2_alloc_context *data_ac,
+				struct ocfs2_alloc_context *meta_ac,
+				enum ocfs2_alloc_restarted *reason_ret);
+struct ocfs2_cached_dealloc_ctxt;
+int ocfs2_mark_extent_written(struct inode *inode,
+			      struct ocfs2_extent_tree *et,
+			      handle_t *handle, u32 cpos, u32 len, u32 phys,
+			      struct ocfs2_alloc_context *meta_ac,
+			      struct ocfs2_cached_dealloc_ctxt *dealloc);
+int ocfs2_remove_extent(struct inode *inode,
+			struct ocfs2_extent_tree *et,
+			u32 cpos, u32 len, handle_t *handle,
+			struct ocfs2_alloc_context *meta_ac,
+			struct ocfs2_cached_dealloc_ctxt *dealloc);
+int ocfs2_num_free_extents(struct ocfs2_super *osb,
+			   struct inode *inode,
+			   struct ocfs2_extent_tree *et);
+
+/*
+ * How many new metadata chunks would an allocation need at maximum?
+ *
+ * Please note that the caller must make sure that root_el is the root
+ * of the extent tree. For an inode, that is &fe->id2.i_list;
+ * otherwise the result may be wrong.
+ */
+static inline int ocfs2_extend_meta_needed(struct ocfs2_extent_list *root_el)
+{
+	/*
+	 * Rather than do all the work of determining how much we need
+	 * (involves a ton of reads and locks), just ask for the
+	 * maximal limit. That's a tree depth shift. So, one block per
+	 * level of the tree (the current l_tree_depth), one block for
+	 * the new tree_depth==0 extent_block, and one block at the new
+	 * top of the tree.
+	 */
+	return le16_to_cpu(root_el->l_tree_depth) + 2;
+}
+
+void ocfs2_dinode_new_extent_list(struct inode *inode, struct ocfs2_dinode *di);
+void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di);
+int ocfs2_convert_inline_data_to_extents(struct inode *inode,
+					 struct buffer_head *di_bh);
+
+int ocfs2_truncate_log_init(struct ocfs2_super *osb);
+void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb);
+void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
+				       int cancel);
+int ocfs2_flush_truncate_log(struct ocfs2_super *osb);
+int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
+				      int slot_num,
+				      struct ocfs2_dinode **tl_copy);
+int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
+					 struct ocfs2_dinode *tl_copy);
+int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb);
+int ocfs2_truncate_log_append(struct ocfs2_super *osb,
+			      handle_t *handle,
+			      u64 start_blk,
+			      unsigned int num_clusters);
+int __ocfs2_flush_truncate_log(struct ocfs2_super *osb);
+
+/*
+ * Process-local structure which describes the block unlinks done
+ * during an operation. This is populated via
+ * ocfs2_cache_block_dealloc().
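+ *
+ * An illustrative lifetime (the truncate context uses exactly this
+ * pattern):
+ *
+ *	struct ocfs2_cached_dealloc_ctxt ctxt;
+ *
+ *	ocfs2_init_dealloc_ctxt(&ctxt);
+ *	... extent-removing routines cache block unlinks in &ctxt ...
+ *	ocfs2_run_deallocs(osb, &ctxt);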
+ * + * ocfs2_run_deallocs() should be called after the potentially + * de-allocating routines. No journal handles should be open, and most + * locks should have been dropped. + */ +struct ocfs2_cached_dealloc_ctxt { + struct ocfs2_per_slot_free_list *c_first_suballocator; +}; +static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c) +{ + c->c_first_suballocator = NULL; +} +int ocfs2_run_deallocs(struct ocfs2_super *osb, + struct ocfs2_cached_dealloc_ctxt *ctxt); + +struct ocfs2_truncate_context { + struct ocfs2_cached_dealloc_ctxt tc_dealloc; + int tc_ext_alloc_locked; /* is it cluster locked? */ + /* these get destroyed once it's passed to ocfs2_commit_truncate. */ + struct buffer_head *tc_last_eb_bh; +}; + +int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, + u64 range_start, u64 range_end); +int ocfs2_prepare_truncate(struct ocfs2_super *osb, + struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_truncate_context **tc); +int ocfs2_commit_truncate(struct ocfs2_super *osb, + struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_truncate_context *tc); +int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, + unsigned int start, unsigned int end, int trunc); + +int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el, + u32 cpos, struct buffer_head **leaf_bh); +int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster); + +/* + * Helper function to look at the # of clusters in an extent record. + */ +static inline unsigned int ocfs2_rec_clusters(struct ocfs2_extent_list *el, + struct ocfs2_extent_rec *rec) +{ + /* + * Cluster count in extent records is slightly different + * between interior nodes and leaf nodes. This is to support + * unwritten extents which need a flags field in leaf node + * records, thus shrinking the available space for a clusters + * field. + */ + if (el->l_tree_depth) + return le32_to_cpu(rec->e_int_clusters); + else + return le16_to_cpu(rec->e_leaf_clusters); +} + +/* + * This is only valid for leaf nodes, which are the only ones that can + * have empty extents anyway. + */ +static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec) +{ + return !rec->e_leaf_clusters; +} + +#endif /* OCFS2_ALLOC_H */ diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c new file mode 100644 index 0000000..c22543b --- /dev/null +++ b/fs/ocfs2/aops.c @@ -0,0 +1,1984 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/pagemap.h> +#include <asm/byteorder.h> +#include <linux/swap.h> +#include <linux/pipe_fs_i.h> +#include <linux/mpage.h> + +#define MLOG_MASK_PREFIX ML_FILE_IO +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "aops.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "file.h" +#include "inode.h" +#include "journal.h" +#include "suballoc.h" +#include "super.h" +#include "symlink.h" + +#include "buffer_head_io.h" + +static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + int err = -EIO; + int status; + struct ocfs2_dinode *fe = NULL; + struct buffer_head *bh = NULL; + struct buffer_head *buffer_cache_bh = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + void *kaddr; + + mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, + (unsigned long long)iblock, bh_result, create); + + BUG_ON(ocfs2_inode_is_fast_symlink(inode)); + + if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) { + mlog(ML_ERROR, "block offset > PATH_MAX: %llu", + (unsigned long long)iblock); + goto bail; + } + + status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + fe = (struct ocfs2_dinode *) bh->b_data; + + if (!OCFS2_IS_VALID_DINODE(fe)) { + mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n", + (unsigned long long)le64_to_cpu(fe->i_blkno), 7, + fe->i_signature); + goto bail; + } + + if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb, + le32_to_cpu(fe->i_clusters))) { + mlog(ML_ERROR, "block offset is outside the allocated size: " + "%llu\n", (unsigned long long)iblock); + goto bail; + } + + /* We don't use the page cache to create symlink data, so if + * need be, copy it over from the buffer cache. */ + if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) { + u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + + iblock; + buffer_cache_bh = sb_getblk(osb->sb, blkno); + if (!buffer_cache_bh) { + mlog(ML_ERROR, "couldn't getblock for symlink!\n"); + goto bail; + } + + /* we haven't locked out transactions, so a commit + * could've happened. Since we've got a reference on + * the bh, even if it commits while we're doing the + * copy, the data is still good. */ + if (buffer_jbd(buffer_cache_bh) + && ocfs2_inode_is_new(inode)) { + kaddr = kmap_atomic(bh_result->b_page, KM_USER0); + if (!kaddr) { + mlog(ML_ERROR, "couldn't kmap!\n"); + goto bail; + } + memcpy(kaddr + (bh_result->b_size * iblock), + buffer_cache_bh->b_data, + bh_result->b_size); + kunmap_atomic(kaddr, KM_USER0); + set_buffer_uptodate(bh_result); + } + brelse(buffer_cache_bh); + } + + map_bh(bh_result, inode->i_sb, + le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock); + + err = 0; + +bail: + brelse(bh); + + mlog_exit(err); + return err; +} + +static int ocfs2_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + int err = 0; + unsigned int ext_flags; + u64 max_blocks = bh_result->b_size >> inode->i_blkbits; + u64 p_blkno, count, past_eof; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, + (unsigned long long)iblock, bh_result, create); + + if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) + mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", + inode, inode->i_ino); + + if (S_ISLNK(inode->i_mode)) { + /* this always does I/O for some reason. 
*/ + err = ocfs2_symlink_get_block(inode, iblock, bh_result, create); + goto bail; + } + + err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count, + &ext_flags); + if (err) { + mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, " + "%llu, NULL)\n", err, inode, (unsigned long long)iblock, + (unsigned long long)p_blkno); + goto bail; + } + + if (max_blocks < count) + count = max_blocks; + + /* + * ocfs2 never allocates in this function - the only time we + * need to use BH_New is when we're extending i_size on a file + * system which doesn't support holes, in which case BH_New + * allows block_prepare_write() to zero. + * + * If we see this on a sparse file system, then a truncate has + * raced us and removed the cluster. In this case, we clear + * the buffers dirty and uptodate bits and let the buffer code + * ignore it as a hole. + */ + if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) { + clear_buffer_dirty(bh_result); + clear_buffer_uptodate(bh_result); + goto bail; + } + + /* Treat the unwritten extent as a hole for zeroing purposes. */ + if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) + map_bh(bh_result, inode->i_sb, p_blkno); + + bh_result->b_size = count << inode->i_blkbits; + + if (!ocfs2_sparse_alloc(osb)) { + if (p_blkno == 0) { + err = -EIO; + mlog(ML_ERROR, + "iblock = %llu p_blkno = %llu blkno=(%llu)\n", + (unsigned long long)iblock, + (unsigned long long)p_blkno, + (unsigned long long)OCFS2_I(inode)->ip_blkno); + mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters); + dump_stack(); + } + + past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); + mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino, + (unsigned long long)past_eof); + + if (create && (iblock >= past_eof)) + set_buffer_new(bh_result); + } + +bail: + if (err < 0) + err = -EIO; + + mlog_exit(err); + return err; +} + +int ocfs2_read_inline_data(struct inode *inode, struct page *page, + struct buffer_head *di_bh) +{ + void *kaddr; + loff_t size; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + + if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) { + ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + return -EROFS; + } + + size = i_size_read(inode); + + if (size > PAGE_CACHE_SIZE || + size > ocfs2_max_inline_data(inode->i_sb)) { + ocfs2_error(inode->i_sb, + "Inode %llu has with inline data has bad size: %Lu", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)size); + return -EROFS; + } + + kaddr = kmap_atomic(page, KM_USER0); + if (size) + memcpy(kaddr, di->id2.i_data.id_data, size); + /* Clear the remaining part of the page */ + memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); + flush_dcache_page(page); + kunmap_atomic(kaddr, KM_USER0); + + SetPageUptodate(page); + + return 0; +} + +static int ocfs2_readpage_inline(struct inode *inode, struct page *page) +{ + int ret; + struct buffer_head *di_bh = NULL; + + BUG_ON(!PageLocked(page)); + BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); + + ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_inline_data(inode, page, di_bh); +out: + unlock_page(page); + + brelse(di_bh); + return ret; +} + +static int ocfs2_readpage(struct file *file, struct page *page) +{ + struct inode *inode = page->mapping->host; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + loff_t start = 
+	int ret, unlock = 1;
+
+	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
+
+	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
+	if (ret != 0) {
+		if (ret == AOP_TRUNCATED_PAGE)
+			unlock = 0;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
+		ret = AOP_TRUNCATED_PAGE;
+		goto out_inode_unlock;
+	}
+
+	/*
+	 * i_size might have just been updated as we grabbed the meta lock. We
+	 * might now be discovering a truncate that hit on another node.
+	 * block_read_full_page->get_block freaks out if it is asked to read
+	 * beyond the end of a file, so we check here. Callers
+	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
+	 * and notice that the page they just read isn't needed.
+	 *
+	 * XXX sys_readahead() seems to get that wrong?
+	 */
+	if (start >= i_size_read(inode)) {
+		zero_user(page, 0, PAGE_SIZE);
+		SetPageUptodate(page);
+		ret = 0;
+		goto out_alloc;
+	}
+
+	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+		ret = ocfs2_readpage_inline(inode, page);
+	else
+		ret = block_read_full_page(page, ocfs2_get_block);
+	unlock = 0;
+
+out_alloc:
+	up_read(&OCFS2_I(inode)->ip_alloc_sem);
+out_inode_unlock:
+	ocfs2_inode_unlock(inode, 0);
+out:
+	if (unlock)
+		unlock_page(page);
+	mlog_exit(ret);
+	return ret;
+}
+
+/*
+ * This is used only for read-ahead. Failures or difficult-to-handle
+ * situations are safe to ignore.
+ *
+ * Right now, we don't bother with BH_Boundary - in-inode extent lists
+ * are quite large (243 extents on 4k blocks), so most inodes don't
+ * grow out to a tree. If need be, detecting boundary extents could
+ * trivially be added in a future version of ocfs2_get_block().
+ */
+static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
+			   struct list_head *pages, unsigned nr_pages)
+{
+	int ret, err = -EIO;
+	struct inode *inode = mapping->host;
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+	loff_t start;
+	struct page *last;
+
+	/*
+	 * Use the nonblocking flag for the dlm code to avoid page
+	 * lock inversion, but don't bother with retrying.
+	 */
+	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
+	if (ret)
+		return err;
+
+	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
+		ocfs2_inode_unlock(inode, 0);
+		return err;
+	}
+
+	/*
+	 * Don't bother with inline-data. There isn't anything
+	 * to read-ahead in that case anyway...
+	 */
+	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+		goto out_unlock;
+
+	/*
+	 * Check whether a remote node truncated this file - we just
+	 * drop out in that case as it's not worth handling here.
+	 */
+	last = list_entry(pages->prev, struct page, lru);
+	start = (loff_t)last->index << PAGE_CACHE_SHIFT;
+	if (start >= i_size_read(inode))
+		goto out_unlock;
+
+	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);
+
+out_unlock:
+	up_read(&oi->ip_alloc_sem);
+	ocfs2_inode_unlock(inode, 0);
+
+	return err;
+}
+
+/* Note: Because we don't support holes, our allocation has
+ * already happened (allocation writes zeros to the file data)
+ * so we don't have to worry about ordered writes in
+ * ocfs2_writepage.
+ *
+ * ->writepage is called during the process of invalidating the page cache
+ * during blocked lock processing. It can't block on any cluster locks
+ * during block mapping. It's relying on the fact that the block
+ * mapping can't have disappeared under the dirty pages that it is
+ * being asked to write back.
+ */
+static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
+{
+	int ret;
+
+	mlog_entry("(0x%p)\n", page);
+
+	ret = block_write_full_page(page, ocfs2_get_block, wbc);
+
+	mlog_exit(ret);
+
+	return ret;
+}
+
+/*
+ * This is called from ocfs2_write_zero_page() which has handled its
+ * own cluster locking and has ensured allocation exists for those
+ * blocks to be written.
+ */
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+			       unsigned from, unsigned to)
+{
+	int ret;
+
+	ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+	return ret;
+}
+
+/* Taken from ext3. We don't necessarily need the full blown
+ * functionality yet, but IMHO it's better to cut and paste the whole
+ * thing so we can avoid introducing our own bugs (and easily pick up
+ * their fixes when they happen) --Mark */
+int walk_page_buffers(handle_t *handle,
+		      struct buffer_head *head,
+		      unsigned from,
+		      unsigned to,
+		      int *partial,
+		      int (*fn)(handle_t *handle,
+				struct buffer_head *bh))
+{
+	struct buffer_head *bh;
+	unsigned block_start, block_end;
+	unsigned blocksize = head->b_size;
+	int err, ret = 0;
+	struct buffer_head *next;
+
+	for (bh = head, block_start = 0;
+	     ret == 0 && (bh != head || !block_start);
+	     block_start = block_end, bh = next) {
+		next = bh->b_this_page;
+		block_end = block_start + blocksize;
+		if (block_end <= from || block_start >= to) {
+			if (partial && !buffer_uptodate(bh))
+				*partial = 1;
+			continue;
+		}
+		err = (*fn)(handle, bh);
+		if (!ret)
+			ret = err;
+	}
+	return ret;
+}
+
+handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
+				      struct page *page,
+				      unsigned from,
+				      unsigned to)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	handle_t *handle;
+	int ret = 0;
+
+	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (ocfs2_should_order_data(inode)) {
+		ret = ocfs2_jbd2_file_inode(handle, inode);
+#ifdef CONFIG_OCFS2_COMPAT_JBD
+		ret = walk_page_buffers(handle,
+					page_buffers(page),
+					from, to, NULL,
+					ocfs2_journal_dirty_data);
+#endif
+		if (ret < 0)
+			mlog_errno(ret);
+	}
+out:
+	if (ret) {
+		if (!IS_ERR(handle))
+			ocfs2_commit_trans(osb, handle);
+		handle = ERR_PTR(ret);
+	}
+	return handle;
+}
+
+static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
+{
+	sector_t status;
+	u64 p_blkno = 0;
+	int err = 0;
+	struct inode *inode = mapping->host;
+
+	mlog_entry("(block = %llu)\n", (unsigned long long)block);
+
+	/* We don't need to lock journal system files, since they aren't
+	 * accessed concurrently from multiple nodes.
+	 */
+	if (!INODE_JOURNAL(inode)) {
+		err = ocfs2_inode_lock(inode, NULL, 0);
+		if (err) {
+			if (err != -ENOENT)
+				mlog_errno(err);
+			goto bail;
+		}
+		down_read(&OCFS2_I(inode)->ip_alloc_sem);
+	}
+
+	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
+		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
+						  NULL);
+
+	if (!INODE_JOURNAL(inode)) {
+		up_read(&OCFS2_I(inode)->ip_alloc_sem);
+		ocfs2_inode_unlock(inode, 0);
+	}
+
+	if (err) {
+		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
+		     (unsigned long long)block);
+		mlog_errno(err);
+		goto bail;
+	}
+
+bail:
+	status = err ? 0 : p_blkno;
+
+	mlog_exit((int)status);
+
+	return status;
+}
+
+/*
+ * TODO: Make this into a generic get_blocks function.
+ * + * From do_direct_io in direct-io.c: + * "So what we do is to permit the ->get_blocks function to populate + * bh.b_size with the size of IO which is permitted at this offset and + * this i_blkbits." + * + * This function is called directly from get_more_blocks in direct-io.c. + * + * called like this: dio->get_blocks(dio->inode, fs_startblk, + * fs_count, map_bh, dio->rw == WRITE); + */ +static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + int ret; + u64 p_blkno, inode_blocks, contig_blocks; + unsigned int ext_flags; + unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; + unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; + + /* This function won't even be called if the request isn't all + * nicely aligned and of the right size, so there's no need + * for us to check any of that. */ + + inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); + + /* + * Any write past EOF is not allowed because we'd be extending. + */ + if (create && (iblock + max_blocks) > inode_blocks) { + ret = -EIO; + goto bail; + } + + /* This figures out the size of the next contiguous block, and + * our logical offset */ + ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, + &contig_blocks, &ext_flags); + if (ret) { + mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n", + (unsigned long long)iblock); + ret = -EIO; + goto bail; + } + + if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno && create) { + ocfs2_error(inode->i_sb, + "Inode %llu has a hole at block %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)iblock); + ret = -EROFS; + goto bail; + } + + /* + * get_more_blocks() expects us to describe a hole by clearing + * the mapped bit on bh_result(). + * + * Consider an unwritten extent as a hole. + */ + if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) + map_bh(bh_result, inode->i_sb, p_blkno); + else { + /* + * ocfs2_prepare_inode_for_write() should have caught + * the case where we'd be filling a hole and triggered + * a buffered write instead. + */ + if (create) { + ret = -EIO; + mlog_errno(ret); + goto bail; + } + + clear_buffer_mapped(bh_result); + } + + /* make sure we don't map more than max_blocks blocks here as + that's all the kernel will handle at this point. */ + if (max_blocks < contig_blocks) + contig_blocks = max_blocks; + bh_result->b_size = contig_blocks << blocksize_bits; +bail: + return ret; +} + +/* + * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're + * particularly interested in the aio/dio case. Like the core uses + * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from + * truncation on another. + */ +static void ocfs2_dio_end_io(struct kiocb *iocb, + loff_t offset, + ssize_t bytes, + void *private) +{ + struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; + int level; + + /* this io's submitter should not have unlocked this before we could */ + BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); + + ocfs2_iocb_clear_rw_locked(iocb); + + level = ocfs2_iocb_rw_locked_level(iocb); + if (!level) + up_read(&inode->i_alloc_sem); + ocfs2_rw_unlock(inode, level); +} + +/* + * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen + * from ext3. PageChecked() bits have been removed as OCFS2 does not + * do journalled data. 
+ */ +static void ocfs2_invalidatepage(struct page *page, unsigned long offset) +{ + journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal; + + jbd2_journal_invalidatepage(journal, page, offset); +} + +static int ocfs2_releasepage(struct page *page, gfp_t wait) +{ + journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal; + + if (!page_has_buffers(page)) + return 0; + return jbd2_journal_try_to_free_buffers(journal, page, wait); +} + +static ssize_t ocfs2_direct_IO(int rw, + struct kiocb *iocb, + const struct iovec *iov, + loff_t offset, + unsigned long nr_segs) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; + int ret; + + mlog_entry_void(); + + /* + * Fallback to buffered I/O if we see an inode without + * extents. + */ + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return 0; + + ret = blockdev_direct_IO_no_locking(rw, iocb, inode, + inode->i_sb->s_bdev, iov, offset, + nr_segs, + ocfs2_direct_IO_get_blocks, + ocfs2_dio_end_io); + + mlog_exit(ret); + return ret; +} + +static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, + u32 cpos, + unsigned int *start, + unsigned int *end) +{ + unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE; + + if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) { + unsigned int cpp; + + cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits); + + cluster_start = cpos % cpp; + cluster_start = cluster_start << osb->s_clustersize_bits; + + cluster_end = cluster_start + osb->s_clustersize; + } + + BUG_ON(cluster_start > PAGE_SIZE); + BUG_ON(cluster_end > PAGE_SIZE); + + if (start) + *start = cluster_start; + if (end) + *end = cluster_end; +} + +/* + * 'from' and 'to' are the region in the page to avoid zeroing. + * + * If pagesize > clustersize, this function will avoid zeroing outside + * of the cluster boundary. + * + * from == to == 0 is code for "zero the entire cluster region" + */ +static void ocfs2_clear_page_regions(struct page *page, + struct ocfs2_super *osb, u32 cpos, + unsigned from, unsigned to) +{ + void *kaddr; + unsigned int cluster_start, cluster_end; + + ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); + + kaddr = kmap_atomic(page, KM_USER0); + + if (from || to) { + if (from > cluster_start) + memset(kaddr + cluster_start, 0, from - cluster_start); + if (to < cluster_end) + memset(kaddr + to, 0, cluster_end - to); + } else { + memset(kaddr + cluster_start, 0, cluster_end - cluster_start); + } + + kunmap_atomic(kaddr, KM_USER0); +} + +/* + * Nonsparse file systems fully allocate before we get to the write + * code. This prevents ocfs2_write() from tagging the write as an + * allocating one, which means ocfs2_map_page_blocks() might try to + * read-in the blocks at the tail of our file. Avoid reading them by + * testing i_size against each block offset. + */ +static int ocfs2_should_read_blk(struct inode *inode, struct page *page, + unsigned int block_start) +{ + u64 offset = page_offset(page) + block_start; + + if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) + return 1; + + if (i_size_read(inode) > offset) + return 1; + + return 0; +} + +/* + * Some of this taken from block_prepare_write(). We already have our + * mapping by now though, and the entire write will be allocating or + * it won't, so not much need to use BH_New. + * + * This will also skip zeroing, which is handled externally. 
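+ *
+ * (Worked example of the range handling below, assuming 1k blocks on
+ * a 4k page: with from = 1000 and to = 3000, block 3 (3072..4095)
+ * lies outside the range and is skipped, block 1 (1024..2047) is
+ * fully covered, and blocks 0 and 2 straddle an end of the range and
+ * may need to be read in first.)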
+ */
+int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+			  struct inode *inode, unsigned int from,
+			  unsigned int to, int new)
+{
+	int ret = 0;
+	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
+	unsigned int block_end, block_start;
+	unsigned int bsize = 1 << inode->i_blkbits;
+
+	if (!page_has_buffers(page))
+		create_empty_buffers(page, bsize, 0);
+
+	head = page_buffers(page);
+	for (bh = head, block_start = 0; bh != head || !block_start;
+	     bh = bh->b_this_page, block_start += bsize) {
+		block_end = block_start + bsize;
+
+		clear_buffer_new(bh);
+
+		/*
+		 * Ignore blocks outside of our i/o range -
+		 * they may belong to unallocated clusters.
+		 */
+		if (block_start >= to || block_end <= from) {
+			if (PageUptodate(page))
+				set_buffer_uptodate(bh);
+			continue;
+		}
+
+		/*
+		 * For an allocating write with cluster size >= page
+		 * size, we always write the entire page.
+		 */
+		if (new)
+			set_buffer_new(bh);
+
+		if (!buffer_mapped(bh)) {
+			map_bh(bh, inode->i_sb, *p_blkno);
+			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+		}
+
+		if (PageUptodate(page)) {
+			if (!buffer_uptodate(bh))
+				set_buffer_uptodate(bh);
+		} else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+			   !buffer_new(bh) &&
+			   ocfs2_should_read_blk(inode, page, block_start) &&
+			   (block_start < from || block_end > to)) {
+			ll_rw_block(READ, 1, &bh);
+			*wait_bh++ = bh;
+		}
+
+		*p_blkno = *p_blkno + 1;
+	}
+
+	/*
+	 * If we issued read requests - let them complete.
+	 */
+	while (wait_bh > wait) {
+		wait_on_buffer(*--wait_bh);
+		if (!buffer_uptodate(*wait_bh))
+			ret = -EIO;
+	}
+
+	if (ret == 0 || !new)
+		return ret;
+
+	/*
+	 * If we get -EIO above, zero out any newly allocated blocks
+	 * to avoid exposing stale data.
+	 */
+	bh = head;
+	block_start = 0;
+	do {
+		block_end = block_start + bsize;
+		if (block_end <= from)
+			goto next_bh;
+		if (block_start >= to)
+			break;
+
+		zero_user(page, block_start, bh->b_size);
+		set_buffer_uptodate(bh);
+		mark_buffer_dirty(bh);
+
+next_bh:
+		block_start = block_end;
+		bh = bh->b_this_page;
+	} while (bh != head);
+
+	return ret;
+}
+
+#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
+#define OCFS2_MAX_CTXT_PAGES	1
+#else
+#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
+#endif
+
+#define OCFS2_MAX_CLUSTERS_PER_PAGE	(PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)
+
+/*
+ * Describe the state of a single cluster to be written to.
+ */
+struct ocfs2_write_cluster_desc {
+	u32		c_cpos;
+	u32		c_phys;
+	/*
+	 * Give this a unique field because c_phys eventually gets
+	 * filled.
+	 */
+	unsigned	c_new;
+	unsigned	c_unwritten;
+};
+
+static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
+{
+	return d->c_new || d->c_unwritten;
+}
+
+struct ocfs2_write_ctxt {
+	/* Logical cluster position / len of write */
+	u32				w_cpos;
+	u32				w_clen;
+
+	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
+
+	/*
+	 * This is true if page_size > cluster_size.
+	 *
+	 * It triggers a set of special cases during write which might
+	 * have to deal with allocating writes to partial pages.
+	 */
+	unsigned int			w_large_pages;
+
+	/*
+	 * Pages involved in this write.
+	 *
+	 * w_target_page is the page being written to by the user.
+	 *
+	 * w_pages is an array of pages which always contains
+	 * w_target_page, and in the case of an allocating write with
+	 * page_size < cluster size, it will contain zero'd and mapped
+	 * pages adjacent to w_target_page which need to be written
+	 * out so that future reads from that region will get
+	 * zeros.
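+	 *
+	 * (Illustration, assuming 4k pages: an allocating write into a
+	 * 16k cluster maps four pages here, only one of which is
+	 * w_target_page; the other three are the zeroed neighbours.)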
+	 */
+	struct page			*w_pages[OCFS2_MAX_CTXT_PAGES];
+	unsigned int			w_num_pages;
+	struct page			*w_target_page;
+
+	/*
+	 * ocfs2_write_end() uses this to know what the real range to
+	 * write in the target should be.
+	 */
+	unsigned int			w_target_from;
+	unsigned int			w_target_to;
+
+	/*
+	 * We could use journal_current_handle() but this is cleaner,
+	 * IMHO -Mark
+	 */
+	handle_t			*w_handle;
+
+	struct buffer_head		*w_di_bh;
+
+	struct ocfs2_cached_dealloc_ctxt w_dealloc;
+};
+
+void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
+{
+	int i;
+
+	for (i = 0; i < num_pages; i++) {
+		if (pages[i]) {
+			unlock_page(pages[i]);
+			mark_page_accessed(pages[i]);
+			page_cache_release(pages[i]);
+		}
+	}
+}
+
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+{
+	ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
+
+	brelse(wc->w_di_bh);
+	kfree(wc);
+}
+
+static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
+				  struct ocfs2_super *osb, loff_t pos,
+				  unsigned len, struct buffer_head *di_bh)
+{
+	u32 cend;
+	struct ocfs2_write_ctxt *wc;
+
+	wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
+	if (!wc)
+		return -ENOMEM;
+
+	wc->w_cpos = pos >> osb->s_clustersize_bits;
+	cend = (pos + len - 1) >> osb->s_clustersize_bits;
+	wc->w_clen = cend - wc->w_cpos + 1;
+	get_bh(di_bh);
+	wc->w_di_bh = di_bh;
+
+	if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
+		wc->w_large_pages = 1;
+	else
+		wc->w_large_pages = 0;
+
+	ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
+
+	*wcp = wc;
+
+	return 0;
+}
+
+/*
+ * If a page has any new buffers, zero them out here, and mark them uptodate
+ * and dirty so they'll be written out (in order to prevent uninitialised
+ * block data from leaking). And clear the new bit.
+ */
+static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
+{
+	unsigned int block_start, block_end;
+	struct buffer_head *head, *bh;
+
+	BUG_ON(!PageLocked(page));
+	if (!page_has_buffers(page))
+		return;
+
+	bh = head = page_buffers(page);
+	block_start = 0;
+	do {
+		block_end = block_start + bh->b_size;
+
+		if (buffer_new(bh)) {
+			if (block_end > from && block_start < to) {
+				if (!PageUptodate(page)) {
+					unsigned start, end;
+
+					start = max(from, block_start);
+					end = min(to, block_end);
+
+					zero_user_segment(page, start, end);
+					set_buffer_uptodate(bh);
+				}
+
+				clear_buffer_new(bh);
+				mark_buffer_dirty(bh);
+			}
+		}
+
+		block_start = block_end;
+		bh = bh->b_this_page;
+	} while (bh != head);
+}
+
+/*
+ * Only called when we have a failure during an allocating write, to
+ * write zeros to the newly allocated region.
+ */
+static void ocfs2_write_failure(struct inode *inode,
+				struct ocfs2_write_ctxt *wc,
+				loff_t user_pos, unsigned user_len)
+{
+	int i;
+	unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
+		to = user_pos + user_len;
+	struct page *tmppage;
+
+	ocfs2_zero_new_buffers(wc->w_target_page, from, to);
+
+	for (i = 0; i < wc->w_num_pages; i++) {
+		tmppage = wc->w_pages[i];
+
+		if (page_has_buffers(tmppage)) {
+			if (ocfs2_should_order_data(inode)) {
+				ocfs2_jbd2_file_inode(wc->w_handle, inode);
+#ifdef CONFIG_OCFS2_COMPAT_JBD
+				walk_page_buffers(wc->w_handle,
+						  page_buffers(tmppage),
+						  from, to, NULL,
+						  ocfs2_journal_dirty_data);
+#endif
+			}
+
+			block_commit_write(tmppage, from, to);
+		}
+	}
+}
+
+static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
+					struct ocfs2_write_ctxt *wc,
+					struct page *page, u32 cpos,
+					loff_t user_pos, unsigned user_len,
+					int new)
+{
+	int ret;
+	unsigned int map_from = 0, map_to = 0;
+	unsigned int cluster_start, cluster_end;
+	unsigned int user_data_from = 0, user_data_to = 0;
+
+	ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
+					&cluster_start, &cluster_end);
+
+	if (page == wc->w_target_page) {
+		map_from = user_pos & (PAGE_CACHE_SIZE - 1);
+		map_to = map_from + user_len;
+
+		if (new)
+			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
+						    cluster_start, cluster_end,
+						    new);
+		else
+			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
+						    map_from, map_to, new);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		user_data_from = map_from;
+		user_data_to = map_to;
+		if (new) {
+			map_from = cluster_start;
+			map_to = cluster_end;
+		}
+	} else {
+		/*
+		 * If we haven't allocated the new page yet, we
+		 * shouldn't be writing it out without copying user
+		 * data. This is likely a math error from the caller.
+		 */
+		BUG_ON(!new);
+
+		map_from = cluster_start;
+		map_to = cluster_end;
+
+		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
+					    cluster_start, cluster_end, new);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+	}
+
+	/*
+	 * Parts of newly allocated pages need to be zero'd.
+	 *
+	 * Above, we have also rewritten 'to' and 'from' - as far as
+	 * the rest of the function is concerned, the entire cluster
+	 * range inside of a page needs to be written.
+	 *
+	 * We can skip this if the page is up to date - it's already
+	 * been zero'd from being read in as a hole.
+	 */
+	if (new && !PageUptodate(page))
+		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
+					 cpos, user_data_from, user_data_to);
+
+	flush_dcache_page(page);
+
+out:
+	return ret;
+}
+
+/*
+ * This function will only grab one cluster's worth of pages.
+ */
+static int ocfs2_grab_pages_for_write(struct address_space *mapping,
+				      struct ocfs2_write_ctxt *wc,
+				      u32 cpos, loff_t user_pos, int new,
+				      struct page *mmap_page)
+{
+	int ret = 0, i;
+	unsigned long start, target_index, index;
+	struct inode *inode = mapping->host;
+
+	target_index = user_pos >> PAGE_CACHE_SHIFT;
+
+	/*
+	 * Figure out how many pages we'll be manipulating here. For a
+	 * non-allocating write, we just change the one
+	 * page. Otherwise, we'll need a whole cluster's worth.
+	 */
+	if (new) {
+		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
+		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
+	} else {
+		wc->w_num_pages = 1;
+		start = target_index;
+	}
+
+	for (i = 0; i < wc->w_num_pages; i++) {
+		index = start + i;
+
+		if (index == target_index && mmap_page) {
+			/*
+			 * ocfs2_page_mkwrite() is a little different
+			 * and wants us to directly use the page
+			 * passed in.
+			 */
+			lock_page(mmap_page);
+
+			if (mmap_page->mapping != mapping) {
+				unlock_page(mmap_page);
+				/*
+				 * Sanity check - the locking in
+				 * ocfs2_page_mkwrite() should ensure
+				 * that this code doesn't trigger.
+				 */
+				ret = -EINVAL;
+				mlog_errno(ret);
+				goto out;
+			}
+
+			page_cache_get(mmap_page);
+			wc->w_pages[i] = mmap_page;
+		} else {
+			wc->w_pages[i] = find_or_create_page(mapping, index,
+							     GFP_NOFS);
+			if (!wc->w_pages[i]) {
+				ret = -ENOMEM;
+				mlog_errno(ret);
+				goto out;
+			}
+		}
+
+		if (index == target_index)
+			wc->w_target_page = wc->w_pages[i];
+	}
+out:
+	return ret;
+}
+
+/*
+ * Prepare a single cluster of the file for writing.
+ */
+static int ocfs2_write_cluster(struct address_space *mapping,
+			       u32 phys, unsigned int unwritten,
+			       struct ocfs2_alloc_context *data_ac,
+			       struct ocfs2_alloc_context *meta_ac,
+			       struct ocfs2_write_ctxt *wc, u32 cpos,
+			       loff_t user_pos, unsigned user_len)
+{
+	int ret, i, new, should_zero = 0;
+	u64 v_blkno, p_blkno;
+	struct inode *inode = mapping->host;
+	struct ocfs2_extent_tree et;
+
+	new = phys == 0 ? 1 : 0;
+	if (new || unwritten)
+		should_zero = 1;
+
+	if (new) {
+		u32 tmp_pos;
+
+		/*
+		 * This is safe to call with the page locks - it won't take
+		 * any additional semaphores or cluster locks.
+		 */
+		tmp_pos = cpos;
+		ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
+					   &tmp_pos, 1, 0, wc->w_di_bh,
+					   wc->w_handle, data_ac,
+					   meta_ac, NULL);
+		/*
+		 * This shouldn't happen because we must have already
+		 * calculated the correct meta data allocation required. The
+		 * internal tree allocation code should know how to increase
+		 * transaction credits itself.
+		 *
+		 * If need be, we could handle -EAGAIN for a
+		 * RESTART_TRANS here.
+		 */
+		mlog_bug_on_msg(ret == -EAGAIN,
+				"Inode %llu: EAGAIN return during allocation.\n",
+				(unsigned long long)OCFS2_I(inode)->ip_blkno);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
+	} else if (unwritten) {
+		ocfs2_init_dinode_extent_tree(&et, inode, wc->w_di_bh);
+		ret = ocfs2_mark_extent_written(inode, &et,
+						wc->w_handle, cpos, 1, phys,
+						meta_ac, &wc->w_dealloc);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
+	}
+
+	if (should_zero)
+		v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
+	else
+		v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;
+
+	/*
+	 * The only reason this should fail is due to an inability to
+	 * find the extent added.
+	 */
+	ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
+					  NULL);
+	if (ret < 0) {
+		ocfs2_error(inode->i_sb, "Corrupt extent map for inode %llu, "
+			    "at logical block %llu",
+			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
+			    (unsigned long long)v_blkno);
+		goto out;
+	}
+
+	BUG_ON(p_blkno == 0);
+
+	for (i = 0; i < wc->w_num_pages; i++) {
+		int tmpret;
+
+		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
+						      wc->w_pages[i], cpos,
+						      user_pos, user_len,
+						      should_zero);
+		if (tmpret) {
+			mlog_errno(tmpret);
+			if (ret == 0)
+				ret = tmpret;
+		}
+	}
+
+	/*
+	 * We only have cleanup to do in the case of an allocating write.
+ */ + if (ret && new) + ocfs2_write_failure(inode, wc, user_pos, user_len); + +out: + + return ret; +} + +static int ocfs2_write_cluster_by_desc(struct address_space *mapping, + struct ocfs2_alloc_context *data_ac, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_write_ctxt *wc, + loff_t pos, unsigned len) +{ + int ret, i; + loff_t cluster_off; + unsigned int local_len = len; + struct ocfs2_write_cluster_desc *desc; + struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb); + + for (i = 0; i < wc->w_clen; i++) { + desc = &wc->w_desc[i]; + + /* + * We have to make sure that the total write passed in + * doesn't extend past a single cluster. + */ + local_len = len; + cluster_off = pos & (osb->s_clustersize - 1); + if ((cluster_off + local_len) > osb->s_clustersize) + local_len = osb->s_clustersize - cluster_off; + + ret = ocfs2_write_cluster(mapping, desc->c_phys, + desc->c_unwritten, data_ac, meta_ac, + wc, desc->c_cpos, pos, local_len); + if (ret) { + mlog_errno(ret); + goto out; + } + + len -= local_len; + pos += local_len; + } + + ret = 0; +out: + return ret; +} + +/* + * ocfs2_write_end() wants to know which parts of the target page it + * should complete the write on. It's easiest to compute them ahead of + * time when a more complete view of the write is available. + */ +static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, + struct ocfs2_write_ctxt *wc, + loff_t pos, unsigned len, int alloc) +{ + struct ocfs2_write_cluster_desc *desc; + + wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1); + wc->w_target_to = wc->w_target_from + len; + + if (alloc == 0) + return; + + /* + * Allocating write - we may have different boundaries based + * on page size and cluster size. + * + * NOTE: We can no longer compute one value from the other as + * the actual write length and user provided length may be + * different. + */ + + if (wc->w_large_pages) { + /* + * We only care about the 1st and last cluster within + * our range and whether they should be zero'd or not. Either + * value may be extended out to the start/end of a + * newly allocated cluster. + */ + desc = &wc->w_desc[0]; + if (ocfs2_should_zero_cluster(desc)) + ocfs2_figure_cluster_boundaries(osb, + desc->c_cpos, + &wc->w_target_from, + NULL); + + desc = &wc->w_desc[wc->w_clen - 1]; + if (ocfs2_should_zero_cluster(desc)) + ocfs2_figure_cluster_boundaries(osb, + desc->c_cpos, + NULL, + &wc->w_target_to); + } else { + wc->w_target_from = 0; + wc->w_target_to = PAGE_CACHE_SIZE; + } +} + +/* + * Populate each single-cluster write descriptor in the write context + * with information about the i/o to be done. + * + * Returns the number of clusters that will have to be allocated, as + * well as a worst case estimate of the number of extent records that + * would have to be created during a write to an unwritten region. + */ +static int ocfs2_populate_write_desc(struct inode *inode, + struct ocfs2_write_ctxt *wc, + unsigned int *clusters_to_alloc, + unsigned int *extents_to_split) +{ + int ret; + struct ocfs2_write_cluster_desc *desc; + unsigned int num_clusters = 0; + unsigned int ext_flags = 0; + u32 phys = 0; + int i; + + *clusters_to_alloc = 0; + *extents_to_split = 0; + + for (i = 0; i < wc->w_clen; i++) { + desc = &wc->w_desc[i]; + desc->c_cpos = wc->w_cpos + i; + + if (num_clusters == 0) { + /* + * Need to look up the next extent record. 
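+			 * (Illustration: a lookup which finds a hole,
+			 * phys == 0 with num_clusters == 2, marks this
+			 * descriptor and the next one c_new and counts
+			 * two clusters in *clusters_to_alloc.)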
+ */ + ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys, + &num_clusters, &ext_flags); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Assume worst case - that we're writing in + * the middle of the extent. + * + * We can assume that the write proceeds from + * left to right, in which case the extent + * insert code is smart enough to coalesce the + * next splits into the previous records created. + */ + if (ext_flags & OCFS2_EXT_UNWRITTEN) + *extents_to_split = *extents_to_split + 2; + } else if (phys) { + /* + * Only increment phys if it doesn't describe + * a hole. + */ + phys++; + } + + desc->c_phys = phys; + if (phys == 0) { + desc->c_new = 1; + *clusters_to_alloc = *clusters_to_alloc + 1; + } + if (ext_flags & OCFS2_EXT_UNWRITTEN) + desc->c_unwritten = 1; + + num_clusters--; + } + + ret = 0; +out: + return ret; +} + +static int ocfs2_write_begin_inline(struct address_space *mapping, + struct inode *inode, + struct ocfs2_write_ctxt *wc) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct page *page; + handle_t *handle; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; + + page = find_or_create_page(mapping, 0, GFP_NOFS); + if (!page) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + /* + * If we don't set w_num_pages then this page won't get unlocked + * and freed on cleanup of the write context. + */ + wc->w_pages[0] = wc->w_target_page = page; + wc->w_num_pages = 1; + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, wc->w_di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + ocfs2_commit_trans(osb, handle); + + mlog_errno(ret); + goto out; + } + + if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) + ocfs2_set_inode_data_inline(inode, di); + + if (!PageUptodate(page)) { + ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh); + if (ret) { + ocfs2_commit_trans(osb, handle); + + goto out; + } + } + + wc->w_handle = handle; +out: + return ret; +} + +int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size) +{ + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + + if (new_size <= le16_to_cpu(di->id2.i_data.id_count)) + return 1; + return 0; +} + +static int ocfs2_try_to_write_inline_data(struct address_space *mapping, + struct inode *inode, loff_t pos, + unsigned len, struct page *mmap_page, + struct ocfs2_write_ctxt *wc) +{ + int ret, written = 0; + loff_t end = pos + len; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + mlog(0, "Inode %llu, write of %u bytes at off %llu. features: 0x%x\n", + (unsigned long long)oi->ip_blkno, len, (unsigned long long)pos, + oi->ip_dyn_features); + + /* + * Handle inodes which already have inline data 1st. + */ + if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + if (mmap_page == NULL && + ocfs2_size_fits_inline_data(wc->w_di_bh, end)) + goto do_inline_write; + + /* + * The write won't fit - we have to give this inode an + * inline extent list now. + */ + ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh); + if (ret) + mlog_errno(ret); + goto out; + } + + /* + * Check whether the inode can accept inline data. + */ + if (oi->ip_clusters != 0 || i_size_read(inode) != 0) + return 0; + + /* + * Check whether the write can fit. 
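+	 * (ocfs2_max_inline_data() is roughly the block size minus the
+	 * dinode header, so on a 4k-block volume a write ending a
+	 * little under 4k can still be stored inline.)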
+	 */
+	if (mmap_page || end > ocfs2_max_inline_data(inode->i_sb))
+		return 0;
+
+do_inline_write:
+	ret = ocfs2_write_begin_inline(mapping, inode, wc);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	/*
+	 * This signals to the caller that the data can be written
+	 * inline.
+	 */
+	written = 1;
+out:
+	return written ? written : ret;
+}
+
+/*
+ * This function only does anything for file systems which can't
+ * handle sparse files.
+ *
+ * What we want to do here is fill in any hole between the current end
+ * of allocation and the end of our write. That way the rest of the
+ * write path can treat it as a non-allocating write, which has no
+ * special case code for sparse/nonsparse files.
+ */
+static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
+					unsigned len,
+					struct ocfs2_write_ctxt *wc)
+{
+	int ret;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	loff_t newsize = pos + len;
+
+	if (ocfs2_sparse_alloc(osb))
+		return 0;
+
+	if (newsize <= i_size_read(inode))
+		return 0;
+
+	ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
+	if (ret)
+		mlog_errno(ret);
+
+	return ret;
+}
+
+int ocfs2_write_begin_nolock(struct address_space *mapping,
+			     loff_t pos, unsigned len, unsigned flags,
+			     struct page **pagep, void **fsdata,
+			     struct buffer_head *di_bh, struct page *mmap_page)
+{
+	int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
+	unsigned int clusters_to_alloc, extents_to_split;
+	struct ocfs2_write_ctxt *wc;
+	struct inode *inode = mapping->host;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_dinode *di;
+	struct ocfs2_alloc_context *data_ac = NULL;
+	struct ocfs2_alloc_context *meta_ac = NULL;
+	handle_t *handle;
+	struct ocfs2_extent_tree et;
+
+	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
+	if (ret) {
+		mlog_errno(ret);
+		return ret;
+	}
+
+	if (ocfs2_supports_inline_data(osb)) {
+		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
+						     mmap_page, wc);
+		if (ret == 1) {
+			ret = 0;
+			goto success;
+		}
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
+	}
+
+	ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
+					&extents_to_split);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
+
+	/*
+	 * We set w_target_from, w_target_to here so that
+	 * ocfs2_write_end() knows which range in the target page to
+	 * write out. An allocation requires that we write the entire
+	 * cluster range.
+	 */
+	if (clusters_to_alloc || extents_to_split) {
+		/*
+		 * XXX: We are stretching the limits of
+		 * ocfs2_lock_allocators(). It greatly over-estimates
+		 * the work to be done.
+ */ + mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u," + " clusters_to_add = %u, extents_to_split = %u\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), + clusters_to_alloc, extents_to_split); + + ocfs2_init_dinode_extent_tree(&et, inode, wc->w_di_bh); + ret = ocfs2_lock_allocators(inode, &et, + clusters_to_alloc, extents_to_split, + &data_ac, &meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + credits = ocfs2_calc_extend_credits(inode->i_sb, + &di->id2.i_list, + clusters_to_alloc); + + } + + ocfs2_set_target_boundaries(osb, wc, pos, len, + clusters_to_alloc + extents_to_split); + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + wc->w_handle = handle; + + /* + * We don't want this to fail in ocfs2_write_end(), so do it + * here. + */ + ret = ocfs2_journal_access(handle, inode, wc->w_di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * Fill our page array first. That way we've grabbed enough so + * that we can zero and flush if we error after adding the + * extent. + */ + ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, + clusters_to_alloc + extents_to_split, + mmap_page); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, + len); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + if (data_ac) + ocfs2_free_alloc_context(data_ac); + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + +success: + *pagep = wc->w_target_page; + *fsdata = wc; + return 0; +out_commit: + ocfs2_commit_trans(osb, handle); + +out: + ocfs2_free_write_ctxt(wc); + + if (data_ac) + ocfs2_free_alloc_context(data_ac); + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + return ret; +} + +static int ocfs2_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + int ret; + struct buffer_head *di_bh = NULL; + struct inode *inode = mapping->host; + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret) { + mlog_errno(ret); + return ret; + } + + /* + * Take alloc sem here to prevent concurrent lookups. That way + * the mapping, zeroing and tree manipulation within + * ocfs2_write() will be safe against ->readpage(). This + * should also serve to lock out allocation from a shared + * writeable region. + */ + down_write(&OCFS2_I(inode)->ip_alloc_sem); + + ret = ocfs2_write_begin_nolock(mapping, pos, len, flags, pagep, + fsdata, di_bh, NULL); + if (ret) { + mlog_errno(ret); + goto out_fail; + } + + brelse(di_bh); + + return 0; + +out_fail: + up_write(&OCFS2_I(inode)->ip_alloc_sem); + + brelse(di_bh); + ocfs2_inode_unlock(inode, 1); + + return ret; +} + +static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, + unsigned len, unsigned *copied, + struct ocfs2_dinode *di, + struct ocfs2_write_ctxt *wc) +{ + void *kaddr; + + if (unlikely(*copied < len)) { + if (!PageUptodate(wc->w_target_page)) { + *copied = 0; + return; + } + } + + kaddr = kmap_atomic(wc->w_target_page, KM_USER0); + memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); + kunmap_atomic(kaddr, KM_USER0); + + mlog(0, "Data written to inode at offset %llu. 
" + "id_count = %u, copied = %u, i_dyn_features = 0x%x\n", + (unsigned long long)pos, *copied, + le16_to_cpu(di->id2.i_data.id_count), + le16_to_cpu(di->i_dyn_features)); +} + +int ocfs2_write_end_nolock(struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + int i; + unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1); + struct inode *inode = mapping->host; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_write_ctxt *wc = fsdata; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; + handle_t *handle = wc->w_handle; + struct page *tmppage; + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + ocfs2_write_end_inline(inode, pos, len, &copied, di, wc); + goto out_write_size; + } + + if (unlikely(copied < len)) { + if (!PageUptodate(wc->w_target_page)) + copied = 0; + + ocfs2_zero_new_buffers(wc->w_target_page, start+copied, + start+len); + } + flush_dcache_page(wc->w_target_page); + + for(i = 0; i < wc->w_num_pages; i++) { + tmppage = wc->w_pages[i]; + + if (tmppage == wc->w_target_page) { + from = wc->w_target_from; + to = wc->w_target_to; + + BUG_ON(from > PAGE_CACHE_SIZE || + to > PAGE_CACHE_SIZE || + to < from); + } else { + /* + * Pages adjacent to the target (if any) imply + * a hole-filling write in which case we want + * to flush their entire range. + */ + from = 0; + to = PAGE_CACHE_SIZE; + } + + if (page_has_buffers(tmppage)) { + if (ocfs2_should_order_data(inode)) { + ocfs2_jbd2_file_inode(wc->w_handle, inode); +#ifdef CONFIG_OCFS2_COMPAT_JBD + walk_page_buffers(wc->w_handle, + page_buffers(tmppage), + from, to, NULL, + ocfs2_journal_dirty_data); +#endif + } + block_commit_write(tmppage, from, to); + } + } + +out_write_size: + pos += copied; + if (pos > inode->i_size) { + i_size_write(inode, pos); + mark_inode_dirty(inode); + } + inode->i_blocks = ocfs2_inode_sector_count(inode); + di->i_size = cpu_to_le64((u64)i_size_read(inode)); + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); + di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); + ocfs2_journal_dirty(handle, wc->w_di_bh); + + ocfs2_commit_trans(osb, handle); + + ocfs2_run_deallocs(osb, &wc->w_dealloc); + + ocfs2_free_write_ctxt(wc); + + return copied; +} + +static int ocfs2_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + int ret; + struct inode *inode = mapping->host; + + ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata); + + up_write(&OCFS2_I(inode)->ip_alloc_sem); + ocfs2_inode_unlock(inode, 1); + + return ret; +} + +const struct address_space_operations ocfs2_aops = { + .readpage = ocfs2_readpage, + .readpages = ocfs2_readpages, + .writepage = ocfs2_writepage, + .write_begin = ocfs2_write_begin, + .write_end = ocfs2_write_end, + .bmap = ocfs2_bmap, + .sync_page = block_sync_page, + .direct_IO = ocfs2_direct_IO, + .invalidatepage = ocfs2_invalidatepage, + .releasepage = ocfs2_releasepage, + .migratepage = buffer_migrate_page, +}; diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h new file mode 100644 index 0000000..503e492 --- /dev/null +++ b/fs/ocfs2/aops.h @@ -0,0 +1,75 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved. 
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_AOPS_H
+#define OCFS2_AOPS_H
+
+int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
+			       unsigned from, unsigned to);
+
+handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
+							 struct page *page,
+							 unsigned from,
+							 unsigned to);
+
+int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+			  struct inode *inode, unsigned int from,
+			  unsigned int to, int new);
+
+void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
+
+int walk_page_buffers(	handle_t *handle,
+			struct buffer_head *head,
+			unsigned from,
+			unsigned to,
+			int *partial,
+			int (*fn)(	handle_t *handle,
+				struct buffer_head *bh));
+
+int ocfs2_write_end_nolock(struct address_space *mapping,
+			   loff_t pos, unsigned len, unsigned copied,
+			   struct page *page, void *fsdata);
+
+int ocfs2_write_begin_nolock(struct address_space *mapping,
+			     loff_t pos, unsigned len, unsigned flags,
+			     struct page **pagep, void **fsdata,
+			     struct buffer_head *di_bh, struct page *mmap_page);
+
+int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+			   struct buffer_head *di_bh);
+int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);
+
+/* all ocfs2_dio_end_io()'s fault */
+#define ocfs2_iocb_is_rw_locked(iocb) \
+	test_bit(0, (unsigned long *)&iocb->private)
+static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
+{
+	set_bit(0, (unsigned long *)&iocb->private);
+	if (level)
+		set_bit(1, (unsigned long *)&iocb->private);
+	else
+		clear_bit(1, (unsigned long *)&iocb->private);
+}
+#define ocfs2_iocb_clear_rw_locked(iocb) \
+	clear_bit(0, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_rw_locked_level(iocb) \
+	test_bit(1, (unsigned long *)&iocb->private)
+#endif /* OCFS2_AOPS_H */
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
new file mode 100644
index 0000000..3a178ec
--- /dev/null
+++ b/fs/ocfs2/buffer_head_io.c
@@ -0,0 +1,410 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * io.c
+ *
+ * Buffer cache handling
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> + +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "inode.h" +#include "journal.h" +#include "uptodate.h" + +#include "buffer_head_io.h" + +int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, + struct inode *inode) +{ + int ret = 0; + + mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n", + (unsigned long long)bh->b_blocknr, inode); + + BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); + BUG_ON(buffer_jbd(bh)); + + /* No need to check for a soft readonly file system here. non + * journalled writes are only ever done on system files which + * can get modified during recovery even if read-only. */ + if (ocfs2_is_hard_readonly(osb)) { + ret = -EROFS; + goto out; + } + + mutex_lock(&OCFS2_I(inode)->ip_io_mutex); + + lock_buffer(bh); + set_buffer_uptodate(bh); + + /* remove from dirty list before I/O. */ + clear_buffer_dirty(bh); + + get_bh(bh); /* for end_buffer_write_sync() */ + bh->b_end_io = end_buffer_write_sync; + submit_bh(WRITE, bh); + + wait_on_buffer(bh); + + if (buffer_uptodate(bh)) { + ocfs2_set_buffer_uptodate(inode, bh); + } else { + /* We don't need to remove the clustered uptodate + * information for this bh as it's not marked locally + * uptodate. */ + ret = -EIO; + put_bh(bh); + } + + mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); +out: + mlog_exit(ret); + return ret; +} + +int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, + unsigned int nr, struct buffer_head *bhs[]) +{ + int status = 0; + unsigned int i; + struct buffer_head *bh; + + if (!nr) { + mlog(ML_BH_IO, "No buffers will be read!\n"); + goto bail; + } + + for (i = 0 ; i < nr ; i++) { + if (bhs[i] == NULL) { + bhs[i] = sb_getblk(osb->sb, block++); + if (bhs[i] == NULL) { + status = -EIO; + mlog_errno(status); + goto bail; + } + } + bh = bhs[i]; + + if (buffer_jbd(bh)) { + mlog(ML_BH_IO, + "trying to sync read a jbd " + "managed bh (blocknr = %llu), skipping\n", + (unsigned long long)bh->b_blocknr); + continue; + } + + if (buffer_dirty(bh)) { + /* This should probably be a BUG, or + * at least return an error. */ + mlog(ML_ERROR, + "trying to sync read a dirty " + "buffer! (blocknr = %llu), skipping\n", + (unsigned long long)bh->b_blocknr); + continue; + } + + lock_buffer(bh); + if (buffer_jbd(bh)) { + mlog(ML_ERROR, + "block %llu had the JBD bit set " + "while I was in lock_buffer!", + (unsigned long long)bh->b_blocknr); + BUG(); + } + + clear_buffer_uptodate(bh); + get_bh(bh); /* for end_buffer_read_sync() */ + bh->b_end_io = end_buffer_read_sync; + submit_bh(READ, bh); + } + + for (i = nr; i > 0; i--) { + bh = bhs[i - 1]; + + /* No need to wait on the buffer if it's managed by JBD. */ + if (!buffer_jbd(bh)) + wait_on_buffer(bh); + + if (!buffer_uptodate(bh)) { + /* Status won't be cleared from here on out, + * so we can safely record this and loop back + * to cleanup the other buffers. 
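+			 * Dropping our reference and NULLing the
+			 * array slot lets the caller see exactly
+			 * which reads failed.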
*/
+			status = -EIO;
+			put_bh(bh);
+			bhs[i - 1] = NULL;
+		}
+	}
+
+bail:
+	return status;
+}
+
+int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
+		      struct buffer_head *bhs[], int flags)
+{
+	int status = 0;
+	int i, ignore_cache = 0;
+	struct buffer_head *bh;
+
+	mlog_entry("(inode=%p, block=(%llu), nr=(%d), flags=%d)\n",
+		   inode, (unsigned long long)block, nr, flags);
+
+	BUG_ON(!inode);
+	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
+	       (flags & OCFS2_BH_IGNORE_CACHE));
+
+	if (bhs == NULL) {
+		status = -EINVAL;
+		mlog_errno(status);
+		goto bail;
+	}
+
+	if (nr < 0) {
+		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
+		status = -EINVAL;
+		mlog_errno(status);
+		goto bail;
+	}
+
+	if (nr == 0) {
+		mlog(ML_BH_IO, "No buffers will be read!\n");
+		status = 0;
+		goto bail;
+	}
+
+	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
+	for (i = 0 ; i < nr ; i++) {
+		if (bhs[i] == NULL) {
+			bhs[i] = sb_getblk(inode->i_sb, block++);
+			if (bhs[i] == NULL) {
+				mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
+				status = -EIO;
+				mlog_errno(status);
+				goto bail;
+			}
+		}
+		bh = bhs[i];
+		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);
+
+		/* There are three read-ahead cases here which we need to
+		 * be concerned with. All three assume a buffer has
+		 * previously been submitted with OCFS2_BH_READAHEAD
+		 * and it hasn't yet completed I/O.
+		 *
+		 * 1) The current request is sync to disk. This rarely
+		 *    happens these days, and never when performance
+		 *    matters - the code can just wait on the buffer
+		 *    lock and re-submit.
+		 *
+		 * 2) The current request is cached, but not
+		 *    readahead. ocfs2_buffer_uptodate() will return
+		 *    false anyway, so we'll wind up waiting on the
+		 *    buffer lock to do I/O. We re-check the request
+		 *    after getting the lock to avoid a re-submit.
+		 *
+		 * 3) The current request is readahead (and so must
+		 *    also be a caching one). We short circuit if the
+		 *    buffer is locked (under I/O) and if it's in the
+		 *    uptodate cache. The re-check from #2 catches the
+		 *    case that the previous read-ahead completes just
+		 *    before our is-it-in-flight check.
+		 */
+
+		if (!ignore_cache && !ocfs2_buffer_uptodate(inode, bh)) {
+			mlog(ML_UPTODATE,
+			     "bh (%llu), inode %llu not uptodate\n",
+			     (unsigned long long)bh->b_blocknr,
+			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
+			/* We're using ignore_cache here to say
+			 * "go to disk" */
+			ignore_cache = 1;
+		}
+
+		if (buffer_jbd(bh)) {
+			if (ignore_cache)
+				mlog(ML_BH_IO, "trying to sync read a jbd "
+					       "managed bh (blocknr = %llu)\n",
+				     (unsigned long long)bh->b_blocknr);
+			continue;
+		}
+
+		if (ignore_cache) {
+			if (buffer_dirty(bh)) {
+				/* This should probably be a BUG, or
+				 * at least return an error. */
+				mlog(ML_BH_IO, "asking me to sync read a dirty "
+					       "buffer! (blocknr = %llu)\n",
+				     (unsigned long long)bh->b_blocknr);
+				continue;
+			}
+
+			/* A read-ahead request was made - if the
+			 * buffer is already under read-ahead from a
+			 * previously submitted request then we are
+			 * done here. */
+			if ((flags & OCFS2_BH_READAHEAD)
+			    && ocfs2_buffer_read_ahead(inode, bh))
+				continue;
+
+			lock_buffer(bh);
+			if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
+				mlog(ML_ERROR, "block %llu had the JBD bit set "
+					       "while I was in lock_buffer!",
+				     (unsigned long long)bh->b_blocknr);
+				BUG();
+#else
+				unlock_buffer(bh);
+				continue;
+#endif
+			}
+
+			/* Re-check ocfs2_buffer_uptodate() as a
+			 * previously read-ahead buffer may have
+			 * completed I/O while we were waiting for the
+			 * buffer lock. 
*/
+			if (!(flags & OCFS2_BH_IGNORE_CACHE)
+			    && !(flags & OCFS2_BH_READAHEAD)
+			    && ocfs2_buffer_uptodate(inode, bh)) {
+				unlock_buffer(bh);
+				continue;
+			}
+
+			clear_buffer_uptodate(bh);
+			get_bh(bh); /* for end_buffer_read_sync() */
+			bh->b_end_io = end_buffer_read_sync;
+			submit_bh(READ, bh);
+			continue;
+		}
+	}
+
+	status = 0;
+
+	for (i = (nr - 1); i >= 0; i--) {
+		bh = bhs[i];
+
+		if (!(flags & OCFS2_BH_READAHEAD)) {
+			/* We know this can't have changed as we hold the
+			 * inode sem. Avoid doing any work on the bh if the
+			 * journal has it. */
+			if (!buffer_jbd(bh))
+				wait_on_buffer(bh);
+
+			if (!buffer_uptodate(bh)) {
+				/* Status won't be cleared from here on out,
+				 * so we can safely record this and loop back
+				 * to cleanup the other buffers. Don't need to
+				 * remove the clustered uptodate information
+				 * for this bh as it's not marked locally
+				 * uptodate. */
+				status = -EIO;
+				put_bh(bh);
+				bhs[i] = NULL;
+				continue;
+			}
+		}
+
+		/* Always set the buffer in the cache, even if it was
+		 * a forced read, or read-ahead which hasn't yet
+		 * completed. */
+		ocfs2_set_buffer_uptodate(inode, bh);
+	}
+	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
+
+	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
+	     (unsigned long long)block, nr,
+	     ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
+	     flags);
+
+bail:
+
+	mlog_exit(status);
+	return status;
+}
+
+/* Check whether the blkno is the super block or one of the backups. */
+static void ocfs2_check_super_or_backup(struct super_block *sb,
+					sector_t blkno)
+{
+	int i;
+	u64 backup_blkno;
+
+	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
+		return;
+
+	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
+		backup_blkno = ocfs2_backup_super_blkno(sb, i);
+		if (backup_blkno == blkno)
+			return;
+	}
+
+	BUG();
+}
+
+/*
+ * Writing the super block and backups doesn't need to collaborate with the
+ * journal, so we don't need to lock ip_io_mutex and an inode doesn't need
+ * to be passed into this function.
+ */
+int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
+				struct buffer_head *bh)
+{
+	int ret = 0;
+
+	mlog_entry_void();
+
+	BUG_ON(buffer_jbd(bh));
+	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
+
+	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
+		ret = -EROFS;
+		goto out;
+	}
+
+	lock_buffer(bh);
+	set_buffer_uptodate(bh);
+
+	/* remove from dirty list before I/O. */
+	clear_buffer_dirty(bh);
+
+	get_bh(bh); /* for end_buffer_write_sync() */
+	bh->b_end_io = end_buffer_write_sync;
+	submit_bh(WRITE, bh);
+
+	wait_on_buffer(bh);
+
+	if (!buffer_uptodate(bh)) {
+		ret = -EIO;
+		put_bh(bh);
+	}
+
+out:
+	mlog_exit(ret);
+	return ret;
+}
diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h
new file mode 100644
index 0000000..75e1dcb
--- /dev/null
+++ b/fs/ocfs2/buffer_head_io.h
@@ -0,0 +1,72 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_buffer_head.h
+ *
+ * Buffer cache handling functions defined
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_BUFFER_HEAD_IO_H +#define OCFS2_BUFFER_HEAD_IO_H + +#include <linux/buffer_head.h> + +void ocfs2_end_buffer_io_sync(struct buffer_head *bh, + int uptodate); + +static inline int ocfs2_read_block(struct inode *inode, + u64 off, + struct buffer_head **bh); + +int ocfs2_write_block(struct ocfs2_super *osb, + struct buffer_head *bh, + struct inode *inode); +int ocfs2_read_blocks(struct inode *inode, + u64 block, + int nr, + struct buffer_head *bhs[], + int flags); +int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, + unsigned int nr, struct buffer_head *bhs[]); + +int ocfs2_write_super_or_backup(struct ocfs2_super *osb, + struct buffer_head *bh); + +#define OCFS2_BH_IGNORE_CACHE 1 +#define OCFS2_BH_READAHEAD 8 + +static inline int ocfs2_read_block(struct inode *inode, u64 off, + struct buffer_head **bh) +{ + int status = 0; + + if (bh == NULL) { + printk("ocfs2: bh == NULL\n"); + status = -EINVAL; + goto bail; + } + + status = ocfs2_read_blocks(inode, off, 1, bh, 0); + +bail: + return status; +} + +#endif /* OCFS2_BUFFER_HEAD_IO_H */ diff --git a/fs/ocfs2/cluster/Makefile b/fs/ocfs2/cluster/Makefile new file mode 100644 index 0000000..bc8c5e7 --- /dev/null +++ b/fs/ocfs2/cluster/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_OCFS2_FS) += ocfs2_nodemanager.o + +ocfs2_nodemanager-objs := heartbeat.o masklog.o sys.o nodemanager.o \ + quorum.o tcp.o netdebug.o ver.o diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c new file mode 100644 index 0000000..6ebaa58 --- /dev/null +++ b/fs/ocfs2/cluster/heartbeat.c @@ -0,0 +1,1868 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2004, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/file.h> +#include <linux/kthread.h> +#include <linux/configfs.h> +#include <linux/random.h> +#include <linux/crc32.h> +#include <linux/time.h> + +#include "heartbeat.h" +#include "tcp.h" +#include "nodemanager.h" +#include "quorum.h" + +#include "masklog.h" + + +/* + * The first heartbeat pass had one global thread that would serialize all hb + * callback calls. This global serializing sem should only be removed once + * we've made sure that all callees can deal with being called concurrently + * from multiple hb region threads. 
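 * Within this file, o2hb_run_event_list() takes the semaphore
 * exclusively to deliver callbacks, while o2hb_fill_node_map() takes
 * it shared to snapshot the live-node bitmap.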
+ */ +static DECLARE_RWSEM(o2hb_callback_sem); + +/* + * multiple hb threads are watching multiple regions. A node is live + * whenever any of the threads sees activity from the node in its region. + */ +static DEFINE_SPINLOCK(o2hb_live_lock); +static struct list_head o2hb_live_slots[O2NM_MAX_NODES]; +static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; +static LIST_HEAD(o2hb_node_events); +static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue); + +static LIST_HEAD(o2hb_all_regions); + +static struct o2hb_callback { + struct list_head list; +} o2hb_callbacks[O2HB_NUM_CB]; + +static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type); + +#define O2HB_DEFAULT_BLOCK_BITS 9 + +unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; + +/* Only sets a new threshold if there are no active regions. + * + * No locking or otherwise interesting code is required for reading + * o2hb_dead_threshold as it can't change once regions are active and + * it's not interesting to anyone until then anyway. */ +static void o2hb_dead_threshold_set(unsigned int threshold) +{ + if (threshold > O2HB_MIN_DEAD_THRESHOLD) { + spin_lock(&o2hb_live_lock); + if (list_empty(&o2hb_all_regions)) + o2hb_dead_threshold = threshold; + spin_unlock(&o2hb_live_lock); + } +} + +struct o2hb_node_event { + struct list_head hn_item; + enum o2hb_callback_type hn_event_type; + struct o2nm_node *hn_node; + int hn_node_num; +}; + +struct o2hb_disk_slot { + struct o2hb_disk_heartbeat_block *ds_raw_block; + u8 ds_node_num; + u64 ds_last_time; + u64 ds_last_generation; + u16 ds_equal_samples; + u16 ds_changed_samples; + struct list_head ds_live_item; +}; + +/* each thread owns a region.. when we're asked to tear down the region + * we ask the thread to stop, who cleans up the region */ +struct o2hb_region { + struct config_item hr_item; + + struct list_head hr_all_item; + unsigned hr_unclean_stop:1; + + /* protected by the hr_callback_sem */ + struct task_struct *hr_task; + + unsigned int hr_blocks; + unsigned long long hr_start_block; + + unsigned int hr_block_bits; + unsigned int hr_block_bytes; + + unsigned int hr_slots_per_page; + unsigned int hr_num_pages; + + struct page **hr_slot_data; + struct block_device *hr_bdev; + struct o2hb_disk_slot *hr_slots; + + /* let the person setting up hb wait for it to return until it + * has reached a 'steady' state. This will be fixed when we have + * a more complete api that doesn't lead to this sort of fragility. */ + atomic_t hr_steady_iterations; + + char hr_dev_name[BDEVNAME_SIZE]; + + unsigned int hr_timeout_ms; + + /* randomized as the region goes up and down so that a node + * recognizes a node going up and down in one iteration */ + u64 hr_generation; + + struct delayed_work hr_write_timeout_work; + unsigned long hr_last_timeout_start; + + /* Used during o2hb_check_slot to hold a copy of the block + * being checked because we temporarily have to zero out the + * crc field. 
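	 * (See o2hb_compute_block_crc_le(), which saves hb_cksum,
	 * zeroes it, runs crc32_le() over the whole block, then
	 * restores the saved value.)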
*/
+	struct o2hb_disk_heartbeat_block *hr_tmp_block;
+};
+
+struct o2hb_bio_wait_ctxt {
+	atomic_t          wc_num_reqs;
+	struct completion wc_io_complete;
+	int               wc_error;
+};
+
+static void o2hb_write_timeout(struct work_struct *work)
+{
+	struct o2hb_region *reg =
+		container_of(work, struct o2hb_region,
+			     hr_write_timeout_work.work);
+
+	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
+	     "milliseconds\n", reg->hr_dev_name,
+	     jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
+	o2quo_disk_timeout();
+}
+
+static void o2hb_arm_write_timeout(struct o2hb_region *reg)
+{
+	mlog(0, "Queue write timeout for %u ms\n", O2HB_MAX_WRITE_TIMEOUT_MS);
+
+	cancel_delayed_work(&reg->hr_write_timeout_work);
+	reg->hr_last_timeout_start = jiffies;
+	schedule_delayed_work(&reg->hr_write_timeout_work,
+			      msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
+}
+
+static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
+{
+	cancel_delayed_work(&reg->hr_write_timeout_work);
+	flush_scheduled_work();
+}
+
+static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
+{
+	atomic_set(&wc->wc_num_reqs, 1);
+	init_completion(&wc->wc_io_complete);
+	wc->wc_error = 0;
+}
+
+/* Used in error paths too */
+static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
+				     unsigned int num)
+{
+	/* sadly atomic_sub_and_test() isn't available on all platforms. The
+	 * good news is that the fast path only completes one at a time */
+	while(num--) {
+		if (atomic_dec_and_test(&wc->wc_num_reqs)) {
+			BUG_ON(num > 0);
+			complete(&wc->wc_io_complete);
+		}
+	}
+}
+
+static void o2hb_wait_on_io(struct o2hb_region *reg,
+			    struct o2hb_bio_wait_ctxt *wc)
+{
+	struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
+
+	blk_run_address_space(mapping);
+	o2hb_bio_wait_dec(wc, 1);
+
+	wait_for_completion(&wc->wc_io_complete);
+}
+
+static void o2hb_bio_end_io(struct bio *bio,
+			   int error)
+{
+	struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
+
+	if (error) {
+		mlog(ML_ERROR, "IO Error %d\n", error);
+		wc->wc_error = error;
+	}
+
+	o2hb_bio_wait_dec(wc, 1);
+	bio_put(bio);
+}
+
+/* Setup a Bio to cover I/O against num_slots slots starting at
+ * start_slot. */
+static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
+				      struct o2hb_bio_wait_ctxt *wc,
+				      unsigned int *current_slot,
+				      unsigned int max_slots)
+{
+	int len, current_page;
+	unsigned int vec_len, vec_start;
+	unsigned int bits = reg->hr_block_bits;
+	unsigned int spp = reg->hr_slots_per_page;
+	unsigned int cs = *current_slot;
+	struct bio *bio;
+	struct page *page;
+
+	/* Testing has shown this allocation to take long enough under
+	 * GFP_KERNEL that the local node can get fenced. It would be
+	 * nicest if we could pre-allocate these bios and avoid this
+	 * altogether. */
+	bio = bio_alloc(GFP_ATOMIC, 16);
+	if (!bio) {
+		mlog(ML_ERROR, "Could not alloc slots BIO!\n");
+		bio = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	/* Must put everything in 512 byte sectors for the bio... 
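	 * The shift by (hr_block_bits - 9) below converts block
	 * numbers into 512-byte sector numbers; with 4096-byte
	 * blocks, for example, each heartbeat block spans eight
	 * sectors.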
*/
+	bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+	bio->bi_bdev = reg->hr_bdev;
+	bio->bi_private = wc;
+	bio->bi_end_io = o2hb_bio_end_io;
+
+	vec_start = (cs << bits) % PAGE_CACHE_SIZE;
+	while(cs < max_slots) {
+		current_page = cs / spp;
+		page = reg->hr_slot_data[current_page];
+
+		vec_len = min(PAGE_CACHE_SIZE - vec_start,
+			      (max_slots-cs) * (PAGE_CACHE_SIZE/spp) );
+
+		mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
+		     current_page, vec_len, vec_start);
+
+		len = bio_add_page(bio, page, vec_len, vec_start);
+		if (len != vec_len) break;
+
+		cs += vec_len / (PAGE_CACHE_SIZE/spp);
+		vec_start = 0;
+	}
+
+bail:
+	*current_slot = cs;
+	return bio;
+}
+
+static int o2hb_read_slots(struct o2hb_region *reg,
+			   unsigned int max_slots)
+{
+	unsigned int current_slot=0;
+	int status;
+	struct o2hb_bio_wait_ctxt wc;
+	struct bio *bio;
+
+	o2hb_bio_wait_init(&wc);
+
+	while(current_slot < max_slots) {
+		bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
+		if (IS_ERR(bio)) {
+			status = PTR_ERR(bio);
+			mlog_errno(status);
+			goto bail_and_wait;
+		}
+
+		atomic_inc(&wc.wc_num_reqs);
+		submit_bio(READ, bio);
+	}
+
+	status = 0;
+
+bail_and_wait:
+	o2hb_wait_on_io(reg, &wc);
+	if (wc.wc_error && !status)
+		status = wc.wc_error;
+
+	return status;
+}
+
+static int o2hb_issue_node_write(struct o2hb_region *reg,
+				 struct o2hb_bio_wait_ctxt *write_wc)
+{
+	int status;
+	unsigned int slot;
+	struct bio *bio;
+
+	o2hb_bio_wait_init(write_wc);
+
+	slot = o2nm_this_node();
+
+	bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
+	if (IS_ERR(bio)) {
+		status = PTR_ERR(bio);
+		mlog_errno(status);
+		goto bail;
+	}
+
+	atomic_inc(&write_wc->wc_num_reqs);
+	submit_bio(WRITE, bio);
+
+	status = 0;
+bail:
+	return status;
+}
+
+static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
+				     struct o2hb_disk_heartbeat_block *hb_block)
+{
+	__le32 old_cksum;
+	u32 ret;
+
+	/* We want to compute the block crc with a 0 value in the
+	 * hb_cksum field. Save it off here and replace after the
+	 * crc. */
+	old_cksum = hb_block->hb_cksum;
+	hb_block->hb_cksum = 0;
+
+	ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);
+
+	hb_block->hb_cksum = old_cksum;
+
+	return ret;
+}
+
+static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
+{
+	mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
+	     "cksum = 0x%x, generation 0x%llx\n",
+	     (long long)le64_to_cpu(hb_block->hb_seq),
+	     hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
+	     (long long)le64_to_cpu(hb_block->hb_generation));
+}
+
+static int o2hb_verify_crc(struct o2hb_region *reg,
+			   struct o2hb_disk_heartbeat_block *hb_block)
+{
+	u32 read, computed;
+
+	read = le32_to_cpu(hb_block->hb_cksum);
+	computed = o2hb_compute_block_crc_le(reg, hb_block);
+
+	return read == computed;
+}
+
+/* We want to make sure that nobody is heartbeating on top of us --
+ * this will help detect an invalid configuration. 
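 * If another node were writing into our slot, the sequence number on
 * disk would stop matching the ds_last_time we recorded after our own
 * last write, which is what the check below catches.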
*/
+static int o2hb_check_last_timestamp(struct o2hb_region *reg)
+{
+	int node_num, ret;
+	struct o2hb_disk_slot *slot;
+	struct o2hb_disk_heartbeat_block *hb_block;
+
+	node_num = o2nm_this_node();
+
+	ret = 1;
+	slot = &reg->hr_slots[node_num];
+	/* Don't check on our 1st timestamp */
+	if (slot->ds_last_time) {
+		hb_block = slot->ds_raw_block;
+
+		if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time)
+			ret = 0;
+	}
+
+	return ret;
+}
+
+static inline void o2hb_prepare_block(struct o2hb_region *reg,
+				      u64 generation)
+{
+	int node_num;
+	u64 cputime;
+	struct o2hb_disk_slot *slot;
+	struct o2hb_disk_heartbeat_block *hb_block;
+
+	node_num = o2nm_this_node();
+	slot = &reg->hr_slots[node_num];
+
+	hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
+	memset(hb_block, 0, reg->hr_block_bytes);
+	/* TODO: time stuff */
+	cputime = CURRENT_TIME.tv_sec;
+	if (!cputime)
+		cputime = 1;
+
+	hb_block->hb_seq = cpu_to_le64(cputime);
+	hb_block->hb_node = node_num;
+	hb_block->hb_generation = cpu_to_le64(generation);
+	hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);
+
+	/* This step must always happen last! */
+	hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
+								   hb_block));
+
+	mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
+	     (long long)generation,
+	     le32_to_cpu(hb_block->hb_cksum));
+}
+
+static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
+				struct o2nm_node *node,
+				int idx)
+{
+	struct list_head *iter;
+	struct o2hb_callback_func *f;
+
+	list_for_each(iter, &hbcall->list) {
+		f = list_entry(iter, struct o2hb_callback_func, hc_item);
+		mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
+		(f->hc_func)(node, idx, f->hc_data);
+	}
+}
+
+/* Will run the list in order until we process the passed event */
+static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
+{
+	int empty;
+	struct o2hb_callback *hbcall;
+	struct o2hb_node_event *event;
+
+	spin_lock(&o2hb_live_lock);
+	empty = list_empty(&queued_event->hn_item);
+	spin_unlock(&o2hb_live_lock);
+	if (empty)
+		return;
+
+	/* Holding callback sem assures we don't alter the callback
+	 * lists when doing this, and serializes ourselves with other
+	 * processes wanting callbacks. */
+	down_write(&o2hb_callback_sem);
+
+	spin_lock(&o2hb_live_lock);
+	while (!list_empty(&o2hb_node_events)
+	       && !list_empty(&queued_event->hn_item)) {
+		event = list_entry(o2hb_node_events.next,
+				   struct o2hb_node_event,
+				   hn_item);
+		list_del_init(&event->hn_item);
+		spin_unlock(&o2hb_live_lock);
+
+		mlog(ML_HEARTBEAT, "Node %s event for %d\n",
+		     event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
+		     event->hn_node_num);
+
+		hbcall = hbcall_from_type(event->hn_event_type);
+
+		/* We should *never* have gotten on to the list with a
+		 * bad type... This isn't something that we should try
+		 * to recover from. */
+		BUG_ON(IS_ERR(hbcall));
+
+		o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);
+
+		spin_lock(&o2hb_live_lock);
+	}
+	spin_unlock(&o2hb_live_lock);
+
+	up_write(&o2hb_callback_sem);
+}
+
+static void o2hb_queue_node_event(struct o2hb_node_event *event,
+				  enum o2hb_callback_type type,
+				  struct o2nm_node *node,
+				  int node_num)
+{
+	assert_spin_locked(&o2hb_live_lock);
+
+	event->hn_event_type = type;
+	event->hn_node = node;
+	event->hn_node_num = node_num;
+
+	mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
+	     type == O2HB_NODE_UP_CB ? 
"UP" : "DOWN", node_num); + + list_add_tail(&event->hn_item, &o2hb_node_events); +} + +static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot) +{ + struct o2hb_node_event event = + { .hn_item = LIST_HEAD_INIT(event.hn_item), }; + struct o2nm_node *node; + + node = o2nm_get_node_by_num(slot->ds_node_num); + if (!node) + return; + + spin_lock(&o2hb_live_lock); + if (!list_empty(&slot->ds_live_item)) { + mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n", + slot->ds_node_num); + + list_del_init(&slot->ds_live_item); + + if (list_empty(&o2hb_live_slots[slot->ds_node_num])) { + clear_bit(slot->ds_node_num, o2hb_live_node_bitmap); + + o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node, + slot->ds_node_num); + } + } + spin_unlock(&o2hb_live_lock); + + o2hb_run_event_list(&event); + + o2nm_node_put(node); +} + +static int o2hb_check_slot(struct o2hb_region *reg, + struct o2hb_disk_slot *slot) +{ + int changed = 0, gen_changed = 0; + struct o2hb_node_event event = + { .hn_item = LIST_HEAD_INIT(event.hn_item), }; + struct o2nm_node *node; + struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block; + u64 cputime; + unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS; + unsigned int slot_dead_ms; + + memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes); + + /* Is this correct? Do we assume that the node doesn't exist + * if we're not configured for him? */ + node = o2nm_get_node_by_num(slot->ds_node_num); + if (!node) + return 0; + + if (!o2hb_verify_crc(reg, hb_block)) { + /* all paths from here will drop o2hb_live_lock for + * us. */ + spin_lock(&o2hb_live_lock); + + /* Don't print an error on the console in this case - + * a freshly formatted heartbeat area will not have a + * crc set on it. */ + if (list_empty(&slot->ds_live_item)) + goto out; + + /* The node is live but pushed out a bad crc. We + * consider it a transient miss but don't populate any + * other values as they may be junk. */ + mlog(ML_ERROR, "Node %d has written a bad crc to %s\n", + slot->ds_node_num, reg->hr_dev_name); + o2hb_dump_slot(hb_block); + + slot->ds_equal_samples++; + goto fire_callbacks; + } + + /* we don't care if these wrap.. the state transitions below + * clear at the right places */ + cputime = le64_to_cpu(hb_block->hb_seq); + if (slot->ds_last_time != cputime) + slot->ds_changed_samples++; + else + slot->ds_equal_samples++; + slot->ds_last_time = cputime; + + /* The node changed heartbeat generations. We assume this to + * mean it dropped off but came back before we timed out. We + * want to consider it down for the time being but don't want + * to lose any changed_samples state we might build up to + * considering it live again. 
 * So a generation change only resets ds_equal_samples below;
	 * ds_changed_samples keeps accumulating toward O2HB_LIVE_THRESHOLD.
	 */
+	if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
+		gen_changed = 1;
+		slot->ds_equal_samples = 0;
+		mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
+		     "to 0x%llx)\n", slot->ds_node_num,
+		     (long long)slot->ds_last_generation,
+		     (long long)le64_to_cpu(hb_block->hb_generation));
+	}
+
+	slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
+
+	mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
+	     "seq %llu last %llu changed %u equal %u\n",
+	     slot->ds_node_num, (long long)slot->ds_last_generation,
+	     le32_to_cpu(hb_block->hb_cksum),
+	     (unsigned long long)le64_to_cpu(hb_block->hb_seq),
+	     (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
+	     slot->ds_equal_samples);
+
+	spin_lock(&o2hb_live_lock);
+
+fire_callbacks:
+	/* dead nodes only come to life after some number of
+	 * changes at any time during their dead time */
+	if (list_empty(&slot->ds_live_item) &&
+	    slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
+		mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
+		     slot->ds_node_num, (long long)slot->ds_last_generation);
+
+		/* first on the list generates a callback */
+		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+			set_bit(slot->ds_node_num, o2hb_live_node_bitmap);
+
+			o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
+					      slot->ds_node_num);
+
+			changed = 1;
+		}
+
+		list_add_tail(&slot->ds_live_item,
+			      &o2hb_live_slots[slot->ds_node_num]);
+
+		slot->ds_equal_samples = 0;
+
+		/* We want to be sure that all nodes agree on the
+		 * number of milliseconds before a node will be
+		 * considered dead. The self-fencing timeout is
+		 * computed from this value, and a discrepancy might
+		 * result in heartbeat calling a node dead when it
+		 * hasn't self-fenced yet. */
+		slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
+		if (slot_dead_ms && slot_dead_ms != dead_ms) {
+			/* TODO: Perhaps we can fail the region here. */
+			mlog(ML_ERROR, "Node %d on device %s has a dead count "
+			     "of %u ms, but our count is %u ms.\n"
+			     "Please double check your configuration values "
+			     "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
+			     slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
+			     dead_ms);
+		}
+		goto out;
+	}
+
+	/* if the list is dead, we're done.. */
+	if (list_empty(&slot->ds_live_item))
+		goto out;
+
+	/* live nodes only go dead after enough consecutive missed
+	 * samples.. reset the missed counter whenever we see
+	 * activity */
+	if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
+		mlog(ML_HEARTBEAT, "Node %d left my region\n",
+		     slot->ds_node_num);
+
+		/* last off the live_slot generates a callback */
+		list_del_init(&slot->ds_live_item);
+		if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+			clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);
+
+			o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
+					      slot->ds_node_num);
+
+			changed = 1;
+		}
+
+		/* We don't clear this because the node is still
+		 * actually writing new blocks. */
+		if (!gen_changed)
+			slot->ds_changed_samples = 0;
+		goto out;
+	}
+	if (slot->ds_changed_samples) {
+		slot->ds_changed_samples = 0;
+		slot->ds_equal_samples = 0;
+	}
+out:
+	spin_unlock(&o2hb_live_lock);
+
+	o2hb_run_event_list(&event);
+
+	o2nm_node_put(node);
+	return changed;
+}
+
+/* This could be faster if we just implemented a find_last_bit, but I
+ * don't think the circumstances warrant it. 
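 * The loop below simply walks find_next_bit() over the whole bitmap,
 * remembering the last set bit it saw, and returns numbits if no bit
 * is set at all.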
*/
+static int o2hb_highest_node(unsigned long *nodes,
+			     int numbits)
+{
+	int highest, node;
+
+	highest = numbits;
+	node = -1;
+	while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
+		if (node >= numbits)
+			break;
+
+		highest = node;
+	}
+
+	return highest;
+}
+
+static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
+{
+	int i, ret, highest_node, change = 0;
+	unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	struct o2hb_bio_wait_ctxt write_wc;
+
+	ret = o2nm_configured_node_map(configured_nodes,
+				       sizeof(configured_nodes));
+	if (ret) {
+		mlog_errno(ret);
+		return ret;
+	}
+
+	highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
+	if (highest_node >= O2NM_MAX_NODES) {
+		mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
+		return -EINVAL;
+	}
+
+	/* No sense in reading the slots of nodes that don't exist
+	 * yet. Of course, if the node definitions have holes in them
+	 * then we're reading an empty slot anyway... Consider this
+	 * best-effort. */
+	ret = o2hb_read_slots(reg, highest_node + 1);
+	if (ret < 0) {
+		mlog_errno(ret);
+		return ret;
+	}
+
+	/* With an up-to-date view of the slots, we can check that no
+	 * other node has been improperly configured to heartbeat in
+	 * our slot. */
+	if (!o2hb_check_last_timestamp(reg))
+		mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
+		     "in our slot!\n", reg->hr_dev_name);
+
+	/* fill in the proper info for our next heartbeat */
+	o2hb_prepare_block(reg, reg->hr_generation);
+
+	/* And fire off the write. Note that we don't wait on this I/O
+	 * until later. */
+	ret = o2hb_issue_node_write(reg, &write_wc);
+	if (ret < 0) {
+		mlog_errno(ret);
+		return ret;
+	}
+
+	i = -1;
+	while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
+
+		change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
+	}
+
+	/*
+	 * We have to be sure we've advertised ourselves on disk
+	 * before we can go to steady state.  This ensures that
+	 * people we find in our steady state have seen us.
+	 */
+	o2hb_wait_on_io(reg, &write_wc);
+	if (write_wc.wc_error) {
+		/* Do not re-arm the write timeout on I/O error - we
+		 * can't be sure that the new block ever made it to
+		 * disk */
+		mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
+		     write_wc.wc_error, reg->hr_dev_name);
+		return write_wc.wc_error;
+	}
+
+	o2hb_arm_write_timeout(reg);
+
+	/* let the person who launched us know when things are steady */
+	if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
+		if (atomic_dec_and_test(&reg->hr_steady_iterations))
+			wake_up(&o2hb_steady_queue);
+	}
+
+	return 0;
+}
+
+/* Subtract b from a, storing the result in a. a *must* have a larger
+ * value than b. */
+static void o2hb_tv_subtract(struct timeval *a,
+			     struct timeval *b)
+{
+	/* just return 0 when a is before b */
+	if (a->tv_sec < b->tv_sec ||
+	    (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
+		a->tv_sec = 0;
+		a->tv_usec = 0;
+		return;
+	}
+
+	a->tv_sec -= b->tv_sec;
+	a->tv_usec -= b->tv_usec;
+	while ( a->tv_usec < 0 ) {
+		a->tv_sec--;
+		a->tv_usec += 1000000;
+	}
+}
+
+static unsigned int o2hb_elapsed_msecs(struct timeval *start,
+				       struct timeval *end)
+{
+	struct timeval res = *end;
+
+	o2hb_tv_subtract(&res, start);
+
+	return res.tv_sec * 1000 + res.tv_usec / 1000;
+}
+
+/*
+ * we ride the region ref that the region dir holds. before the region
+ * dir is removed and drops its ref it will wait to tear down this
+ * thread. 
+ */
+static int o2hb_thread(void *data)
+{
+	int i, ret;
+	struct o2hb_region *reg = data;
+	struct o2hb_bio_wait_ctxt write_wc;
+	struct timeval before_hb, after_hb;
+	unsigned int elapsed_msec;
+
+	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");
+
+	set_user_nice(current, -20);
+
+	while (!kthread_should_stop() && !reg->hr_unclean_stop) {
+		/* We track the time spent inside
+		 * o2hb_do_disk_heartbeat so that we avoid more than
+		 * hr_timeout_ms between disk writes. On busy systems
+		 * this should result in a heartbeat which is less
+		 * likely to time itself out. */
+		do_gettimeofday(&before_hb);
+
+		i = 0;
+		do {
+			ret = o2hb_do_disk_heartbeat(reg);
+		} while (ret && ++i < 2);
+
+		do_gettimeofday(&after_hb);
+		elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
+
+		mlog(0, "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
+		     before_hb.tv_sec, (unsigned long) before_hb.tv_usec,
+		     after_hb.tv_sec, (unsigned long) after_hb.tv_usec,
+		     elapsed_msec);
+
+		if (elapsed_msec < reg->hr_timeout_ms) {
+			/* the kthread api has blocked signals for us so no
+			 * need to record the return value. */
+			msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
+		}
+	}
+
+	o2hb_disarm_write_timeout(reg);
+
+	/* unclean stop is only used in very bad situations */
+	for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
+		o2hb_shutdown_slot(&reg->hr_slots[i]);
+
+	/* Explicit down notification - avoid forcing the other nodes
+	 * to timeout on this region when we could just as easily
+	 * write a clear generation - thus indicating to them that
+	 * this node has left this region.
+	 *
+	 * XXX: Should we skip this on unclean_stop? */
+	o2hb_prepare_block(reg, 0);
+	ret = o2hb_issue_node_write(reg, &write_wc);
+	if (ret == 0) {
+		o2hb_wait_on_io(reg, &write_wc);
+	} else {
+		mlog_errno(ret);
+	}
+
+	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
+
+	return 0;
+}
+
+void o2hb_init(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
+		INIT_LIST_HEAD(&o2hb_callbacks[i].list);
+
+	for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
+		INIT_LIST_HEAD(&o2hb_live_slots[i]);
+
+	INIT_LIST_HEAD(&o2hb_node_events);
+
+	memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));
+}
+
+/* if we're already in a callback then we're already serialized by the sem */
+static void o2hb_fill_node_map_from_callback(unsigned long *map,
+					     unsigned bytes)
+{
+	BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
+
+	memcpy(map, &o2hb_live_node_bitmap, bytes);
+}
+
+/*
+ * get a map of all nodes that are heartbeating in any regions
+ */
+void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
+{
+	/* callers want to serialize this map and callbacks so that they
+	 * can trust that they don't miss nodes coming to the party */
+	down_read(&o2hb_callback_sem);
+	spin_lock(&o2hb_live_lock);
+	o2hb_fill_node_map_from_callback(map, bytes);
+	spin_unlock(&o2hb_live_lock);
+	up_read(&o2hb_callback_sem);
+}
+EXPORT_SYMBOL_GPL(o2hb_fill_node_map);
+
+/*
+ * heartbeat configfs bits.  The heartbeat set is a default set under
+ * the cluster set in nodemanager.c.
+ */
+
+static struct o2hb_region *to_o2hb_region(struct config_item *item)
+{
+	return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
+}
+
+/* drop_item only drops its ref after killing the thread, nothing should
+ * be using the region anymore.  this has to clean up any state that
+ * attributes might have built up. 
*/
+static void o2hb_region_release(struct config_item *item)
+{
+	int i;
+	struct page *page;
+	struct o2hb_region *reg = to_o2hb_region(item);
+
+	if (reg->hr_tmp_block)
+		kfree(reg->hr_tmp_block);
+
+	if (reg->hr_slot_data) {
+		for (i = 0; i < reg->hr_num_pages; i++) {
+			page = reg->hr_slot_data[i];
+			if (page)
+				__free_page(page);
+		}
+		kfree(reg->hr_slot_data);
+	}
+
+	if (reg->hr_bdev)
+		blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE);
+
+	if (reg->hr_slots)
+		kfree(reg->hr_slots);
+
+	spin_lock(&o2hb_live_lock);
+	list_del(&reg->hr_all_item);
+	spin_unlock(&o2hb_live_lock);
+
+	kfree(reg);
+}
+
+static int o2hb_read_block_input(struct o2hb_region *reg,
+				 const char *page,
+				 size_t count,
+				 unsigned long *ret_bytes,
+				 unsigned int *ret_bits)
+{
+	unsigned long bytes;
+	char *p = (char *)page;
+
+	bytes = simple_strtoul(p, &p, 0);
+	if (!p || (*p && (*p != '\n')))
+		return -EINVAL;
+
+	/* Heartbeat and fs min / max block sizes are the same. */
+	if (bytes > 4096 || bytes < 512)
+		return -ERANGE;
+	if (hweight16(bytes) != 1)
+		return -EINVAL;
+
+	if (ret_bytes)
+		*ret_bytes = bytes;
+	if (ret_bits)
+		*ret_bits = ffs(bytes) - 1;
+
+	return 0;
+}
+
+static ssize_t o2hb_region_block_bytes_read(struct o2hb_region *reg,
+					    char *page)
+{
+	return sprintf(page, "%u\n", reg->hr_block_bytes);
+}
+
+static ssize_t o2hb_region_block_bytes_write(struct o2hb_region *reg,
+					     const char *page,
+					     size_t count)
+{
+	int status;
+	unsigned long block_bytes;
+	unsigned int block_bits;
+
+	if (reg->hr_bdev)
+		return -EINVAL;
+
+	status = o2hb_read_block_input(reg, page, count,
+				       &block_bytes, &block_bits);
+	if (status)
+		return status;
+
+	reg->hr_block_bytes = (unsigned int)block_bytes;
+	reg->hr_block_bits = block_bits;
+
+	return count;
+}
+
+static ssize_t o2hb_region_start_block_read(struct o2hb_region *reg,
+					    char *page)
+{
+	return sprintf(page, "%llu\n", reg->hr_start_block);
+}
+
+static ssize_t o2hb_region_start_block_write(struct o2hb_region *reg,
+					     const char *page,
+					     size_t count)
+{
+	unsigned long long tmp;
+	char *p = (char *)page;
+
+	if (reg->hr_bdev)
+		return -EINVAL;
+
+	tmp = simple_strtoull(p, &p, 0);
+	if (!p || (*p && (*p != '\n')))
+		return -EINVAL;
+
+	reg->hr_start_block = tmp;
+
+	return count;
+}
+
+static ssize_t o2hb_region_blocks_read(struct o2hb_region *reg,
+				       char *page)
+{
+	return sprintf(page, "%d\n", reg->hr_blocks);
+}
+
+static ssize_t o2hb_region_blocks_write(struct o2hb_region *reg,
+					const char *page,
+					size_t count)
+{
+	unsigned long tmp;
+	char *p = (char *)page;
+
+	if (reg->hr_bdev)
+		return -EINVAL;
+
+	tmp = simple_strtoul(p, &p, 0);
+	if (!p || (*p && (*p != '\n')))
+		return -EINVAL;
+
+	if (tmp > O2NM_MAX_NODES || tmp == 0)
+		return -ERANGE;
+
+	reg->hr_blocks = (unsigned int)tmp;
+
+	return count;
+}
+
+static ssize_t o2hb_region_dev_read(struct o2hb_region *reg,
+				    char *page)
+{
+	unsigned int ret = 0;
+
+	if (reg->hr_bdev)
+		ret = sprintf(page, "%s\n", reg->hr_dev_name);
+
+	return ret;
+}
+
+static void o2hb_init_region_params(struct o2hb_region *reg)
+{
+	reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
+	reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
+
+	mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
+	     reg->hr_start_block, reg->hr_blocks);
+	mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n",
+	     reg->hr_block_bytes, reg->hr_block_bits);
+	mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms);
+	mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold);
+}
+
+static int o2hb_map_slot_data(struct 
o2hb_region *reg)
+{
+	int i, j;
+	unsigned int last_slot;
+	unsigned int spp = reg->hr_slots_per_page;
+	struct page *page;
+	char *raw;
+	struct o2hb_disk_slot *slot;
+
+	reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
+	if (reg->hr_tmp_block == NULL) {
+		mlog_errno(-ENOMEM);
+		return -ENOMEM;
+	}
+
+	reg->hr_slots = kcalloc(reg->hr_blocks,
+				sizeof(struct o2hb_disk_slot), GFP_KERNEL);
+	if (reg->hr_slots == NULL) {
+		mlog_errno(-ENOMEM);
+		return -ENOMEM;
+	}
+
+	for(i = 0; i < reg->hr_blocks; i++) {
+		slot = &reg->hr_slots[i];
+		slot->ds_node_num = i;
+		INIT_LIST_HEAD(&slot->ds_live_item);
+		slot->ds_raw_block = NULL;
+	}
+
+	reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
+	mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
+			   "at %u blocks per page\n",
+	     reg->hr_num_pages, reg->hr_blocks, spp);
+
+	reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
+				    GFP_KERNEL);
+	if (!reg->hr_slot_data) {
+		mlog_errno(-ENOMEM);
+		return -ENOMEM;
+	}
+
+	for(i = 0; i < reg->hr_num_pages; i++) {
+		page = alloc_page(GFP_KERNEL);
+		if (!page) {
+			mlog_errno(-ENOMEM);
+			return -ENOMEM;
+		}
+
+		reg->hr_slot_data[i] = page;
+
+		last_slot = i * spp;
+		raw = page_address(page);
+		for (j = 0;
+		     (j < spp) && ((j + last_slot) < reg->hr_blocks);
+		     j++) {
+			BUG_ON((j + last_slot) >= reg->hr_blocks);
+
+			slot = &reg->hr_slots[j + last_slot];
+			slot->ds_raw_block =
+				(struct o2hb_disk_heartbeat_block *) raw;
+
+			raw += reg->hr_block_bytes;
+		}
+	}
+
+	return 0;
+}
+
+/* Read in all the slots available and populate the tracking
+ * structures so that we can start with a baseline idea of what's
+ * there. */
+static int o2hb_populate_slot_data(struct o2hb_region *reg)
+{
+	int ret, i;
+	struct o2hb_disk_slot *slot;
+	struct o2hb_disk_heartbeat_block *hb_block;
+
+	mlog_entry_void();
+
+	ret = o2hb_read_slots(reg, reg->hr_blocks);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	/* We only want to get an idea of the values initially in each
+	 * slot, so we do no verification - o2hb_check_slot will
+	 * actually determine if each configured slot is valid and
+	 * whether any values have changed. */
+	for(i = 0; i < reg->hr_blocks; i++) {
+		slot = &reg->hr_slots[i];
+		hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block;
+
+		/* Only fill the values that o2hb_check_slot uses to
+		 * determine changing slots */
+		slot->ds_last_time = le64_to_cpu(hb_block->hb_seq);
+		slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
+	}
+
+out:
+	mlog_exit(ret);
+	return ret;
+}
+
+/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
+static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
+				     const char *page,
+				     size_t count)
+{
+	struct task_struct *hb_task;
+	long fd;
+	int sectsize;
+	char *p = (char *)page;
+	struct file *filp = NULL;
+	struct inode *inode = NULL;
+	ssize_t ret = -EINVAL;
+
+	if (reg->hr_bdev)
+		goto out;
+
+	/* We can't heartbeat without having had our node number
+	 * configured yet. 
*/
+	if (o2nm_this_node() == O2NM_MAX_NODES)
+		goto out;
+
+	fd = simple_strtol(p, &p, 0);
+	if (!p || (*p && (*p != '\n')))
+		goto out;
+
+	if (fd < 0 || fd >= INT_MAX)
+		goto out;
+
+	filp = fget(fd);
+	if (filp == NULL)
+		goto out;
+
+	if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
+	    reg->hr_block_bytes == 0)
+		goto out;
+
+	inode = igrab(filp->f_mapping->host);
+	if (inode == NULL)
+		goto out;
+
+	if (!S_ISBLK(inode->i_mode))
+		goto out;
+
+	reg->hr_bdev = I_BDEV(filp->f_mapping->host);
+	ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ);
+	if (ret) {
+		reg->hr_bdev = NULL;
+		goto out;
+	}
+	inode = NULL;
+
+	bdevname(reg->hr_bdev, reg->hr_dev_name);
+
+	sectsize = bdev_hardsect_size(reg->hr_bdev);
+	if (sectsize != reg->hr_block_bytes) {
+		mlog(ML_ERROR,
+		     "blocksize %u incorrect for device, expected %d",
+		     reg->hr_block_bytes, sectsize);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	o2hb_init_region_params(reg);
+
+	/* Generation of zero is invalid */
+	do {
+		get_random_bytes(&reg->hr_generation,
+				 sizeof(reg->hr_generation));
+	} while (reg->hr_generation == 0);
+
+	ret = o2hb_map_slot_data(reg);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = o2hb_populate_slot_data(reg);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
+
+	/*
+	 * A node is considered live after it has beat LIVE_THRESHOLD
+	 * times.  We're not steady until we've given them a chance
+	 * _after_ our first read.
+	 */
+	atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);
+
+	hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
+			      reg->hr_item.ci_name);
+	if (IS_ERR(hb_task)) {
+		ret = PTR_ERR(hb_task);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	spin_lock(&o2hb_live_lock);
+	reg->hr_task = hb_task;
+	spin_unlock(&o2hb_live_lock);
+
+	ret = wait_event_interruptible(o2hb_steady_queue,
+				atomic_read(&reg->hr_steady_iterations) == 0);
+	if (ret) {
+		/* We got interrupted (hello ptrace!).  Clean up */
+		spin_lock(&o2hb_live_lock);
+		hb_task = reg->hr_task;
+		reg->hr_task = NULL;
+		spin_unlock(&o2hb_live_lock);
+
+		if (hb_task)
+			kthread_stop(hb_task);
+		goto out;
+	}
+
+	/* Ok, we were woken. 
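Either heartbeat actually went steady or drop_item() zeroed hr_steady_iterations to kick us out. 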
Make sure it wasn't by drop_item() */ + spin_lock(&o2hb_live_lock); + hb_task = reg->hr_task; + spin_unlock(&o2hb_live_lock); + + if (hb_task) + ret = count; + else + ret = -EIO; + +out: + if (filp) + fput(filp); + if (inode) + iput(inode); + if (ret < 0) { + if (reg->hr_bdev) { + blkdev_put(reg->hr_bdev, FMODE_READ|FMODE_WRITE); + reg->hr_bdev = NULL; + } + } + return ret; +} + +static ssize_t o2hb_region_pid_read(struct o2hb_region *reg, + char *page) +{ + pid_t pid = 0; + + spin_lock(&o2hb_live_lock); + if (reg->hr_task) + pid = task_pid_nr(reg->hr_task); + spin_unlock(&o2hb_live_lock); + + if (!pid) + return 0; + + return sprintf(page, "%u\n", pid); +} + +struct o2hb_region_attribute { + struct configfs_attribute attr; + ssize_t (*show)(struct o2hb_region *, char *); + ssize_t (*store)(struct o2hb_region *, const char *, size_t); +}; + +static struct o2hb_region_attribute o2hb_region_attr_block_bytes = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "block_bytes", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2hb_region_block_bytes_read, + .store = o2hb_region_block_bytes_write, +}; + +static struct o2hb_region_attribute o2hb_region_attr_start_block = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "start_block", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2hb_region_start_block_read, + .store = o2hb_region_start_block_write, +}; + +static struct o2hb_region_attribute o2hb_region_attr_blocks = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "blocks", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2hb_region_blocks_read, + .store = o2hb_region_blocks_write, +}; + +static struct o2hb_region_attribute o2hb_region_attr_dev = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "dev", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2hb_region_dev_read, + .store = o2hb_region_dev_write, +}; + +static struct o2hb_region_attribute o2hb_region_attr_pid = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "pid", + .ca_mode = S_IRUGO | S_IRUSR }, + .show = o2hb_region_pid_read, +}; + +static struct configfs_attribute *o2hb_region_attrs[] = { + &o2hb_region_attr_block_bytes.attr, + &o2hb_region_attr_start_block.attr, + &o2hb_region_attr_blocks.attr, + &o2hb_region_attr_dev.attr, + &o2hb_region_attr_pid.attr, + NULL, +}; + +static ssize_t o2hb_region_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct o2hb_region *reg = to_o2hb_region(item); + struct o2hb_region_attribute *o2hb_region_attr = + container_of(attr, struct o2hb_region_attribute, attr); + ssize_t ret = 0; + + if (o2hb_region_attr->show) + ret = o2hb_region_attr->show(reg, page); + return ret; +} + +static ssize_t o2hb_region_store(struct config_item *item, + struct configfs_attribute *attr, + const char *page, size_t count) +{ + struct o2hb_region *reg = to_o2hb_region(item); + struct o2hb_region_attribute *o2hb_region_attr = + container_of(attr, struct o2hb_region_attribute, attr); + ssize_t ret = -EINVAL; + + if (o2hb_region_attr->store) + ret = o2hb_region_attr->store(reg, page, count); + return ret; +} + +static struct configfs_item_operations o2hb_region_item_ops = { + .release = o2hb_region_release, + .show_attribute = o2hb_region_show, + .store_attribute = o2hb_region_store, +}; + +static struct config_item_type o2hb_region_type = { + .ct_item_ops = &o2hb_region_item_ops, + .ct_attrs = o2hb_region_attrs, + .ct_owner = THIS_MODULE, +}; + +/* heartbeat set */ + +struct o2hb_heartbeat_group { + struct config_group hs_group; + /* some stuff? 
*/
+};
+
+static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct o2hb_heartbeat_group, hs_group)
+ : NULL;
+}
+
+static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
+ const char *name)
+{
+ struct o2hb_region *reg = NULL;
+
+ reg = kzalloc(sizeof(struct o2hb_region), GFP_KERNEL);
+ if (reg == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);
+
+ spin_lock(&o2hb_live_lock);
+ list_add_tail(&reg->hr_all_item, &o2hb_all_regions);
+ spin_unlock(&o2hb_live_lock);
+
+ return &reg->hr_item;
+}
+
+static void o2hb_heartbeat_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct task_struct *hb_task;
+ struct o2hb_region *reg = to_o2hb_region(item);
+
+ /* stop the thread when the user removes the region dir */
+ spin_lock(&o2hb_live_lock);
+ hb_task = reg->hr_task;
+ reg->hr_task = NULL;
+ spin_unlock(&o2hb_live_lock);
+
+ if (hb_task)
+ kthread_stop(hb_task);
+
+ /*
+ * If we're racing a dev_write(), we need to wake them. They will
+ * check reg->hr_task
+ */
+ if (atomic_read(&reg->hr_steady_iterations) != 0) {
+ atomic_set(&reg->hr_steady_iterations, 0);
+ wake_up(&o2hb_steady_queue);
+ }
+
+ config_item_put(item);
+}
+
+struct o2hb_heartbeat_group_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct o2hb_heartbeat_group *, char *);
+ ssize_t (*store)(struct o2hb_heartbeat_group *, const char *, size_t);
+};
+
+static ssize_t o2hb_heartbeat_group_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
+ struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
+ container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
+ ssize_t ret = 0;
+
+ if (o2hb_heartbeat_group_attr->show)
+ ret = o2hb_heartbeat_group_attr->show(reg, page);
+ return ret;
+}
+
+static ssize_t o2hb_heartbeat_group_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
+ struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
+ container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
+ ssize_t ret = -EINVAL;
+
+ if (o2hb_heartbeat_group_attr->store)
+ ret = o2hb_heartbeat_group_attr->store(reg, page, count);
+ return ret;
+}
+
+static ssize_t o2hb_heartbeat_group_threshold_show(struct o2hb_heartbeat_group *group,
+ char *page)
+{
+ return sprintf(page, "%u\n", o2hb_dead_threshold);
+}
+
+static ssize_t o2hb_heartbeat_group_threshold_store(struct o2hb_heartbeat_group *group,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+
+ tmp = simple_strtoul(p, &p, 10);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ /* this will validate ranges for us. */
+ o2hb_dead_threshold_set((unsigned int) tmp);
+
+ return count;
+}
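
[Editor's note, illustration only; not part of the import.] The region machinery above is driven entirely from userspace through configfs: mkdir on the heartbeat directory invokes o2hb_heartbeat_group_make_item(), the attribute files are then written one by one, and the write to "dev" is the commit point described at o2hb_region_dev_write(). A hypothetical sketch of that sequence; the mount point /sys/kernel/config is the conventional one, but the cluster name, region name, and device path are placeholders:

    /* Hypothetical userspace sketch: bring up a heartbeat region.
     * "mycluster", "MYREGION" and /dev/sdb1 are placeholders. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define REGION "/sys/kernel/config/cluster/mycluster/heartbeat/MYREGION"

    static int put(const char *attr, const char *val)
    {
            char path[512];
            int fd, ret;

            snprintf(path, sizeof(path), REGION "/%s", attr);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            ret = write(fd, val, strlen(val)) < 0 ? -1 : 0;
            close(fd);
            return ret;
    }

    int main(void)
    {
            char fdbuf[16];
            /* dev_write() resolves the number we write via fget(), so the
             * block device must be open in *this* process */
            int devfd = open("/dev/sdb1", O_RDWR);

            if (devfd < 0 ||
                put("block_bytes", "512") ||   /* must match sector size */
                put("start_block", "1") ||     /* zero is rejected */
                put("blocks", "255"))
                    return 1;

            snprintf(fdbuf, sizeof(fdbuf), "%d", devfd);
            return put("dev", fdbuf) ? 1 : 0;  /* commit: starts o2hb-MYREGION */
    }
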
+
+static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_threshold = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "dead_threshold",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_heartbeat_group_threshold_show,
+ .store = o2hb_heartbeat_group_threshold_store,
+};
+
+static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
+ &o2hb_heartbeat_group_attr_threshold.attr,
+ NULL,
+};
+
+static struct configfs_item_operations o2hb_heartbeat_group_item_ops = {
+ .show_attribute = o2hb_heartbeat_group_show,
+ .store_attribute = o2hb_heartbeat_group_store,
+};
+
+static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
+ .make_item = o2hb_heartbeat_group_make_item,
+ .drop_item = o2hb_heartbeat_group_drop_item,
+};
+
+static struct config_item_type o2hb_heartbeat_group_type = {
+ .ct_group_ops = &o2hb_heartbeat_group_group_ops,
+ .ct_item_ops = &o2hb_heartbeat_group_item_ops,
+ .ct_attrs = o2hb_heartbeat_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* this is just here to avoid touching group in heartbeat.h which the
+ * entire damn world #includes */
+struct config_group *o2hb_alloc_hb_set(void)
+{
+ struct o2hb_heartbeat_group *hs = NULL;
+ struct config_group *ret = NULL;
+
+ hs = kzalloc(sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
+ if (hs == NULL)
+ goto out;
+
+ config_group_init_type_name(&hs->hs_group, "heartbeat",
+ &o2hb_heartbeat_group_type);
+
+ ret = &hs->hs_group;
+out:
+ if (ret == NULL)
+ kfree(hs);
+ return ret;
+}
+
+void o2hb_free_hb_set(struct config_group *group)
+{
+ struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
+ kfree(hs);
+}
+
+/* hb callback registration and issuing */
+
+static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
+{
+ if (type == O2HB_NUM_CB)
+ return ERR_PTR(-EINVAL);
+
+ return &o2hb_callbacks[type];
+}
+
+void o2hb_setup_callback(struct o2hb_callback_func *hc,
+ enum o2hb_callback_type type,
+ o2hb_cb_func *func,
+ void *data,
+ int priority)
+{
+ INIT_LIST_HEAD(&hc->hc_item);
+ hc->hc_func = func;
+ hc->hc_data = data;
+ hc->hc_priority = priority;
+ hc->hc_type = type;
+ hc->hc_magic = O2HB_CB_MAGIC;
+}
+EXPORT_SYMBOL_GPL(o2hb_setup_callback);
+
+static struct o2hb_region *o2hb_find_region(const char *region_uuid)
+{
+ struct o2hb_region *p, *reg = NULL;
+
+ assert_spin_locked(&o2hb_live_lock);
+
+ list_for_each_entry(p, &o2hb_all_regions, hr_all_item) {
+ if (!strcmp(region_uuid, config_item_name(&p->hr_item))) {
+ reg = p;
+ break;
+ }
+ }
+
+ return reg;
+}
+
+static int o2hb_region_get(const char *region_uuid)
+{
+ int ret = 0;
+ struct o2hb_region *reg;
+
+ spin_lock(&o2hb_live_lock);
+
+ reg = o2hb_find_region(region_uuid);
+ if (!reg)
+ ret = -ENOENT;
+ spin_unlock(&o2hb_live_lock);
+
+ if (ret)
+ goto out;
+
+ ret = o2nm_depend_this_node();
+ if (ret)
+ goto out;
+
+ ret = o2nm_depend_item(&reg->hr_item);
+ if (ret)
+ o2nm_undepend_this_node();
+
+out:
+ return ret;
+}
+
+static void o2hb_region_put(const char *region_uuid)
+{
+ struct o2hb_region *reg;
+
+ spin_lock(&o2hb_live_lock);
+
+ reg = o2hb_find_region(region_uuid);
+
+ spin_unlock(&o2hb_live_lock);
+
+ if (reg) {
+ o2nm_undepend_item(&reg->hr_item);
+ o2nm_undepend_this_node();
+ }
+}
+
+int o2hb_register_callback(const char *region_uuid,
+ struct o2hb_callback_func *hc)
+{
+ struct o2hb_callback_func *tmp;
+ struct list_head *iter;
+ struct o2hb_callback *hbcall;
+ int ret;
+
+ BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
+
BUG_ON(!list_empty(&hc->hc_item)); + + hbcall = hbcall_from_type(hc->hc_type); + if (IS_ERR(hbcall)) { + ret = PTR_ERR(hbcall); + goto out; + } + + if (region_uuid) { + ret = o2hb_region_get(region_uuid); + if (ret) + goto out; + } + + down_write(&o2hb_callback_sem); + + list_for_each(iter, &hbcall->list) { + tmp = list_entry(iter, struct o2hb_callback_func, hc_item); + if (hc->hc_priority < tmp->hc_priority) { + list_add_tail(&hc->hc_item, iter); + break; + } + } + if (list_empty(&hc->hc_item)) + list_add_tail(&hc->hc_item, &hbcall->list); + + up_write(&o2hb_callback_sem); + ret = 0; +out: + mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n", + ret, __builtin_return_address(0), hc); + return ret; +} +EXPORT_SYMBOL_GPL(o2hb_register_callback); + +void o2hb_unregister_callback(const char *region_uuid, + struct o2hb_callback_func *hc) +{ + BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); + + mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n", + __builtin_return_address(0), hc); + + /* XXX Can this happen _with_ a region reference? */ + if (list_empty(&hc->hc_item)) + return; + + if (region_uuid) + o2hb_region_put(region_uuid); + + down_write(&o2hb_callback_sem); + + list_del_init(&hc->hc_item); + + up_write(&o2hb_callback_sem); +} +EXPORT_SYMBOL_GPL(o2hb_unregister_callback); + +int o2hb_check_node_heartbeating(u8 node_num) +{ + unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + + o2hb_fill_node_map(testing_map, sizeof(testing_map)); + if (!test_bit(node_num, testing_map)) { + mlog(ML_HEARTBEAT, + "node (%u) does not have heartbeating enabled.\n", + node_num); + return 0; + } + + return 1; +} +EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating); + +int o2hb_check_node_heartbeating_from_callback(u8 node_num) +{ + unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + + o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map)); + if (!test_bit(node_num, testing_map)) { + mlog(ML_HEARTBEAT, + "node (%u) does not have heartbeating enabled.\n", + node_num); + return 0; + } + + return 1; +} +EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback); + +/* Makes sure our local node is configured with a node number, and is + * heartbeating. */ +int o2hb_check_local_node_heartbeating(void) +{ + u8 node_num; + + /* if this node was set then we have networking */ + node_num = o2nm_this_node(); + if (node_num == O2NM_MAX_NODES) { + mlog(ML_HEARTBEAT, "this node has not been configured.\n"); + return 0; + } + + return o2hb_check_node_heartbeating(node_num); +} +EXPORT_SYMBOL_GPL(o2hb_check_local_node_heartbeating); + +/* + * this is just a hack until we get the plumbing which flips file systems + * read only and drops the hb ref instead of killing the node dead. + */ +void o2hb_stop_all_regions(void) +{ + struct o2hb_region *reg; + + mlog(ML_ERROR, "stopping heartbeat on all active regions.\n"); + + spin_lock(&o2hb_live_lock); + + list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) + reg->hr_unclean_stop = 1; + + spin_unlock(&o2hb_live_lock); +} +EXPORT_SYMBOL_GPL(o2hb_stop_all_regions); diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h new file mode 100644 index 0000000..e511339 --- /dev/null +++ b/fs/ocfs2/cluster/heartbeat.h @@ -0,0 +1,84 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * heartbeat.h + * + * Function prototypes + * + * Copyright (C) 2004 Oracle. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + +#ifndef O2CLUSTER_HEARTBEAT_H +#define O2CLUSTER_HEARTBEAT_H + +#include "ocfs2_heartbeat.h" + +#define O2HB_REGION_TIMEOUT_MS 2000 + +/* number of changes to be seen as live */ +#define O2HB_LIVE_THRESHOLD 2 +/* number of equal samples to be seen as dead */ +extern unsigned int o2hb_dead_threshold; +#define O2HB_DEFAULT_DEAD_THRESHOLD 31 +/* Otherwise MAX_WRITE_TIMEOUT will be zero... */ +#define O2HB_MIN_DEAD_THRESHOLD 2 +#define O2HB_MAX_WRITE_TIMEOUT_MS (O2HB_REGION_TIMEOUT_MS * (o2hb_dead_threshold - 1)) + +#define O2HB_CB_MAGIC 0x51d1e4ec + +/* callback stuff */ +enum o2hb_callback_type { + O2HB_NODE_DOWN_CB = 0, + O2HB_NODE_UP_CB, + O2HB_NUM_CB +}; + +struct o2nm_node; +typedef void (o2hb_cb_func)(struct o2nm_node *, int, void *); + +struct o2hb_callback_func { + u32 hc_magic; + struct list_head hc_item; + o2hb_cb_func *hc_func; + void *hc_data; + int hc_priority; + enum o2hb_callback_type hc_type; +}; + +struct config_group *o2hb_alloc_hb_set(void); +void o2hb_free_hb_set(struct config_group *group); + +void o2hb_setup_callback(struct o2hb_callback_func *hc, + enum o2hb_callback_type type, + o2hb_cb_func *func, + void *data, + int priority); +int o2hb_register_callback(const char *region_uuid, + struct o2hb_callback_func *hc); +void o2hb_unregister_callback(const char *region_uuid, + struct o2hb_callback_func *hc); +void o2hb_fill_node_map(unsigned long *map, + unsigned bytes); +void o2hb_init(void); +int o2hb_check_node_heartbeating(u8 node_num); +int o2hb_check_node_heartbeating_from_callback(u8 node_num); +int o2hb_check_local_node_heartbeating(void); +void o2hb_stop_all_regions(void); + +#endif /* O2CLUSTER_HEARTBEAT_H */ diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c new file mode 100644 index 0000000..d8a0cb9 --- /dev/null +++ b/fs/ocfs2/cluster/masklog.c @@ -0,0 +1,168 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2004, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/string.h> +#include <asm/uaccess.h> + +#include "masklog.h" + +struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK); +EXPORT_SYMBOL_GPL(mlog_and_bits); +struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(MLOG_INITIAL_NOT_MASK); +EXPORT_SYMBOL_GPL(mlog_not_bits); + +static ssize_t mlog_mask_show(u64 mask, char *buf) +{ + char *state; + + if (__mlog_test_u64(mask, mlog_and_bits)) + state = "allow"; + else if (__mlog_test_u64(mask, mlog_not_bits)) + state = "deny"; + else + state = "off"; + + return snprintf(buf, PAGE_SIZE, "%s\n", state); +} + +static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count) +{ + if (!strnicmp(buf, "allow", 5)) { + __mlog_set_u64(mask, mlog_and_bits); + __mlog_clear_u64(mask, mlog_not_bits); + } else if (!strnicmp(buf, "deny", 4)) { + __mlog_set_u64(mask, mlog_not_bits); + __mlog_clear_u64(mask, mlog_and_bits); + } else if (!strnicmp(buf, "off", 3)) { + __mlog_clear_u64(mask, mlog_not_bits); + __mlog_clear_u64(mask, mlog_and_bits); + } else + return -EINVAL; + + return count; +} + +struct mlog_attribute { + struct attribute attr; + u64 mask; +}; + +#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr) + +#define define_mask(_name) { \ + .attr = { \ + .name = #_name, \ + .mode = S_IRUGO | S_IWUSR, \ + }, \ + .mask = ML_##_name, \ +} + +static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { + define_mask(ENTRY), + define_mask(EXIT), + define_mask(TCP), + define_mask(MSG), + define_mask(SOCKET), + define_mask(HEARTBEAT), + define_mask(HB_BIO), + define_mask(DLMFS), + define_mask(DLM), + define_mask(DLM_DOMAIN), + define_mask(DLM_THREAD), + define_mask(DLM_MASTER), + define_mask(DLM_RECOVERY), + define_mask(AIO), + define_mask(JOURNAL), + define_mask(DISK_ALLOC), + define_mask(SUPER), + define_mask(FILE_IO), + define_mask(EXTENT_MAP), + define_mask(DLM_GLUE), + define_mask(BH_IO), + define_mask(UPTODATE), + define_mask(NAMEI), + define_mask(INODE), + define_mask(VOTE), + define_mask(DCACHE), + define_mask(CONN), + define_mask(QUORUM), + define_mask(EXPORT), + define_mask(XATTR), + define_mask(ERROR), + define_mask(NOTICE), + define_mask(KTHREAD), +}; + +static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, }; + +static ssize_t mlog_show(struct kobject *obj, struct attribute *attr, + char *buf) +{ + struct mlog_attribute *mlog_attr = to_mlog_attr(attr); + + return mlog_mask_show(mlog_attr->mask, buf); +} + +static ssize_t mlog_store(struct kobject *obj, struct attribute *attr, + const char *buf, size_t count) +{ + struct mlog_attribute *mlog_attr = to_mlog_attr(attr); + + return mlog_mask_store(mlog_attr->mask, buf, count); +} + +static struct sysfs_ops mlog_attr_ops = { + .show = mlog_show, + .store = mlog_store, +}; + +static struct kobj_type mlog_ktype = { + .default_attrs = mlog_attr_ptrs, + .sysfs_ops = &mlog_attr_ops, +}; + +static struct kset mlog_kset = { + .kobj = {.ktype = &mlog_ktype}, +}; + +int mlog_sys_init(struct kset *o2cb_kset) +{ + int i = 0; + + while (mlog_attrs[i].attr.mode) { + mlog_attr_ptrs[i] = &mlog_attrs[i].attr; + i++; + } + mlog_attr_ptrs[i] = NULL; + + kobject_set_name(&mlog_kset.kobj, "logmask"); + mlog_kset.kobj.kset = o2cb_kset; + return kset_register(&mlog_kset); +} + +void mlog_sys_shutdown(void) +{ + kset_unregister(&mlog_kset); +} diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h new file mode 100644 index 
0000000..57670c6 --- /dev/null +++ b/fs/ocfs2/cluster/masklog.h @@ -0,0 +1,285 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef O2CLUSTER_MASKLOG_H
+#define O2CLUSTER_MASKLOG_H
+
+/*
+ * For now this is a trivial wrapper around printk() that gives the critical
+ * ability to enable sets of debugging output at run-time. In the future this
+ * will almost certainly be redirected to relayfs so that it can pay a
+ * substantially lower heisenberg tax.
+ *
+ * Callers associate the message with a bitmask and a global bitmask is
+ * maintained with help from /proc. If any of the bits match, the message is
+ * output.
+ *
+ * We must have efficient bit tests on i386 and it seems gcc still emits crazy
+ * code for the 64bit compare. It emits very good code for the dual unsigned
+ * long tests, though, completely avoiding tests that can never pass if the
+ * caller gives a constant bitmask that fills one of the longs with all 0s. So
+ * the desire is to have almost all of the calls decided on by comparing just
+ * one of the longs. This leads to having infrequently given bits that are
+ * frequently matched in the high bits.
+ *
+ * _ERROR and _NOTICE are used for messages that always go to the console and
+ * have appropriate KERN_ prefixes. We wrap these in our function instead of
+ * just calling printk() so that this can eventually make its way through
+ * relayfs along with the debugging messages. Everything else gets KERN_INFO.
+ * The inline tests and macro dance give GCC the opportunity to quite cleverly
+ * only emit the appropriate printk() when the caller passes in a constant
+ * mask, as is almost always the case.
+ *
+ * All this bitmask nonsense is hidden from the /proc interface so that Joel
+ * doesn't have an aneurysm. Reading the file gives a straightforward
+ * indication of which bits are on or off:
+ * ENTRY off
+ * EXIT off
+ * TCP off
+ * MSG off
+ * SOCKET off
+ * ERROR off
+ * NOTICE on
+ *
+ * Writing changes the state of a given bit and requires a strictly formatted
+ * single write() call:
+ *
+ * write(fd, "ENTRY on", 8);
+ *
+ * would turn the entry bit on. "1" is also accepted in the place of "on", and
+ * "off" and "0" behave as expected.
+ *
+ * Some trivial shell can flip all the bits on or off:
+ *
+ * log_mask="/proc/fs/ocfs2_nodemanager/log_mask"
+ * cat $log_mask | (
+ * while read bit status; do
+ * # $1 is "on" or "off", say
+ * echo "$bit $1" > $log_mask
+ * done
+ * )
+ */
+
+/* for task_struct */
+#include <linux/sched.h>
+
+/* bits that are frequently given and infrequently matched in the low word */
+/* NOTE: If you add a flag, you need to also update mlog.c! */
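
[Editor's note, illustration only; not part of the import.] The calling convention that the comment above describes, sketched as a hypothetical caller (hb_example is illustrative): a translation unit picks its default mask by defining MLOG_MASK_PREFIX before including this header, and mlog() ORs that prefix into every call's mask:

    /* Illustrative caller -- the prefix must be defined before the include. */
    #define MLOG_MASK_PREFIX ML_HEARTBEAT
    #include "masklog.h"

    static void hb_example(int node)
    {
            /* mask 0: gated purely on MLOG_MASK_PREFIX (ML_HEARTBEAT here),
             * emitted at KERN_INFO when that bit is allowed and not denied */
            mlog(0, "node %d checked in\n", node);

            /* ML_ERROR is in MLOG_INITIAL_AND_MASK, so this always reaches
             * the console via KERN_ERR */
            mlog(ML_ERROR, "node %d timed out\n", node);
    }
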
+#define ML_ENTRY 0x0000000000000001ULL /* func call entry */
+#define ML_EXIT 0x0000000000000002ULL /* func call exit */
+#define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */
+#define ML_MSG 0x0000000000000008ULL /* net network messages */
+#define ML_SOCKET 0x0000000000000010ULL /* net socket lifetime */
+#define ML_HEARTBEAT 0x0000000000000020ULL /* hb all heartbeat tracking */
+#define ML_HB_BIO 0x0000000000000040ULL /* hb io tracing */
+#define ML_DLMFS 0x0000000000000080ULL /* dlm user dlmfs */
+#define ML_DLM 0x0000000000000100ULL /* dlm general debugging */
+#define ML_DLM_DOMAIN 0x0000000000000200ULL /* dlm domain debugging */
+#define ML_DLM_THREAD 0x0000000000000400ULL /* dlm domain thread */
+#define ML_DLM_MASTER 0x0000000000000800ULL /* dlm master functions */
+#define ML_DLM_RECOVERY 0x0000000000001000ULL /* dlm recovery functions */
+#define ML_AIO 0x0000000000002000ULL /* ocfs2 aio read and write */
+#define ML_JOURNAL 0x0000000000004000ULL /* ocfs2 journalling functions */
+#define ML_DISK_ALLOC 0x0000000000008000ULL /* ocfs2 disk allocation */
+#define ML_SUPER 0x0000000000010000ULL /* ocfs2 mount / umount */
+#define ML_FILE_IO 0x0000000000020000ULL /* ocfs2 file I/O */
+#define ML_EXTENT_MAP 0x0000000000040000ULL /* ocfs2 extent map caching */
+#define ML_DLM_GLUE 0x0000000000080000ULL /* ocfs2 dlm glue layer */
+#define ML_BH_IO 0x0000000000100000ULL /* ocfs2 buffer I/O */
+#define ML_UPTODATE 0x0000000000200000ULL /* ocfs2 caching sequence #'s */
+#define ML_NAMEI 0x0000000000400000ULL /* ocfs2 directory / namespace */
+#define ML_INODE 0x0000000000800000ULL /* ocfs2 inode manipulation */
+#define ML_VOTE 0x0000000001000000ULL /* ocfs2 node messaging */
+#define ML_DCACHE 0x0000000002000000ULL /* ocfs2 dcache operations */
+#define ML_CONN 0x0000000004000000ULL /* net connection management */
+#define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */
+#define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */
+#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
+/* bits that are infrequently given and frequently matched in the high word */
+#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
+#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
+#define ML_KTHREAD 0x0000000400000000ULL /* kernel thread activity */
+
+#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
+#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
+#ifndef MLOG_MASK_PREFIX
+#define MLOG_MASK_PREFIX 0
+#endif
+
+/*
+ * When logging is disabled, force the bit test to 0 for anything other
+ * than errors and notices, allowing gcc to remove the code completely.
+ * When enabled, allow all masks.
+ */ +#if defined(CONFIG_OCFS2_DEBUG_MASKLOG) +#define ML_ALLOWED_BITS ~0 +#else +#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE) +#endif + +#define MLOG_MAX_BITS 64 + +struct mlog_bits { + unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG]; +}; + +extern struct mlog_bits mlog_and_bits, mlog_not_bits; + +#if BITS_PER_LONG == 32 + +#define __mlog_test_u64(mask, bits) \ + ( (u32)(mask & 0xffffffff) & bits.words[0] || \ + ((u64)(mask) >> 32) & bits.words[1] ) +#define __mlog_set_u64(mask, bits) do { \ + bits.words[0] |= (u32)(mask & 0xffffffff); \ + bits.words[1] |= (u64)(mask) >> 32; \ +} while (0) +#define __mlog_clear_u64(mask, bits) do { \ + bits.words[0] &= ~((u32)(mask & 0xffffffff)); \ + bits.words[1] &= ~((u64)(mask) >> 32); \ +} while (0) +#define MLOG_BITS_RHS(mask) { \ + { \ + [0] = (u32)(mask & 0xffffffff), \ + [1] = (u64)(mask) >> 32, \ + } \ +} + +#else /* 32bit long above, 64bit long below */ + +#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0]) +#define __mlog_set_u64(mask, bits) do { \ + bits.words[0] |= (mask); \ +} while (0) +#define __mlog_clear_u64(mask, bits) do { \ + bits.words[0] &= ~(mask); \ +} while (0) +#define MLOG_BITS_RHS(mask) { { (mask) } } + +#endif + +/* + * smp_processor_id() "helpfully" screams when called outside preemptible + * regions in current kernels. sles doesn't have the variants that don't + * scream. just do this instead of trying to guess which we're building + * against.. *sigh*. + */ +#define __mlog_cpu_guess ({ \ + unsigned long _cpu = get_cpu(); \ + put_cpu(); \ + _cpu; \ +}) + +/* In the following two macros, the whitespace after the ',' just + * before ##args is intentional. Otherwise, gcc 2.95 will eat the + * previous token if args expands to nothing. + */ +#define __mlog_printk(level, fmt, args...) \ + printk(level "(%u,%lu):%s:%d " fmt, task_pid_nr(current), \ + __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \ + ##args) + +#define mlog(mask, fmt, args...) do { \ + u64 __m = MLOG_MASK_PREFIX | (mask); \ + if ((__m & ML_ALLOWED_BITS) && \ + __mlog_test_u64(__m, mlog_and_bits) && \ + !__mlog_test_u64(__m, mlog_not_bits)) { \ + if (__m & ML_ERROR) \ + __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \ + else if (__m & ML_NOTICE) \ + __mlog_printk(KERN_NOTICE, fmt , ##args); \ + else __mlog_printk(KERN_INFO, fmt , ##args); \ + } \ +} while (0) + +#define mlog_errno(st) do { \ + int _st = (st); \ + if (_st != -ERESTARTSYS && _st != -EINTR && \ + _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \ + mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ +} while (0) + +#if defined(CONFIG_OCFS2_DEBUG_MASKLOG) +#define mlog_entry(fmt, args...) do { \ + mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \ +} while (0) + +#define mlog_entry_void() do { \ + mlog(ML_ENTRY, "ENTRY:\n"); \ +} while (0) + +/* + * We disable this for sparse. 
+ */ +#if !defined(__CHECKER__) +#define mlog_exit(st) do { \ + if (__builtin_types_compatible_p(typeof(st), unsigned long)) \ + mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \ + else if (__builtin_types_compatible_p(typeof(st), signed long)) \ + mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \ + else if (__builtin_types_compatible_p(typeof(st), unsigned int) \ + || __builtin_types_compatible_p(typeof(st), unsigned short) \ + || __builtin_types_compatible_p(typeof(st), unsigned char)) \ + mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \ + else if (__builtin_types_compatible_p(typeof(st), signed int) \ + || __builtin_types_compatible_p(typeof(st), signed short) \ + || __builtin_types_compatible_p(typeof(st), signed char)) \ + mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \ + else if (__builtin_types_compatible_p(typeof(st), long long)) \ + mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \ + else \ + mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \ +} while (0) +#else +#define mlog_exit(st) do { \ + mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \ +} while (0) +#endif + +#define mlog_exit_ptr(ptr) do { \ + mlog(ML_EXIT, "EXIT: %p\n", ptr); \ +} while (0) + +#define mlog_exit_void() do { \ + mlog(ML_EXIT, "EXIT\n"); \ +} while (0) +#else +#define mlog_entry(...) do { } while (0) +#define mlog_entry_void(...) do { } while (0) +#define mlog_exit(...) do { } while (0) +#define mlog_exit_ptr(...) do { } while (0) +#define mlog_exit_void(...) do { } while (0) +#endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */ + +#define mlog_bug_on_msg(cond, fmt, args...) do { \ + if (cond) { \ + mlog(ML_ERROR, "bug expression: " #cond "\n"); \ + mlog(ML_ERROR, fmt, ##args); \ + BUG(); \ + } \ +} while (0) + +#include <linux/kobject.h> +#include <linux/sysfs.h> +int mlog_sys_init(struct kset *o2cb_subsys); +void mlog_sys_shutdown(void); + +#endif /* O2CLUSTER_MASKLOG_H */ diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c new file mode 100644 index 0000000..52276c0 --- /dev/null +++ b/fs/ocfs2/cluster/netdebug.c @@ -0,0 +1,443 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * netdebug.c + * + * debug functionality for o2net + * + * Copyright (C) 2005, 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + +#ifdef CONFIG_DEBUG_FS + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/kref.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> + +#include <linux/uaccess.h> + +#include "tcp.h" +#include "nodemanager.h" +#define MLOG_MASK_PREFIX ML_TCP +#include "masklog.h" + +#include "tcp_internal.h" + +#define O2NET_DEBUG_DIR "o2net" +#define SC_DEBUG_NAME "sock_containers" +#define NST_DEBUG_NAME "send_tracking" + +static struct dentry *o2net_dentry; +static struct dentry *sc_dentry; +static struct dentry *nst_dentry; + +static DEFINE_SPINLOCK(o2net_debug_lock); + +static LIST_HEAD(sock_containers); +static LIST_HEAD(send_tracking); + +void o2net_debug_add_nst(struct o2net_send_tracking *nst) +{ + spin_lock(&o2net_debug_lock); + list_add(&nst->st_net_debug_item, &send_tracking); + spin_unlock(&o2net_debug_lock); +} + +void o2net_debug_del_nst(struct o2net_send_tracking *nst) +{ + spin_lock(&o2net_debug_lock); + if (!list_empty(&nst->st_net_debug_item)) + list_del_init(&nst->st_net_debug_item); + spin_unlock(&o2net_debug_lock); +} + +static struct o2net_send_tracking + *next_nst(struct o2net_send_tracking *nst_start) +{ + struct o2net_send_tracking *nst, *ret = NULL; + + assert_spin_locked(&o2net_debug_lock); + + list_for_each_entry(nst, &nst_start->st_net_debug_item, + st_net_debug_item) { + /* discover the head of the list */ + if (&nst->st_net_debug_item == &send_tracking) + break; + + /* use st_task to detect real nsts in the list */ + if (nst->st_task != NULL) { + ret = nst; + break; + } + } + + return ret; +} + +static void *nst_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct o2net_send_tracking *nst, *dummy_nst = seq->private; + + spin_lock(&o2net_debug_lock); + nst = next_nst(dummy_nst); + spin_unlock(&o2net_debug_lock); + + return nst; +} + +static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct o2net_send_tracking *nst, *dummy_nst = seq->private; + + spin_lock(&o2net_debug_lock); + nst = next_nst(dummy_nst); + list_del_init(&dummy_nst->st_net_debug_item); + if (nst) + list_add(&dummy_nst->st_net_debug_item, + &nst->st_net_debug_item); + spin_unlock(&o2net_debug_lock); + + return nst; /* unused, just needs to be null when done */ +} + +static int nst_seq_show(struct seq_file *seq, void *v) +{ + struct o2net_send_tracking *nst, *dummy_nst = seq->private; + + spin_lock(&o2net_debug_lock); + nst = next_nst(dummy_nst); + + if (nst != NULL) { + /* get_task_comm isn't exported. oh well. 
*/ + seq_printf(seq, "%p:\n" + " pid: %lu\n" + " tgid: %lu\n" + " process name: %s\n" + " node: %u\n" + " sc: %p\n" + " message id: %d\n" + " message type: %u\n" + " message key: 0x%08x\n" + " sock acquiry: %lu.%ld\n" + " send start: %lu.%ld\n" + " wait start: %lu.%ld\n", + nst, (unsigned long)nst->st_task->pid, + (unsigned long)nst->st_task->tgid, + nst->st_task->comm, nst->st_node, + nst->st_sc, nst->st_id, nst->st_msg_type, + nst->st_msg_key, + nst->st_sock_time.tv_sec, + (long)nst->st_sock_time.tv_usec, + nst->st_send_time.tv_sec, + (long)nst->st_send_time.tv_usec, + nst->st_status_time.tv_sec, + (long)nst->st_status_time.tv_usec); + } + + spin_unlock(&o2net_debug_lock); + + return 0; +} + +static void nst_seq_stop(struct seq_file *seq, void *v) +{ +} + +static struct seq_operations nst_seq_ops = { + .start = nst_seq_start, + .next = nst_seq_next, + .stop = nst_seq_stop, + .show = nst_seq_show, +}; + +static int nst_fop_open(struct inode *inode, struct file *file) +{ + struct o2net_send_tracking *dummy_nst; + struct seq_file *seq; + int ret; + + dummy_nst = kmalloc(sizeof(struct o2net_send_tracking), GFP_KERNEL); + if (dummy_nst == NULL) { + ret = -ENOMEM; + goto out; + } + dummy_nst->st_task = NULL; + + ret = seq_open(file, &nst_seq_ops); + if (ret) + goto out; + + seq = file->private_data; + seq->private = dummy_nst; + o2net_debug_add_nst(dummy_nst); + + dummy_nst = NULL; + +out: + kfree(dummy_nst); + return ret; +} + +static int nst_fop_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct o2net_send_tracking *dummy_nst = seq->private; + + o2net_debug_del_nst(dummy_nst); + return seq_release_private(inode, file); +} + +static struct file_operations nst_seq_fops = { + .open = nst_fop_open, + .read = seq_read, + .llseek = seq_lseek, + .release = nst_fop_release, +}; + +void o2net_debug_add_sc(struct o2net_sock_container *sc) +{ + spin_lock(&o2net_debug_lock); + list_add(&sc->sc_net_debug_item, &sock_containers); + spin_unlock(&o2net_debug_lock); +} + +void o2net_debug_del_sc(struct o2net_sock_container *sc) +{ + spin_lock(&o2net_debug_lock); + list_del_init(&sc->sc_net_debug_item); + spin_unlock(&o2net_debug_lock); +} + +static struct o2net_sock_container + *next_sc(struct o2net_sock_container *sc_start) +{ + struct o2net_sock_container *sc, *ret = NULL; + + assert_spin_locked(&o2net_debug_lock); + + list_for_each_entry(sc, &sc_start->sc_net_debug_item, + sc_net_debug_item) { + /* discover the head of the list miscast as a sc */ + if (&sc->sc_net_debug_item == &sock_containers) + break; + + /* use sc_page to detect real scs in the list */ + if (sc->sc_page != NULL) { + ret = sc; + break; + } + } + + return ret; +} + +static void *sc_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct o2net_sock_container *sc, *dummy_sc = seq->private; + + spin_lock(&o2net_debug_lock); + sc = next_sc(dummy_sc); + spin_unlock(&o2net_debug_lock); + + return sc; +} + +static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct o2net_sock_container *sc, *dummy_sc = seq->private; + + spin_lock(&o2net_debug_lock); + sc = next_sc(dummy_sc); + list_del_init(&dummy_sc->sc_net_debug_item); + if (sc) + list_add(&dummy_sc->sc_net_debug_item, &sc->sc_net_debug_item); + spin_unlock(&o2net_debug_lock); + + return sc; /* unused, just needs to be null when done */ +} + +#define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec + +static int sc_seq_show(struct seq_file *seq, void *v) +{ + struct o2net_sock_container *sc, *dummy_sc = seq->private; + + 
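/* dummy_sc is the sentinel that sc_fop_open() threaded into
+ * sock_containers; next_sc() skips it (sc_page == NULL) and returns the
+ * next real container, or NULL once the walk reaches the list head. */
+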
spin_lock(&o2net_debug_lock); + sc = next_sc(dummy_sc); + + if (sc != NULL) { + struct inet_sock *inet = NULL; + + __be32 saddr = 0, daddr = 0; + __be16 sport = 0, dport = 0; + + if (sc->sc_sock) { + inet = inet_sk(sc->sc_sock->sk); + /* the stack's structs aren't sparse endian clean */ + saddr = (__force __be32)inet->saddr; + daddr = (__force __be32)inet->daddr; + sport = (__force __be16)inet->sport; + dport = (__force __be16)inet->dport; + } + + /* XXX sigh, inet-> doesn't have sparse annotation so any + * use of it here generates a warning with -Wbitwise */ + seq_printf(seq, "%p:\n" + " krefs: %d\n" + " sock: %u.%u.%u.%u:%u -> " + "%u.%u.%u.%u:%u\n" + " remote node: %s\n" + " page off: %zu\n" + " handshake ok: %u\n" + " timer: %lu.%ld\n" + " data ready: %lu.%ld\n" + " advance start: %lu.%ld\n" + " advance stop: %lu.%ld\n" + " func start: %lu.%ld\n" + " func stop: %lu.%ld\n" + " func key: %u\n" + " func type: %u\n", + sc, + atomic_read(&sc->sc_kref.refcount), + NIPQUAD(saddr), inet ? ntohs(sport) : 0, + NIPQUAD(daddr), inet ? ntohs(dport) : 0, + sc->sc_node->nd_name, + sc->sc_page_off, + sc->sc_handshake_ok, + TV_SEC_USEC(sc->sc_tv_timer), + TV_SEC_USEC(sc->sc_tv_data_ready), + TV_SEC_USEC(sc->sc_tv_advance_start), + TV_SEC_USEC(sc->sc_tv_advance_stop), + TV_SEC_USEC(sc->sc_tv_func_start), + TV_SEC_USEC(sc->sc_tv_func_stop), + sc->sc_msg_key, + sc->sc_msg_type); + } + + + spin_unlock(&o2net_debug_lock); + + return 0; +} + +static void sc_seq_stop(struct seq_file *seq, void *v) +{ +} + +static struct seq_operations sc_seq_ops = { + .start = sc_seq_start, + .next = sc_seq_next, + .stop = sc_seq_stop, + .show = sc_seq_show, +}; + +static int sc_fop_open(struct inode *inode, struct file *file) +{ + struct o2net_sock_container *dummy_sc; + struct seq_file *seq; + int ret; + + dummy_sc = kmalloc(sizeof(struct o2net_sock_container), GFP_KERNEL); + if (dummy_sc == NULL) { + ret = -ENOMEM; + goto out; + } + dummy_sc->sc_page = NULL; + + ret = seq_open(file, &sc_seq_ops); + if (ret) + goto out; + + seq = file->private_data; + seq->private = dummy_sc; + o2net_debug_add_sc(dummy_sc); + + dummy_sc = NULL; + +out: + kfree(dummy_sc); + return ret; +} + +static int sc_fop_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct o2net_sock_container *dummy_sc = seq->private; + + o2net_debug_del_sc(dummy_sc); + return seq_release_private(inode, file); +} + +static struct file_operations sc_seq_fops = { + .open = sc_fop_open, + .read = seq_read, + .llseek = seq_lseek, + .release = sc_fop_release, +}; + +int o2net_debugfs_init(void) +{ + o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); + if (!o2net_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + + nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR, + o2net_dentry, NULL, + &nst_seq_fops); + if (!nst_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + + sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR, + o2net_dentry, NULL, + &sc_seq_fops); + if (!sc_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + + return 0; +bail: + if (sc_dentry) + debugfs_remove(sc_dentry); + if (nst_dentry) + debugfs_remove(nst_dentry); + if (o2net_dentry) + debugfs_remove(o2net_dentry); + return -ENOMEM; +} + +void o2net_debugfs_exit(void) +{ + if (sc_dentry) + debugfs_remove(sc_dentry); + if (nst_dentry) + debugfs_remove(nst_dentry); + if (o2net_dentry) + debugfs_remove(o2net_dentry); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c 
new file mode 100644 index 0000000..816a3f6 --- /dev/null +++ b/fs/ocfs2/cluster/nodemanager.c @@ -0,0 +1,927 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2004, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/configfs.h> + +#include "tcp.h" +#include "nodemanager.h" +#include "heartbeat.h" +#include "masklog.h" +#include "sys.h" +#include "ver.h" + +/* for now we operate under the assertion that there can be only one + * cluster active at a time. Changing this will require trickling + * cluster references throughout where nodes are looked up */ +struct o2nm_cluster *o2nm_single_cluster = NULL; + + +struct o2nm_node *o2nm_get_node_by_num(u8 node_num) +{ + struct o2nm_node *node = NULL; + + if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) + goto out; + + read_lock(&o2nm_single_cluster->cl_nodes_lock); + node = o2nm_single_cluster->cl_nodes[node_num]; + if (node) + config_item_get(&node->nd_item); + read_unlock(&o2nm_single_cluster->cl_nodes_lock); +out: + return node; +} +EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); + +int o2nm_configured_node_map(unsigned long *map, unsigned bytes) +{ + struct o2nm_cluster *cluster = o2nm_single_cluster; + + BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); + + if (cluster == NULL) + return -EINVAL; + + read_lock(&cluster->cl_nodes_lock); + memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); + read_unlock(&cluster->cl_nodes_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(o2nm_configured_node_map); + +static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, + __be32 ip_needle, + struct rb_node ***ret_p, + struct rb_node **ret_parent) +{ + struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; + struct rb_node *parent = NULL; + struct o2nm_node *node, *ret = NULL; + + while (*p) { + int cmp; + + parent = *p; + node = rb_entry(parent, struct o2nm_node, nd_ip_node); + + cmp = memcmp(&ip_needle, &node->nd_ipv4_address, + sizeof(ip_needle)); + if (cmp < 0) + p = &(*p)->rb_left; + else if (cmp > 0) + p = &(*p)->rb_right; + else { + ret = node; + break; + } + } + + if (ret_p != NULL) + *ret_p = p; + if (ret_parent != NULL) + *ret_parent = parent; + + return ret; +} + +struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) +{ + struct o2nm_node *node = NULL; + struct o2nm_cluster *cluster = o2nm_single_cluster; + + if (cluster == NULL) + goto out; + + read_lock(&cluster->cl_nodes_lock); + node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); + if (node) + config_item_get(&node->nd_item); + read_unlock(&cluster->cl_nodes_lock); + +out: + return node; +} +EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); + +void o2nm_node_put(struct o2nm_node *node) +{ + 
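/* drops the reference taken by o2nm_get_node_by_num()/_by_ip()
+ * (or by o2nm_node_get()) via config_item_get() */
+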
config_item_put(&node->nd_item); +} +EXPORT_SYMBOL_GPL(o2nm_node_put); + +void o2nm_node_get(struct o2nm_node *node) +{ + config_item_get(&node->nd_item); +} +EXPORT_SYMBOL_GPL(o2nm_node_get); + +u8 o2nm_this_node(void) +{ + u8 node_num = O2NM_MAX_NODES; + + if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) + node_num = o2nm_single_cluster->cl_local_node; + + return node_num; +} +EXPORT_SYMBOL_GPL(o2nm_this_node); + +/* node configfs bits */ + +static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) +{ + return item ? + container_of(to_config_group(item), struct o2nm_cluster, + cl_group) + : NULL; +} + +static struct o2nm_node *to_o2nm_node(struct config_item *item) +{ + return item ? container_of(item, struct o2nm_node, nd_item) : NULL; +} + +static void o2nm_node_release(struct config_item *item) +{ + struct o2nm_node *node = to_o2nm_node(item); + kfree(node); +} + +static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page) +{ + return sprintf(page, "%d\n", node->nd_num); +} + +static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) +{ + /* through the first node_set .parent + * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ + return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); +} + +enum { + O2NM_NODE_ATTR_NUM = 0, + O2NM_NODE_ATTR_PORT, + O2NM_NODE_ATTR_ADDRESS, + O2NM_NODE_ATTR_LOCAL, +}; + +static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page, + size_t count) +{ + struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); + unsigned long tmp; + char *p = (char *)page; + + tmp = simple_strtoul(p, &p, 0); + if (!p || (*p && (*p != '\n'))) + return -EINVAL; + + if (tmp >= O2NM_MAX_NODES) + return -ERANGE; + + /* once we're in the cl_nodes tree networking can look us up by + * node number and try to use our address and port attributes + * to connect to this node.. make sure that they've been set + * before writing the node attribute? 
*/ + if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || + !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) + return -EINVAL; /* XXX */ + + write_lock(&cluster->cl_nodes_lock); + if (cluster->cl_nodes[tmp]) + p = NULL; + else { + cluster->cl_nodes[tmp] = node; + node->nd_num = tmp; + set_bit(tmp, cluster->cl_nodes_bitmap); + } + write_unlock(&cluster->cl_nodes_lock); + if (p == NULL) + return -EEXIST; + + return count; +} +static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page) +{ + return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port)); +} + +static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node, + const char *page, size_t count) +{ + unsigned long tmp; + char *p = (char *)page; + + tmp = simple_strtoul(p, &p, 0); + if (!p || (*p && (*p != '\n'))) + return -EINVAL; + + if (tmp == 0) + return -EINVAL; + if (tmp >= (u16)-1) + return -ERANGE; + + node->nd_ipv4_port = htons(tmp); + + return count; +} + +static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page) +{ + return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address)); +} + +static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node, + const char *page, + size_t count) +{ + struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); + int ret, i; + struct rb_node **p, *parent; + unsigned int octets[4]; + __be32 ipv4_addr = 0; + + ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], + &octets[1], &octets[0]); + if (ret != 4) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(octets); i++) { + if (octets[i] > 255) + return -ERANGE; + be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); + } + + ret = 0; + write_lock(&cluster->cl_nodes_lock); + if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) + ret = -EEXIST; + else { + rb_link_node(&node->nd_ip_node, parent, p); + rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); + } + write_unlock(&cluster->cl_nodes_lock); + if (ret) + return ret; + + memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); + + return count; +} + +static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page) +{ + return sprintf(page, "%d\n", node->nd_local); +} + +static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page, + size_t count) +{ + struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node); + unsigned long tmp; + char *p = (char *)page; + ssize_t ret; + + tmp = simple_strtoul(p, &p, 0); + if (!p || (*p && (*p != '\n'))) + return -EINVAL; + + tmp = !!tmp; /* boolean of whether this node wants to be local */ + + /* setting local turns on networking rx for now so we require having + * set everything else first */ + if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || + !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || + !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) + return -EINVAL; /* XXX */ + + /* the only failure case is trying to set a new local node + * when a different one is already set */ + if (tmp && tmp == cluster->cl_has_local && + cluster->cl_local_node != node->nd_num) + return -EBUSY; + + /* bring up the rx thread if we're setting the new local node. 
*/ + if (tmp && !cluster->cl_has_local) { + ret = o2net_start_listening(node); + if (ret) + return ret; + } + + if (!tmp && cluster->cl_has_local && + cluster->cl_local_node == node->nd_num) { + o2net_stop_listening(node); + cluster->cl_local_node = O2NM_INVALID_NODE_NUM; + } + + node->nd_local = tmp; + if (node->nd_local) { + cluster->cl_has_local = tmp; + cluster->cl_local_node = node->nd_num; + } + + return count; +} + +struct o2nm_node_attribute { + struct configfs_attribute attr; + ssize_t (*show)(struct o2nm_node *, char *); + ssize_t (*store)(struct o2nm_node *, const char *, size_t); +}; + +static struct o2nm_node_attribute o2nm_node_attr_num = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "num", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2nm_node_num_read, + .store = o2nm_node_num_write, +}; + +static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "ipv4_port", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2nm_node_ipv4_port_read, + .store = o2nm_node_ipv4_port_write, +}; + +static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "ipv4_address", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2nm_node_ipv4_address_read, + .store = o2nm_node_ipv4_address_write, +}; + +static struct o2nm_node_attribute o2nm_node_attr_local = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "local", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2nm_node_local_read, + .store = o2nm_node_local_write, +}; + +static struct configfs_attribute *o2nm_node_attrs[] = { + [O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr, + [O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr, + [O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr, + [O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr, + NULL, +}; + +static int o2nm_attr_index(struct configfs_attribute *attr) +{ + int i; + for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) { + if (attr == o2nm_node_attrs[i]) + return i; + } + BUG(); + return 0; +} + +static ssize_t o2nm_node_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct o2nm_node *node = to_o2nm_node(item); + struct o2nm_node_attribute *o2nm_node_attr = + container_of(attr, struct o2nm_node_attribute, attr); + ssize_t ret = 0; + + if (o2nm_node_attr->show) + ret = o2nm_node_attr->show(node, page); + return ret; +} + +static ssize_t o2nm_node_store(struct config_item *item, + struct configfs_attribute *attr, + const char *page, size_t count) +{ + struct o2nm_node *node = to_o2nm_node(item); + struct o2nm_node_attribute *o2nm_node_attr = + container_of(attr, struct o2nm_node_attribute, attr); + ssize_t ret; + int attr_index = o2nm_attr_index(attr); + + if (o2nm_node_attr->store == NULL) { + ret = -EINVAL; + goto out; + } + + if (test_bit(attr_index, &node->nd_set_attributes)) + return -EBUSY; + + ret = o2nm_node_attr->store(node, page, count); + if (ret < count) + goto out; + + set_bit(attr_index, &node->nd_set_attributes); +out: + return ret; +} + +static struct configfs_item_operations o2nm_node_item_ops = { + .release = o2nm_node_release, + .show_attribute = o2nm_node_show, + .store_attribute = o2nm_node_store, +}; + +static struct config_item_type o2nm_node_type = { + .ct_item_ops = &o2nm_node_item_ops, + .ct_attrs = o2nm_node_attrs, + .ct_owner = THIS_MODULE, +}; + +/* node set */ + +struct o2nm_node_group { + struct config_group ns_group; + /* some stuff? 
*/ +}; + +#if 0 +static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) +{ + return group ? + container_of(group, struct o2nm_node_group, ns_group) + : NULL; +} +#endif + +struct o2nm_cluster_attribute { + struct configfs_attribute attr; + ssize_t (*show)(struct o2nm_cluster *, char *); + ssize_t (*store)(struct o2nm_cluster *, const char *, size_t); +}; + +static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, + unsigned int *val) +{ + unsigned long tmp; + char *p = (char *)page; + + tmp = simple_strtoul(p, &p, 0); + if (!p || (*p && (*p != '\n'))) + return -EINVAL; + + if (tmp == 0) + return -EINVAL; + if (tmp >= (u32)-1) + return -ERANGE; + + *val = tmp; + + return count; +} + +static ssize_t o2nm_cluster_attr_idle_timeout_ms_read( + struct o2nm_cluster *cluster, char *page) +{ + return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms); +} + +static ssize_t o2nm_cluster_attr_idle_timeout_ms_write( + struct o2nm_cluster *cluster, const char *page, size_t count) +{ + ssize_t ret; + unsigned int val; + + ret = o2nm_cluster_attr_write(page, count, &val); + + if (ret > 0) { + if (cluster->cl_idle_timeout_ms != val + && o2net_num_connected_peers()) { + mlog(ML_NOTICE, + "o2net: cannot change idle timeout after " + "the first peer has agreed to it." + " %d connected peers\n", + o2net_num_connected_peers()); + ret = -EINVAL; + } else if (val <= cluster->cl_keepalive_delay_ms) { + mlog(ML_NOTICE, "o2net: idle timeout must be larger " + "than keepalive delay\n"); + ret = -EINVAL; + } else { + cluster->cl_idle_timeout_ms = val; + } + } + + return ret; +} + +static ssize_t o2nm_cluster_attr_keepalive_delay_ms_read( + struct o2nm_cluster *cluster, char *page) +{ + return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms); +} + +static ssize_t o2nm_cluster_attr_keepalive_delay_ms_write( + struct o2nm_cluster *cluster, const char *page, size_t count) +{ + ssize_t ret; + unsigned int val; + + ret = o2nm_cluster_attr_write(page, count, &val); + + if (ret > 0) { + if (cluster->cl_keepalive_delay_ms != val + && o2net_num_connected_peers()) { + mlog(ML_NOTICE, + "o2net: cannot change keepalive delay after" + " the first peer has agreed to it." 
+ " %d connected peers\n", + o2net_num_connected_peers()); + ret = -EINVAL; + } else if (val >= cluster->cl_idle_timeout_ms) { + mlog(ML_NOTICE, "o2net: keepalive delay must be " + "smaller than idle timeout\n"); + ret = -EINVAL; + } else { + cluster->cl_keepalive_delay_ms = val; + } + } + + return ret; +} + +static ssize_t o2nm_cluster_attr_reconnect_delay_ms_read( + struct o2nm_cluster *cluster, char *page) +{ + return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms); +} + +static ssize_t o2nm_cluster_attr_reconnect_delay_ms_write( + struct o2nm_cluster *cluster, const char *page, size_t count) +{ + return o2nm_cluster_attr_write(page, count, + &cluster->cl_reconnect_delay_ms); +} +static struct o2nm_cluster_attribute o2nm_cluster_attr_idle_timeout_ms = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "idle_timeout_ms", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2nm_cluster_attr_idle_timeout_ms_read, + .store = o2nm_cluster_attr_idle_timeout_ms_write, +}; + +static struct o2nm_cluster_attribute o2nm_cluster_attr_keepalive_delay_ms = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "keepalive_delay_ms", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2nm_cluster_attr_keepalive_delay_ms_read, + .store = o2nm_cluster_attr_keepalive_delay_ms_write, +}; + +static struct o2nm_cluster_attribute o2nm_cluster_attr_reconnect_delay_ms = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "reconnect_delay_ms", + .ca_mode = S_IRUGO | S_IWUSR }, + .show = o2nm_cluster_attr_reconnect_delay_ms_read, + .store = o2nm_cluster_attr_reconnect_delay_ms_write, +}; + +static struct configfs_attribute *o2nm_cluster_attrs[] = { + &o2nm_cluster_attr_idle_timeout_ms.attr, + &o2nm_cluster_attr_keepalive_delay_ms.attr, + &o2nm_cluster_attr_reconnect_delay_ms.attr, + NULL, +}; +static ssize_t o2nm_cluster_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct o2nm_cluster *cluster = to_o2nm_cluster(item); + struct o2nm_cluster_attribute *o2nm_cluster_attr = + container_of(attr, struct o2nm_cluster_attribute, attr); + ssize_t ret = 0; + + if (o2nm_cluster_attr->show) + ret = o2nm_cluster_attr->show(cluster, page); + return ret; +} + +static ssize_t o2nm_cluster_store(struct config_item *item, + struct configfs_attribute *attr, + const char *page, size_t count) +{ + struct o2nm_cluster *cluster = to_o2nm_cluster(item); + struct o2nm_cluster_attribute *o2nm_cluster_attr = + container_of(attr, struct o2nm_cluster_attribute, attr); + ssize_t ret; + + if (o2nm_cluster_attr->store == NULL) { + ret = -EINVAL; + goto out; + } + + ret = o2nm_cluster_attr->store(cluster, page, count); + if (ret < count) + goto out; +out: + return ret; +} + +static struct config_item *o2nm_node_group_make_item(struct config_group *group, + const char *name) +{ + struct o2nm_node *node = NULL; + + if (strlen(name) > O2NM_MAX_NAME_LEN) + return ERR_PTR(-ENAMETOOLONG); + + node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); + if (node == NULL) + return ERR_PTR(-ENOMEM); + + strcpy(node->nd_name, name); /* use item.ci_namebuf instead? 
*/ + config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); + spin_lock_init(&node->nd_lock); + + return &node->nd_item; +} + +static void o2nm_node_group_drop_item(struct config_group *group, + struct config_item *item) +{ + struct o2nm_node *node = to_o2nm_node(item); + struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); + + o2net_disconnect_node(node); + + if (cluster->cl_has_local && + (cluster->cl_local_node == node->nd_num)) { + cluster->cl_has_local = 0; + cluster->cl_local_node = O2NM_INVALID_NODE_NUM; + o2net_stop_listening(node); + } + + /* XXX call into net to stop this node from trading messages */ + + write_lock(&cluster->cl_nodes_lock); + + /* XXX sloppy */ + if (node->nd_ipv4_address) + rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); + + /* nd_num might be 0 if the node number hasn't been set.. */ + if (cluster->cl_nodes[node->nd_num] == node) { + cluster->cl_nodes[node->nd_num] = NULL; + clear_bit(node->nd_num, cluster->cl_nodes_bitmap); + } + write_unlock(&cluster->cl_nodes_lock); + + config_item_put(item); +} + +static struct configfs_group_operations o2nm_node_group_group_ops = { + .make_item = o2nm_node_group_make_item, + .drop_item = o2nm_node_group_drop_item, +}; + +static struct config_item_type o2nm_node_group_type = { + .ct_group_ops = &o2nm_node_group_group_ops, + .ct_owner = THIS_MODULE, +}; + +/* cluster */ + +static void o2nm_cluster_release(struct config_item *item) +{ + struct o2nm_cluster *cluster = to_o2nm_cluster(item); + + kfree(cluster->cl_group.default_groups); + kfree(cluster); +} + +static struct configfs_item_operations o2nm_cluster_item_ops = { + .release = o2nm_cluster_release, + .show_attribute = o2nm_cluster_show, + .store_attribute = o2nm_cluster_store, +}; + +static struct config_item_type o2nm_cluster_type = { + .ct_item_ops = &o2nm_cluster_item_ops, + .ct_attrs = o2nm_cluster_attrs, + .ct_owner = THIS_MODULE, +}; + +/* cluster set */ + +struct o2nm_cluster_group { + struct configfs_subsystem cs_subsys; + /* some stuff? */ +}; + +#if 0 +static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) +{ + return group ? 
+ container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) + : NULL; +} +#endif + +static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, + const char *name) +{ + struct o2nm_cluster *cluster = NULL; + struct o2nm_node_group *ns = NULL; + struct config_group *o2hb_group = NULL, *ret = NULL; + void *defs = NULL; + + /* this runs under the parent dir's i_mutex; there can be only + * one caller in here at a time */ + if (o2nm_single_cluster) + return ERR_PTR(-ENOSPC); + + cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); + ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); + defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL); + o2hb_group = o2hb_alloc_hb_set(); + if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL) + goto out; + + config_group_init_type_name(&cluster->cl_group, name, + &o2nm_cluster_type); + config_group_init_type_name(&ns->ns_group, "node", + &o2nm_node_group_type); + + cluster->cl_group.default_groups = defs; + cluster->cl_group.default_groups[0] = &ns->ns_group; + cluster->cl_group.default_groups[1] = o2hb_group; + cluster->cl_group.default_groups[2] = NULL; + rwlock_init(&cluster->cl_nodes_lock); + cluster->cl_node_ip_tree = RB_ROOT; + cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; + cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; + cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; + + ret = &cluster->cl_group; + o2nm_single_cluster = cluster; + +out: + if (ret == NULL) { + kfree(cluster); + kfree(ns); + o2hb_free_hb_set(o2hb_group); + kfree(defs); + ret = ERR_PTR(-ENOMEM); + } + + return ret; +} + +static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) +{ + struct o2nm_cluster *cluster = to_o2nm_cluster(item); + int i; + struct config_item *killme; + + BUG_ON(o2nm_single_cluster != cluster); + o2nm_single_cluster = NULL; + + for (i = 0; cluster->cl_group.default_groups[i]; i++) { + killme = &cluster->cl_group.default_groups[i]->cg_item; + cluster->cl_group.default_groups[i] = NULL; + config_item_put(killme); + } + + config_item_put(item); +} + +static struct configfs_group_operations o2nm_cluster_group_group_ops = { + .make_group = o2nm_cluster_group_make_group, + .drop_item = o2nm_cluster_group_drop_item, +}; + +static struct config_item_type o2nm_cluster_group_type = { + .ct_group_ops = &o2nm_cluster_group_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct o2nm_cluster_group o2nm_cluster_group = { + .cs_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "cluster", + .ci_type = &o2nm_cluster_group_type, + }, + }, + }, +}; + +int o2nm_depend_item(struct config_item *item) +{ + return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); +} + +void o2nm_undepend_item(struct config_item *item) +{ + configfs_undepend_item(&o2nm_cluster_group.cs_subsys, item); +} + +int o2nm_depend_this_node(void) +{ + int ret = 0; + struct o2nm_node *local_node; + + local_node = o2nm_get_node_by_num(o2nm_this_node()); + if (!local_node) { + ret = -EINVAL; + goto out; + } + + ret = o2nm_depend_item(&local_node->nd_item); + o2nm_node_put(local_node); + +out: + return ret; +} + +void o2nm_undepend_this_node(void) +{ + struct o2nm_node *local_node; + + local_node = o2nm_get_node_by_num(o2nm_this_node()); + BUG_ON(!local_node); + + o2nm_undepend_item(&local_node->nd_item); + o2nm_node_put(local_node); +} + + +static void __exit exit_o2nm(void) +{ + /* XXX sync with hb callbacks and shut 
down hb? */ + o2net_unregister_hb_callbacks(); + configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); + o2cb_sys_shutdown(); + + o2net_exit(); +} + +static int __init init_o2nm(void) +{ + int ret = -1; + + cluster_print_version(); + + o2hb_init(); + + ret = o2net_init(); + if (ret) + goto out; + + ret = o2net_register_hb_callbacks(); + if (ret) + goto out_o2net; + + config_group_init(&o2nm_cluster_group.cs_subsys.su_group); + mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); + ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); + if (ret) { + printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); + goto out_callbacks; + } + + ret = o2cb_sys_init(); + if (!ret) + goto out; + + configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); +out_callbacks: + o2net_unregister_hb_callbacks(); +out_o2net: + o2net_exit(); +out: + return ret; +} + +MODULE_AUTHOR("Oracle"); +MODULE_LICENSE("GPL"); + +module_init(init_o2nm) +module_exit(exit_o2nm) diff --git a/fs/ocfs2/cluster/nodemanager.h b/fs/ocfs2/cluster/nodemanager.h new file mode 100644 index 0000000..c992ea0 --- /dev/null +++ b/fs/ocfs2/cluster/nodemanager.h @@ -0,0 +1,81 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * nodemanager.h + * + * Function prototypes + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + +#ifndef O2CLUSTER_NODEMANAGER_H +#define O2CLUSTER_NODEMANAGER_H + +#include "ocfs2_nodemanager.h" + +/* This totally doesn't belong here. */ +#include <linux/configfs.h> +#include <linux/rbtree.h> + +struct o2nm_node { + spinlock_t nd_lock; + struct config_item nd_item; + char nd_name[O2NM_MAX_NAME_LEN+1]; /* replace? */ + __u8 nd_num; + /* only one address per node, as attributes, for now. */ + __be32 nd_ipv4_address; + __be16 nd_ipv4_port; + struct rb_node nd_ip_node; + /* there can be only one local node for now */ + int nd_local; + + unsigned long nd_set_attributes; +}; + +struct o2nm_cluster { + struct config_group cl_group; + unsigned cl_has_local:1; + u8 cl_local_node; + rwlock_t cl_nodes_lock; + struct o2nm_node *cl_nodes[O2NM_MAX_NODES]; + struct rb_root cl_node_ip_tree; + unsigned int cl_idle_timeout_ms; + unsigned int cl_keepalive_delay_ms; + unsigned int cl_reconnect_delay_ms; + + /* this bitmap is part of a hack for disk bitmap.. will go eventually. 
- zab */ + unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; +}; + +extern struct o2nm_cluster *o2nm_single_cluster; + +u8 o2nm_this_node(void); + +int o2nm_configured_node_map(unsigned long *map, unsigned bytes); +struct o2nm_node *o2nm_get_node_by_num(u8 node_num); +struct o2nm_node *o2nm_get_node_by_ip(__be32 addr); +void o2nm_node_get(struct o2nm_node *node); +void o2nm_node_put(struct o2nm_node *node); + +int o2nm_depend_item(struct config_item *item); +void o2nm_undepend_item(struct config_item *item); +int o2nm_depend_this_node(void); +void o2nm_undepend_this_node(void); + +#endif /* O2CLUSTER_NODEMANAGER_H */ diff --git a/fs/ocfs2/cluster/ocfs2_heartbeat.h b/fs/ocfs2/cluster/ocfs2_heartbeat.h new file mode 100644 index 0000000..3f4151d --- /dev/null +++ b/fs/ocfs2/cluster/ocfs2_heartbeat.h @@ -0,0 +1,38 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs2_heartbeat.h + * + * On-disk structures for ocfs2_heartbeat + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef _OCFS2_HEARTBEAT_H +#define _OCFS2_HEARTBEAT_H + +struct o2hb_disk_heartbeat_block { + __le64 hb_seq; + __u8 hb_node; + __u8 hb_pad1[3]; + __le32 hb_cksum; + __le64 hb_generation; + __le32 hb_dead_ms; +}; + +#endif /* _OCFS2_HEARTBEAT_H */ diff --git a/fs/ocfs2/cluster/ocfs2_nodemanager.h b/fs/ocfs2/cluster/ocfs2_nodemanager.h new file mode 100644 index 0000000..5b9854b --- /dev/null +++ b/fs/ocfs2/cluster/ocfs2_nodemanager.h @@ -0,0 +1,39 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs2_nodemanager.h + * + * Header describing the interface between userspace and the kernel + * for the ocfs2_nodemanager module. + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ *
+ */
+
+#ifndef _OCFS2_NODEMANAGER_H
+#define _OCFS2_NODEMANAGER_H
+
+#define O2NM_API_VERSION 5
+
+#define O2NM_MAX_NODES 255
+#define O2NM_INVALID_NODE_NUM 255
+
+/* host name, group name, cluster name all 64 bytes */
+#define O2NM_MAX_NAME_LEN 64 // __NEW_UTS_LEN
+
+#endif /* _OCFS2_NODEMANAGER_H */
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
new file mode 100644
index 0000000..bbacf7d
--- /dev/null
+++ b/fs/ocfs2/cluster/quorum.c
@@ -0,0 +1,318 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ *
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/* This quorum hack is only here until we transition to some more rational
+ * approach that is driven from userspace. Honest. No foolin'.
+ *
+ * Imagine two nodes lose network connectivity to each other but they're still
+ * up and operating in every other way. Presumably a network timeout indicates
+ * that a node is broken and should be recovered. They can't both recover each
+ * other and both carry on without serialising their access to the file system.
+ * They need to decide who is authoritative. Now extend that problem to
+ * arbitrary groups of nodes losing connectivity between each other.
+ *
+ * So we declare that a node which has given up on connecting to a majority
+ * of nodes who are still heartbeating will fence itself.
+ *
+ * There are huge opportunities for races here. After we give up on a node's
+ * connection we need to wait long enough to give heartbeat an opportunity
+ * to declare the node as truly dead. We also need to be careful with the
+ * race between when we see a node start heartbeating and when we connect
+ * to it.
+ *
+ * So nodes that are in this transition put a hold on the quorum decision
+ * with a counter. As they fall out of this transition they drop the count
+ * and if they're the last, they fire off the decision.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/reboot.h>
+
+#include "heartbeat.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_QUORUM
+#include "masklog.h"
+#include "quorum.h"
+
+static struct o2quo_state {
+ spinlock_t qs_lock;
+ struct work_struct qs_work;
+ int qs_pending;
+ int qs_heartbeating;
+ unsigned long qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int qs_connected;
+ unsigned long qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int qs_holds;
+ unsigned long qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+} o2quo_state;
+
+/* this is horribly heavy-handed. It should instead flip the file
+ * system RO and call some userspace script. */
+static void o2quo_fence_self(void)
+{
+ /* panic spins with interrupts enabled.
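+ * even while spinning, interrupt handlers keep running and could push
+ * pending disk I/O out from under us.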
with preempt
+ * threads can still schedule, etc, etc */
+ o2hb_stop_all_regions();
+
+ printk("ocfs2 is very sorry to be fencing this system by restarting\n");
+ emergency_restart();
+}
+
+/* Indicate that a timeout occurred on a heartbeat region write. The
+ * other nodes in the cluster may consider us dead at that time so we
+ * want to "fence" ourselves so that we don't scribble on the disk
+ * after they think they've recovered us. This can't solve all
+ * problems related to writeout after recovery but this hack can at
+ * least close some of those gaps. When we have real fencing, this can
+ * go away as our node would be fenced externally before other nodes
+ * begin recovery. */
+void o2quo_disk_timeout(void)
+{
+ o2quo_fence_self();
+}
+
+static void o2quo_make_decision(struct work_struct *work)
+{
+ int quorum;
+ int lowest_hb, lowest_reachable = 0, fence = 0;
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
+ if (lowest_hb != O2NM_MAX_NODES)
+ lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);
+
+ mlog(0, "heartbeating: %d, connected: %d, "
+ "lowest: %d (%sreachable)\n", qs->qs_heartbeating,
+ qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");
+
+ if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
+ qs->qs_heartbeating == 1)
+ goto out;
+
+ if (qs->qs_heartbeating & 1) {
+ /* the odd numbered cluster case is straightforward --
+ * if we can't talk to the majority we're hosed */
+ quorum = (qs->qs_heartbeating + 1)/2;
+ if (qs->qs_connected < quorum) {
+ mlog(ML_ERROR, "fencing this node because it is "
+ "only connected to %u nodes and %u is needed "
+ "to make a quorum out of %u heartbeating nodes\n",
+ qs->qs_connected, quorum,
+ qs->qs_heartbeating);
+ fence = 1;
+ }
+ } else {
+ /* the even numbered cluster adds the possibility of each half
+ * of the cluster being able to talk amongst themselves.. in
+ * that case we're hosed if we can't talk to the group that has
+ * the lowest numbered node */
+ quorum = qs->qs_heartbeating / 2;
+ if (qs->qs_connected < quorum) {
+ mlog(ML_ERROR, "fencing this node because it is "
+ "only connected to %u nodes and %u is needed "
+ "to make a quorum out of %u heartbeating nodes\n",
+ qs->qs_connected, quorum,
+ qs->qs_heartbeating);
+ fence = 1;
+ }
+ else if ((qs->qs_connected == quorum) &&
+ !lowest_reachable) {
+ mlog(ML_ERROR, "fencing this node because it is "
+ "connected to a half-quorum of %u out of %u "
+ "nodes which doesn't include the lowest active "
+ "node %u\n", quorum, qs->qs_heartbeating,
+ lowest_hb);
+ fence = 1;
+ }
+ }
+
+out:
+ spin_unlock(&qs->qs_lock);
+ if (fence)
+ o2quo_fence_self();
+}
+
+static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
+{
+ assert_spin_locked(&qs->qs_lock);
+
+ if (!test_and_set_bit(node, qs->qs_hold_bm)) {
+ qs->qs_holds++;
+ mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
+ "node %u\n", node);
+ mlog(0, "node %u, %d total\n", node, qs->qs_holds);
+ }
+}
+
+static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
+{
+ assert_spin_locked(&qs->qs_lock);
+
+ if (test_and_clear_bit(node, qs->qs_hold_bm)) {
+ mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
+ if (--qs->qs_holds == 0) {
+ if (qs->qs_pending) {
+ qs->qs_pending = 0;
+ schedule_work(&qs->qs_work);
+ }
+ }
+ mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
+ node, qs->qs_holds);
+ }
+}
+
+/* as a node comes up we delay the quorum decision until we know the fate of
+ * the connection.
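+ * (concretely: hb_up() for a node we have no connection to yet takes a
+ * hold, parking the decision work until the connect attempt resolves.)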
the hold will be dropped in conn_up or hb_down. it might be
+ * perpetuated by conn_err until hb_down. if we already have a conn, we might
+ * be dropping a hold that conn_up got. */
+void o2quo_hb_up(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ qs->qs_heartbeating++;
+ mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
+ "node %u\n", node);
+ mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
+ set_bit(node, qs->qs_hb_bm);
+
+ mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
+
+ if (!test_bit(node, qs->qs_conn_bm))
+ o2quo_set_hold(qs, node);
+ else
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* hb going down releases any holds we might have had due to this node from
+ * conn_up, conn_err, or hb_up */
+void o2quo_hb_down(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ qs->qs_heartbeating--;
+ mlog_bug_on_msg(qs->qs_heartbeating < 0,
+ "node %u, %d heartbeating\n",
+ node, qs->qs_heartbeating);
+ mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
+ clear_bit(node, qs->qs_hb_bm);
+
+ mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
+
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* this tells us that we've decided that the node is still heartbeating
+ * even though we've lost its conn. it must only be called after conn_err
+ * and indicates that we must now make a quorum decision in the future,
+ * though we might be doing so after waiting for holds to drain. Here
+ * we'll be dropping the hold from conn_err. */
+void o2quo_hb_still_up(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ mlog(0, "node %u\n", node);
+
+ qs->qs_pending = 1;
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* This is analogous to hb_up. as a node's connection comes up we delay the
+ * quorum decision until we see it heartbeating. the hold will be dropped in
+ * hb_up or hb_down. it might be perpetuated by conn_err until hb_down. if
+ * it's already heartbeating we might be dropping a hold that conn_up got.
+ * */
+void o2quo_conn_up(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ qs->qs_connected++;
+ mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
+ "node %u\n", node);
+ mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
+ set_bit(node, qs->qs_conn_bm);
+
+ mlog(0, "node %u, %d total\n", node, qs->qs_connected);
+
+ if (!test_bit(node, qs->qs_hb_bm))
+ o2quo_set_hold(qs, node);
+ else
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* we've decided that we won't ever be connecting to the node again.
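+ * this is conn_up's bookkeeping run in reverse: the connected count and
+ * conn_bm bit are unwound below before any new hold is considered.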
if it's + * still heartbeating we grab a hold that will delay decisions until either the + * node stops heartbeating from hb_down or the caller decides that the node is + * still up and calls still_up */ +void o2quo_conn_err(u8 node) +{ + struct o2quo_state *qs = &o2quo_state; + + spin_lock(&qs->qs_lock); + + if (test_bit(node, qs->qs_conn_bm)) { + qs->qs_connected--; + mlog_bug_on_msg(qs->qs_connected < 0, + "node %u, connected %d\n", + node, qs->qs_connected); + + clear_bit(node, qs->qs_conn_bm); + } + + mlog(0, "node %u, %d total\n", node, qs->qs_connected); + + if (test_bit(node, qs->qs_hb_bm)) + o2quo_set_hold(qs, node); + + spin_unlock(&qs->qs_lock); +} + +void o2quo_init(void) +{ + struct o2quo_state *qs = &o2quo_state; + + spin_lock_init(&qs->qs_lock); + INIT_WORK(&qs->qs_work, o2quo_make_decision); +} + +void o2quo_exit(void) +{ + flush_scheduled_work(); +} diff --git a/fs/ocfs2/cluster/quorum.h b/fs/ocfs2/cluster/quorum.h new file mode 100644 index 0000000..6649cc6 --- /dev/null +++ b/fs/ocfs2/cluster/quorum.h @@ -0,0 +1,36 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + +#ifndef O2CLUSTER_QUORUM_H +#define O2CLUSTER_QUORUM_H + +void o2quo_init(void); +void o2quo_exit(void); + +void o2quo_hb_up(u8 node); +void o2quo_hb_down(u8 node); +void o2quo_hb_still_up(u8 node); +void o2quo_conn_up(u8 node); +void o2quo_conn_err(u8 node); +void o2quo_disk_timeout(void); + +#endif /* O2CLUSTER_QUORUM_H */ diff --git a/fs/ocfs2/cluster/sys.c b/fs/ocfs2/cluster/sys.c new file mode 100644 index 0000000..bc702da --- /dev/null +++ b/fs/ocfs2/cluster/sys.c @@ -0,0 +1,91 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * sys.c + * + * OCFS2 cluster sysfs interface + * + * Copyright (C) 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation, + * version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/fs.h> + +#include "ocfs2_nodemanager.h" +#include "masklog.h" +#include "sys.h" + + +static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", O2NM_API_VERSION); +} +static struct kobj_attribute attr_version = + __ATTR(interface_revision, S_IFREG | S_IRUGO, version_show, NULL); + +static struct attribute *o2cb_attrs[] = { + &attr_version.attr, + NULL, +}; + +static struct attribute_group o2cb_attr_group = { + .attrs = o2cb_attrs, +}; + +static struct kset *o2cb_kset; + +void o2cb_sys_shutdown(void) +{ + mlog_sys_shutdown(); + sysfs_remove_link(NULL, "o2cb"); + kset_unregister(o2cb_kset); +} + +int o2cb_sys_init(void) +{ + int ret; + + o2cb_kset = kset_create_and_add("o2cb", NULL, fs_kobj); + if (!o2cb_kset) + return -ENOMEM; + + /* + * Create this symlink for backwards compatibility with old + * versions of ocfs2-tools which look for things in /sys/o2cb. + */ + ret = sysfs_create_link(NULL, &o2cb_kset->kobj, "o2cb"); + if (ret) + goto error; + + ret = sysfs_create_group(&o2cb_kset->kobj, &o2cb_attr_group); + if (ret) + goto error; + + ret = mlog_sys_init(o2cb_kset); + if (ret) + goto error; + return 0; +error: + kset_unregister(o2cb_kset); + return ret; +} diff --git a/fs/ocfs2/cluster/sys.h b/fs/ocfs2/cluster/sys.h new file mode 100644 index 0000000..d66b8ab --- /dev/null +++ b/fs/ocfs2/cluster/sys.h @@ -0,0 +1,33 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * sys.h + * + * Function prototypes for o2cb sysfs interface + * + * Copyright (C) 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation, + * version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + +#ifndef O2CLUSTER_SYS_H +#define O2CLUSTER_SYS_H + +void o2cb_sys_shutdown(void); +int o2cb_sys_init(void); + +#endif /* O2CLUSTER_SYS_H */ diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c new file mode 100644 index 0000000..2bcf706 --- /dev/null +++ b/fs/ocfs2/cluster/tcp.c @@ -0,0 +1,2067 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * ----
+ *
+ * Callers for this were originally written against a very simple synchronous
+ * API. This implementation reflects those simple callers. Some day I'm sure
+ * we'll need to move to a more robust posting/callback mechanism.
+ *
+ * Transmit calls pass in kernel virtual addresses and block copying this into
+ * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
+ * for a failed socket to timeout. TX callers can also pass in a pointer to an
+ * 'int' which gets filled with an errno off the wire in response to the
+ * message they send.
+ *
+ * Handlers for unsolicited messages are registered. Each socket has a page
+ * that incoming data is copied into. First the header, then the data.
+ * Handlers are called from only one thread with a reference to this per-socket
+ * page. This page is destroyed after the handler call, so it can't be
+ * referenced beyond the call. Handlers may block but are discouraged from
+ * doing so.
+ *
+ * Any framing errors (bad magic, large payload lengths) close a connection.
+ *
+ * Our sock_container holds the state we associate with a socket. Its current
+ * framing state is held there as well as the refcounting we do around when it
+ * is safe to tear down the socket. The socket is only finally torn down from
+ * the container when the container loses all of its references -- so as long
+ * as you hold a ref on the container you can trust that the socket is valid
+ * for use with kernel socket APIs.
+ *
+ * Connections are initiated between a pair of nodes when the node with the
+ * higher node number gets a heartbeat callback which indicates that the lower
+ * numbered node has started heartbeating. The lower numbered node is passive
+ * and only accepts the connection if the higher numbered node is heartbeating.
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/net.h>
+#include <net/tcp.h>
+
+#include <asm/uaccess.h>
+
+#include "heartbeat.h"
+#include "tcp.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_TCP
+#include "masklog.h"
+#include "quorum.h"
+
+#include "tcp_internal.h"
+
+#define SC_NODEF_FMT "node %s (num %u) at %u.%u.%u.%u:%u"
+#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
+ NIPQUAD(sc->sc_node->nd_ipv4_address), \
+ ntohs(sc->sc_node->nd_ipv4_port)
+
+/*
+ * In the following two log macros, the whitespace after the ',' just
+ * before ##args is intentional. Otherwise, gcc 2.95 will eat the
+ * previous token if args expands to nothing.
+ */
+#define msglog(hdr, fmt, args...) do { \
+ typeof(hdr) __hdr = (hdr); \
+ mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
+ "key %08x num %u] " fmt, \
+ be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
+ be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
+ be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
+ be32_to_cpu(__hdr->msg_num) , ##args); \
+} while (0)
+
+#define sclog(sc, fmt, args...)
do { \
+ typeof(sc) __sc = (sc); \
+ mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
+ "pg_off %zu] " fmt, __sc, \
+ atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
+ __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
+ ##args); \
+} while (0)
+
+static DEFINE_RWLOCK(o2net_handler_lock);
+static struct rb_root o2net_handler_tree = RB_ROOT;
+
+static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
+
+/* XXX someday we'll need better accounting */
+static struct socket *o2net_listen_sock = NULL;
+
+/*
+ * listen work is only queued by the listening socket callbacks on the
+ * o2net_wq. teardown detaches the callbacks before destroying the workqueue.
+ * quorum work is queued as sock containers are shut down.. stop_listening
+ * tears down all the node's sock containers, preventing future shutdowns
+ * and queued quorum work, before canceling delayed quorum work and
+ * destroying the work queue.
+ */
+static struct workqueue_struct *o2net_wq;
+static struct work_struct o2net_listen_work;
+
+static struct o2hb_callback_func o2net_hb_up, o2net_hb_down;
+#define O2NET_HB_PRI 0x1
+
+static struct o2net_handshake *o2net_hand;
+static struct o2net_msg *o2net_keep_req, *o2net_keep_resp;
+
+static int o2net_sys_err_translations[O2NET_ERR_MAX] =
+ {[O2NET_ERR_NONE] = 0,
+ [O2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
+ [O2NET_ERR_OVERFLOW] = -EOVERFLOW,
+ [O2NET_ERR_DIED] = -EHOSTDOWN,};
+
+/* can't quite avoid *all* internal declarations :/ */
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
+static void o2net_listen_data_ready(struct sock *sk, int bytes);
+static void o2net_sc_send_keep_req(struct work_struct *work);
+static void o2net_idle_timer(unsigned long data);
+static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
+static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
+
+#ifdef CONFIG_DEBUG_FS
+static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
+ u32 msgkey, struct task_struct *task, u8 node)
+{
+ INIT_LIST_HEAD(&nst->st_net_debug_item);
+ nst->st_task = task;
+ nst->st_msg_type = msgtype;
+ nst->st_msg_key = msgkey;
+ nst->st_node = node;
+}
+
+static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
+{
+ do_gettimeofday(&nst->st_sock_time);
+}
+
+static void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
+{
+ do_gettimeofday(&nst->st_send_time);
+}
+
+static void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
+{
+ do_gettimeofday(&nst->st_status_time);
+}
+
+static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
+ struct o2net_sock_container *sc)
+{
+ nst->st_sc = sc;
+}
+
+static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
+{
+ nst->st_id = msg_id;
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
+ u32 msgkey, struct task_struct *task, u8 node)
+{
+}
+
+static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
+{
+}
+
+static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
+{
+}
+
+static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
+{
+}
+
+static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
+ struct o2net_sock_container *sc)
+{
+}
+
+static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
+ u32 msg_id)
+{
+}
+
+#endif /*
CONFIG_DEBUG_FS */ + +static inline int o2net_reconnect_delay(void) +{ + return o2nm_single_cluster->cl_reconnect_delay_ms; +} + +static inline int o2net_keepalive_delay(void) +{ + return o2nm_single_cluster->cl_keepalive_delay_ms; +} + +static inline int o2net_idle_timeout(void) +{ + return o2nm_single_cluster->cl_idle_timeout_ms; +} + +static inline int o2net_sys_err_to_errno(enum o2net_system_error err) +{ + int trans; + BUG_ON(err >= O2NET_ERR_MAX); + trans = o2net_sys_err_translations[err]; + + /* Just in case we mess up the translation table above */ + BUG_ON(err != O2NET_ERR_NONE && trans == 0); + return trans; +} + +static struct o2net_node * o2net_nn_from_num(u8 node_num) +{ + BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes)); + return &o2net_nodes[node_num]; +} + +static u8 o2net_num_from_nn(struct o2net_node *nn) +{ + BUG_ON(nn == NULL); + return nn - o2net_nodes; +} + +/* ------------------------------------------------------------ */ + +static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw) +{ + int ret = 0; + + do { + if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) { + ret = -EAGAIN; + break; + } + spin_lock(&nn->nn_lock); + ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id); + if (ret == 0) + list_add_tail(&nsw->ns_node_item, + &nn->nn_status_list); + spin_unlock(&nn->nn_lock); + } while (ret == -EAGAIN); + + if (ret == 0) { + init_waitqueue_head(&nsw->ns_wq); + nsw->ns_sys_status = O2NET_ERR_NONE; + nsw->ns_status = 0; + } + + return ret; +} + +static void o2net_complete_nsw_locked(struct o2net_node *nn, + struct o2net_status_wait *nsw, + enum o2net_system_error sys_status, + s32 status) +{ + assert_spin_locked(&nn->nn_lock); + + if (!list_empty(&nsw->ns_node_item)) { + list_del_init(&nsw->ns_node_item); + nsw->ns_sys_status = sys_status; + nsw->ns_status = status; + idr_remove(&nn->nn_status_idr, nsw->ns_id); + wake_up(&nsw->ns_wq); + } +} + +static void o2net_complete_nsw(struct o2net_node *nn, + struct o2net_status_wait *nsw, + u64 id, enum o2net_system_error sys_status, + s32 status) +{ + spin_lock(&nn->nn_lock); + if (nsw == NULL) { + if (id > INT_MAX) + goto out; + + nsw = idr_find(&nn->nn_status_idr, id); + if (nsw == NULL) + goto out; + } + + o2net_complete_nsw_locked(nn, nsw, sys_status, status); + +out: + spin_unlock(&nn->nn_lock); + return; +} + +static void o2net_complete_nodes_nsw(struct o2net_node *nn) +{ + struct o2net_status_wait *nsw, *tmp; + unsigned int num_kills = 0; + + assert_spin_locked(&nn->nn_lock); + + list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) { + o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0); + num_kills++; + } + + mlog(0, "completed %d messages for node %u\n", num_kills, + o2net_num_from_nn(nn)); +} + +static int o2net_nsw_completed(struct o2net_node *nn, + struct o2net_status_wait *nsw) +{ + int completed; + spin_lock(&nn->nn_lock); + completed = list_empty(&nsw->ns_node_item); + spin_unlock(&nn->nn_lock); + return completed; +} + +/* ------------------------------------------------------------ */ + +static void sc_kref_release(struct kref *kref) +{ + struct o2net_sock_container *sc = container_of(kref, + struct o2net_sock_container, sc_kref); + BUG_ON(timer_pending(&sc->sc_idle_timeout)); + + sclog(sc, "releasing\n"); + + if (sc->sc_sock) { + sock_release(sc->sc_sock); + sc->sc_sock = NULL; + } + + o2nm_node_put(sc->sc_node); + sc->sc_node = NULL; + + o2net_debug_del_sc(sc); + kfree(sc); +} + +static void sc_put(struct o2net_sock_container *sc) +{ + sclog(sc, "put\n"); + 
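+ /* may free the sc via sc_kref_release() once the final ref drops */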
kref_put(&sc->sc_kref, sc_kref_release); +} +static void sc_get(struct o2net_sock_container *sc) +{ + sclog(sc, "get\n"); + kref_get(&sc->sc_kref); +} +static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) +{ + struct o2net_sock_container *sc, *ret = NULL; + struct page *page = NULL; + + page = alloc_page(GFP_NOFS); + sc = kzalloc(sizeof(*sc), GFP_NOFS); + if (sc == NULL || page == NULL) + goto out; + + kref_init(&sc->sc_kref); + o2nm_node_get(node); + sc->sc_node = node; + + INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); + INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); + INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); + INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); + + init_timer(&sc->sc_idle_timeout); + sc->sc_idle_timeout.function = o2net_idle_timer; + sc->sc_idle_timeout.data = (unsigned long)sc; + + sclog(sc, "alloced\n"); + + ret = sc; + sc->sc_page = page; + o2net_debug_add_sc(sc); + sc = NULL; + page = NULL; + +out: + if (page) + __free_page(page); + kfree(sc); + + return ret; +} + +/* ------------------------------------------------------------ */ + +static void o2net_sc_queue_work(struct o2net_sock_container *sc, + struct work_struct *work) +{ + sc_get(sc); + if (!queue_work(o2net_wq, work)) + sc_put(sc); +} +static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, + struct delayed_work *work, + int delay) +{ + sc_get(sc); + if (!queue_delayed_work(o2net_wq, work, delay)) + sc_put(sc); +} +static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, + struct delayed_work *work) +{ + if (cancel_delayed_work(work)) + sc_put(sc); +} + +static atomic_t o2net_connected_peers = ATOMIC_INIT(0); + +int o2net_num_connected_peers(void) +{ + return atomic_read(&o2net_connected_peers); +} + +static void o2net_set_nn_state(struct o2net_node *nn, + struct o2net_sock_container *sc, + unsigned valid, int err) +{ + int was_valid = nn->nn_sc_valid; + int was_err = nn->nn_persistent_error; + struct o2net_sock_container *old_sc = nn->nn_sc; + + assert_spin_locked(&nn->nn_lock); + + if (old_sc && !sc) + atomic_dec(&o2net_connected_peers); + else if (!old_sc && sc) + atomic_inc(&o2net_connected_peers); + + /* the node num comparison and single connect/accept path should stop + * an non-null sc from being overwritten with another */ + BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc); + mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid); + mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc); + + if (was_valid && !valid && err == 0) + err = -ENOTCONN; + + mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n", + o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid, + nn->nn_persistent_error, err); + + nn->nn_sc = sc; + nn->nn_sc_valid = valid ? 1 : 0; + nn->nn_persistent_error = err; + + /* mirrors o2net_tx_can_proceed() */ + if (nn->nn_persistent_error || nn->nn_sc_valid) + wake_up(&nn->nn_sc_wq); + + if (!was_err && nn->nn_persistent_error) { + o2quo_conn_err(o2net_num_from_nn(nn)); + queue_delayed_work(o2net_wq, &nn->nn_still_up, + msecs_to_jiffies(O2NET_QUORUM_DELAY_MS)); + } + + if (was_valid && !valid) { + printk(KERN_INFO "o2net: no longer connected to " + SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); + o2net_complete_nodes_nsw(nn); + } + + if (!was_valid && valid) { + o2quo_conn_up(o2net_num_from_nn(nn)); + cancel_delayed_work(&nn->nn_connect_expired); + printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", + o2nm_this_node() > sc->sc_node->nd_num ? 
+ "connected to" : "accepted connection from", + SC_NODEF_ARGS(sc)); + } + + /* trigger the connecting worker func as long as we're not valid, + * it will back off if it shouldn't connect. This can be called + * from node config teardown and so needs to be careful about + * the work queue actually being up. */ + if (!valid && o2net_wq) { + unsigned long delay; + /* delay if we're withing a RECONNECT_DELAY of the + * last attempt */ + delay = (nn->nn_last_connect_attempt + + msecs_to_jiffies(o2net_reconnect_delay())) + - jiffies; + if (delay > msecs_to_jiffies(o2net_reconnect_delay())) + delay = 0; + mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay); + queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay); + + /* + * Delay the expired work after idle timeout. + * + * We might have lots of failed connection attempts that run + * through here but we only cancel the connect_expired work when + * a connection attempt succeeds. So only the first enqueue of + * the connect_expired work will do anything. The rest will see + * that it's already queued and do nothing. + */ + delay += msecs_to_jiffies(o2net_idle_timeout()); + queue_delayed_work(o2net_wq, &nn->nn_connect_expired, delay); + } + + /* keep track of the nn's sc ref for the caller */ + if ((old_sc == NULL) && sc) + sc_get(sc); + if (old_sc && (old_sc != sc)) { + o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work); + sc_put(old_sc); + } +} + +/* see o2net_register_callbacks() */ +static void o2net_data_ready(struct sock *sk, int bytes) +{ + void (*ready)(struct sock *sk, int bytes); + + read_lock(&sk->sk_callback_lock); + if (sk->sk_user_data) { + struct o2net_sock_container *sc = sk->sk_user_data; + sclog(sc, "data_ready hit\n"); + do_gettimeofday(&sc->sc_tv_data_ready); + o2net_sc_queue_work(sc, &sc->sc_rx_work); + ready = sc->sc_data_ready; + } else { + ready = sk->sk_data_ready; + } + read_unlock(&sk->sk_callback_lock); + + ready(sk, bytes); +} + +/* see o2net_register_callbacks() */ +static void o2net_state_change(struct sock *sk) +{ + void (*state_change)(struct sock *sk); + struct o2net_sock_container *sc; + + read_lock(&sk->sk_callback_lock); + sc = sk->sk_user_data; + if (sc == NULL) { + state_change = sk->sk_state_change; + goto out; + } + + sclog(sc, "state_change to %d\n", sk->sk_state); + + state_change = sc->sc_state_change; + + switch(sk->sk_state) { + /* ignore connecting sockets as they make progress */ + case TCP_SYN_SENT: + case TCP_SYN_RECV: + break; + case TCP_ESTABLISHED: + o2net_sc_queue_work(sc, &sc->sc_connect_work); + break; + default: + o2net_sc_queue_work(sc, &sc->sc_shutdown_work); + break; + } +out: + read_unlock(&sk->sk_callback_lock); + state_change(sk); +} + +/* + * we register callbacks so we can queue work on events before calling + * the original callbacks. our callbacks our careful to test user_data + * to discover when they've reaced with o2net_unregister_callbacks(). 
+ */
+static void o2net_register_callbacks(struct sock *sk,
+ struct o2net_sock_container *sc)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+
+ /* accepted sockets inherit the old listen socket data ready */
+ if (sk->sk_data_ready == o2net_listen_data_ready) {
+ sk->sk_data_ready = sk->sk_user_data;
+ sk->sk_user_data = NULL;
+ }
+
+ BUG_ON(sk->sk_user_data != NULL);
+ sk->sk_user_data = sc;
+ sc_get(sc);
+
+ sc->sc_data_ready = sk->sk_data_ready;
+ sc->sc_state_change = sk->sk_state_change;
+ sk->sk_data_ready = o2net_data_ready;
+ sk->sk_state_change = o2net_state_change;
+
+ mutex_init(&sc->sc_send_lock);
+
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int o2net_unregister_callbacks(struct sock *sk,
+ struct o2net_sock_container *sc)
+{
+ int ret = 0;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_user_data == sc) {
+ ret = 1;
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = sc->sc_data_ready;
+ sk->sk_state_change = sc->sc_state_change;
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ return ret;
+}
+
+/*
+ * this is a little helper that is called by callers who have seen a problem
+ * with an sc and want to detach it from the nn if someone hasn't already
+ * beaten them to it. if an error is given then the shutdown will be persistent
+ * and pending transmits will be canceled.
+ */
+static void o2net_ensure_shutdown(struct o2net_node *nn,
+ struct o2net_sock_container *sc,
+ int err)
+{
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_sc == sc)
+ o2net_set_nn_state(nn, NULL, 0, err);
+ spin_unlock(&nn->nn_lock);
+}
+
+/*
+ * This work queue function performs the blocking parts of socket shutdown. A
+ * few paths lead here. set_nn_state will trigger this callback if it sees an
+ * sc detached from the nn. state_change will also trigger this callback
+ * directly when it sees errors. In that case we need to call set_nn_state
+ * ourselves as state_change couldn't get the nn_lock and call set_nn_state
+ * itself.
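+ *
+ * note the ordering below: the socket callbacks are detached first so
+ * no new rx/keepalive work can be queued against the sc, the timers are
+ * quieted, and only then is the socket actually shut down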
+ */ +static void o2net_shutdown_sc(struct work_struct *work) +{ + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, + sc_shutdown_work); + struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); + + sclog(sc, "shutting down\n"); + + /* drop the callbacks ref and call shutdown only once */ + if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) { + /* we shouldn't flush as we're in the thread, the + * races with pending sc work structs are harmless */ + del_timer_sync(&sc->sc_idle_timeout); + o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); + sc_put(sc); + kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR); + } + + /* not fatal so failed connects before the other guy has our + * heartbeat can be retried */ + o2net_ensure_shutdown(nn, sc, 0); + sc_put(sc); +} + +/* ------------------------------------------------------------ */ + +static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type, + u32 key) +{ + int ret = memcmp(&nmh->nh_key, &key, sizeof(key)); + + if (ret == 0) + ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type)); + + return ret; +} + +static struct o2net_msg_handler * +o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p, + struct rb_node **ret_parent) +{ + struct rb_node **p = &o2net_handler_tree.rb_node; + struct rb_node *parent = NULL; + struct o2net_msg_handler *nmh, *ret = NULL; + int cmp; + + while (*p) { + parent = *p; + nmh = rb_entry(parent, struct o2net_msg_handler, nh_node); + cmp = o2net_handler_cmp(nmh, msg_type, key); + + if (cmp < 0) + p = &(*p)->rb_left; + else if (cmp > 0) + p = &(*p)->rb_right; + else { + ret = nmh; + break; + } + } + + if (ret_p != NULL) + *ret_p = p; + if (ret_parent != NULL) + *ret_parent = parent; + + return ret; +} + +static void o2net_handler_kref_release(struct kref *kref) +{ + struct o2net_msg_handler *nmh; + nmh = container_of(kref, struct o2net_msg_handler, nh_kref); + + kfree(nmh); +} + +static void o2net_handler_put(struct o2net_msg_handler *nmh) +{ + kref_put(&nmh->nh_kref, o2net_handler_kref_release); +} + +/* max_len is protection for the handler func. incoming messages won't + * be given to the handler if their payload is longer than the max. */ +int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, + o2net_msg_handler_func *func, void *data, + o2net_post_msg_handler_func *post_func, + struct list_head *unreg_list) +{ + struct o2net_msg_handler *nmh = NULL; + struct rb_node **p, *parent; + int ret = 0; + + if (max_len > O2NET_MAX_PAYLOAD_BYTES) { + mlog(0, "max_len for message handler out of range: %u\n", + max_len); + ret = -EINVAL; + goto out; + } + + if (!msg_type) { + mlog(0, "no message type provided: %u, %p\n", msg_type, func); + ret = -EINVAL; + goto out; + + } + if (!func) { + mlog(0, "no message handler provided: %u, %p\n", + msg_type, func); + ret = -EINVAL; + goto out; + } + + nmh = kzalloc(sizeof(struct o2net_msg_handler), GFP_NOFS); + if (nmh == NULL) { + ret = -ENOMEM; + goto out; + } + + nmh->nh_func = func; + nmh->nh_func_data = data; + nmh->nh_post_func = post_func; + nmh->nh_msg_type = msg_type; + nmh->nh_max_len = max_len; + nmh->nh_key = key; + /* the tree and list get this ref.. 
they're both removed in + * unregister when this ref is dropped */ + kref_init(&nmh->nh_kref); + INIT_LIST_HEAD(&nmh->nh_unregister_item); + + write_lock(&o2net_handler_lock); + if (o2net_handler_tree_lookup(msg_type, key, &p, &parent)) + ret = -EEXIST; + else { + rb_link_node(&nmh->nh_node, parent, p); + rb_insert_color(&nmh->nh_node, &o2net_handler_tree); + list_add_tail(&nmh->nh_unregister_item, unreg_list); + + mlog(ML_TCP, "registered handler func %p type %u key %08x\n", + func, msg_type, key); + /* we've had some trouble with handlers seemingly vanishing. */ + mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p, + &parent) == NULL, + "couldn't find handler we *just* registerd " + "for type %u key %08x\n", msg_type, key); + } + write_unlock(&o2net_handler_lock); + if (ret) + goto out; + +out: + if (ret) + kfree(nmh); + + return ret; +} +EXPORT_SYMBOL_GPL(o2net_register_handler); + +void o2net_unregister_handler_list(struct list_head *list) +{ + struct o2net_msg_handler *nmh, *n; + + write_lock(&o2net_handler_lock); + list_for_each_entry_safe(nmh, n, list, nh_unregister_item) { + mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n", + nmh->nh_func, nmh->nh_msg_type, nmh->nh_key); + rb_erase(&nmh->nh_node, &o2net_handler_tree); + list_del_init(&nmh->nh_unregister_item); + kref_put(&nmh->nh_kref, o2net_handler_kref_release); + } + write_unlock(&o2net_handler_lock); +} +EXPORT_SYMBOL_GPL(o2net_unregister_handler_list); + +static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key) +{ + struct o2net_msg_handler *nmh; + + read_lock(&o2net_handler_lock); + nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL); + if (nmh) + kref_get(&nmh->nh_kref); + read_unlock(&o2net_handler_lock); + + return nmh; +} + +/* ------------------------------------------------------------ */ + +static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len) +{ + int ret; + mm_segment_t oldfs; + struct kvec vec = { + .iov_len = len, + .iov_base = data, + }; + struct msghdr msg = { + .msg_iovlen = 1, + .msg_iov = (struct iovec *)&vec, + .msg_flags = MSG_DONTWAIT, + }; + + oldfs = get_fs(); + set_fs(get_ds()); + ret = sock_recvmsg(sock, &msg, len, msg.msg_flags); + set_fs(oldfs); + + return ret; +} + +static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec, + size_t veclen, size_t total) +{ + int ret; + mm_segment_t oldfs; + struct msghdr msg = { + .msg_iov = (struct iovec *)vec, + .msg_iovlen = veclen, + }; + + if (sock == NULL) { + ret = -EINVAL; + goto out; + } + + oldfs = get_fs(); + set_fs(get_ds()); + ret = sock_sendmsg(sock, &msg, total); + set_fs(oldfs); + if (ret != total) { + mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, + total); + if (ret >= 0) + ret = -EPIPE; /* should be smarter, I bet */ + goto out; + } + + ret = 0; +out: + if (ret < 0) + mlog(0, "returning error: %d\n", ret); + return ret; +} + +static void o2net_sendpage(struct o2net_sock_container *sc, + void *kmalloced_virt, + size_t size) +{ + struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); + ssize_t ret; + + while (1) { + mutex_lock(&sc->sc_send_lock); + ret = sc->sc_sock->ops->sendpage(sc->sc_sock, + virt_to_page(kmalloced_virt), + (long)kmalloced_virt & ~PAGE_MASK, + size, MSG_DONTWAIT); + mutex_unlock(&sc->sc_send_lock); + if (ret == size) + break; + if (ret == (ssize_t)-EAGAIN) { + mlog(0, "sendpage of size %zu to " SC_NODEF_FMT + " returned EAGAIN\n", size, SC_NODEF_ARGS(sc)); + cond_resched(); + continue; + } + mlog(ML_ERROR, "sendpage of size %zu to 
" SC_NODEF_FMT + " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); + o2net_ensure_shutdown(nn, sc, 0); + break; + } +} + +static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key) +{ + memset(msg, 0, sizeof(struct o2net_msg)); + msg->magic = cpu_to_be16(O2NET_MSG_MAGIC); + msg->data_len = cpu_to_be16(data_len); + msg->msg_type = cpu_to_be16(msg_type); + msg->sys_status = cpu_to_be32(O2NET_ERR_NONE); + msg->status = 0; + msg->key = cpu_to_be32(key); +} + +static int o2net_tx_can_proceed(struct o2net_node *nn, + struct o2net_sock_container **sc_ret, + int *error) +{ + int ret = 0; + + spin_lock(&nn->nn_lock); + if (nn->nn_persistent_error) { + ret = 1; + *sc_ret = NULL; + *error = nn->nn_persistent_error; + } else if (nn->nn_sc_valid) { + kref_get(&nn->nn_sc->sc_kref); + + ret = 1; + *sc_ret = nn->nn_sc; + *error = 0; + } + spin_unlock(&nn->nn_lock); + + return ret; +} + +int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, + size_t caller_veclen, u8 target_node, int *status) +{ + int ret, error = 0; + struct o2net_msg *msg = NULL; + size_t veclen, caller_bytes = 0; + struct kvec *vec = NULL; + struct o2net_sock_container *sc = NULL; + struct o2net_node *nn = o2net_nn_from_num(target_node); + struct o2net_status_wait nsw = { + .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item), + }; + struct o2net_send_tracking nst; + + o2net_init_nst(&nst, msg_type, key, current, target_node); + + if (o2net_wq == NULL) { + mlog(0, "attempt to tx without o2netd running\n"); + ret = -ESRCH; + goto out; + } + + if (caller_veclen == 0) { + mlog(0, "bad kvec array length\n"); + ret = -EINVAL; + goto out; + } + + caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen); + if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) { + mlog(0, "total payload len %zu too large\n", caller_bytes); + ret = -EINVAL; + goto out; + } + + if (target_node == o2nm_this_node()) { + ret = -ELOOP; + goto out; + } + + o2net_debug_add_nst(&nst); + + o2net_set_nst_sock_time(&nst); + + ret = wait_event_interruptible(nn->nn_sc_wq, + o2net_tx_can_proceed(nn, &sc, &error)); + if (!ret && error) + ret = error; + if (ret) + goto out; + + o2net_set_nst_sock_container(&nst, sc); + + veclen = caller_veclen + 1; + vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC); + if (vec == NULL) { + mlog(0, "failed to %zu element kvec!\n", veclen); + ret = -ENOMEM; + goto out; + } + + msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC); + if (!msg) { + mlog(0, "failed to allocate a o2net_msg!\n"); + ret = -ENOMEM; + goto out; + } + + o2net_init_msg(msg, caller_bytes, msg_type, key); + + vec[0].iov_len = sizeof(struct o2net_msg); + vec[0].iov_base = msg; + memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec)); + + ret = o2net_prep_nsw(nn, &nsw); + if (ret) + goto out; + + msg->msg_num = cpu_to_be32(nsw.ns_id); + o2net_set_nst_msg_id(&nst, nsw.ns_id); + + o2net_set_nst_send_time(&nst); + + /* finally, convert the message header to network byte-order + * and send */ + mutex_lock(&sc->sc_send_lock); + ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen, + sizeof(struct o2net_msg) + caller_bytes); + mutex_unlock(&sc->sc_send_lock); + msglog(msg, "sending returned %d\n", ret); + if (ret < 0) { + mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret); + goto out; + } + + /* wait on other node's handler */ + o2net_set_nst_status_time(&nst); + wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw)); + + /* Note that we avoid overwriting the callers status return + * variable if a system error was 
reported on the other + side. Callers beware. */ + ret = o2net_sys_err_to_errno(nsw.ns_sys_status); + if (status && !ret) + *status = nsw.ns_status; + + mlog(0, "woken, returning system status %d, user status %d\n", + ret, nsw.ns_status); +out: + o2net_debug_del_nst(&nst); /* must be before dropping sc and node */ + if (sc) + sc_put(sc); + if (vec) + kfree(vec); + if (msg) + kfree(msg); + o2net_complete_nsw(nn, &nsw, 0, 0, 0); + return ret; +} +EXPORT_SYMBOL_GPL(o2net_send_message_vec); + +int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len, + u8 target_node, int *status) +{ + struct kvec vec = { + .iov_base = data, + .iov_len = len, + }; + return o2net_send_message_vec(msg_type, key, &vec, 1, + target_node, status); +} +EXPORT_SYMBOL_GPL(o2net_send_message); + +static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr, + enum o2net_system_error syserr, int err) +{ + struct kvec vec = { + .iov_base = hdr, + .iov_len = sizeof(struct o2net_msg), + }; + + BUG_ON(syserr >= O2NET_ERR_MAX); + + /* leave other fields intact from the incoming message, msg_num + * in particular */ + hdr->sys_status = cpu_to_be32(syserr); + hdr->status = cpu_to_be32(err); + hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); /* twiddle the magic */ + hdr->data_len = 0; + + msglog(hdr, "about to send status magic %d\n", err); + /* hdr has been in host byteorder this whole time */ + return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg)); +} + +/* this returns -errno if the header was unknown or too large, etc. + * after this is called the buffer is reused for the next message */ +static int o2net_process_message(struct o2net_sock_container *sc, + struct o2net_msg *hdr) +{ + struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); + int ret = 0, handler_status; + enum o2net_system_error syserr; + struct o2net_msg_handler *nmh = NULL; + void *ret_data = NULL; + + msglog(hdr, "processing message\n"); + + o2net_sc_postpone_idle(sc); + + switch(be16_to_cpu(hdr->magic)) { + case O2NET_MSG_STATUS_MAGIC: + /* special type for returning message status */ + o2net_complete_nsw(nn, NULL, + be32_to_cpu(hdr->msg_num), + be32_to_cpu(hdr->sys_status), + be32_to_cpu(hdr->status)); + goto out; + case O2NET_MSG_KEEP_REQ_MAGIC: + o2net_sendpage(sc, o2net_keep_resp, + sizeof(*o2net_keep_resp)); + goto out; + case O2NET_MSG_KEEP_RESP_MAGIC: + goto out; + case O2NET_MSG_MAGIC: + break; + default: + msglog(hdr, "bad magic\n"); + ret = -EINVAL; + goto out; + break; + } + + /* find a handler for it */ + handler_status = 0; + nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type), + be32_to_cpu(hdr->key)); + if (!nmh) { + mlog(ML_TCP, "couldn't find handler for type %u key %08x\n", + be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key)); + syserr = O2NET_ERR_NO_HNDLR; + goto out_respond; + } + + syserr = O2NET_ERR_NONE; + + if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len) + syserr = O2NET_ERR_OVERFLOW; + + if (syserr != O2NET_ERR_NONE) + goto out_respond; + + do_gettimeofday(&sc->sc_tv_func_start); + sc->sc_msg_key = be32_to_cpu(hdr->key); + sc->sc_msg_type = be16_to_cpu(hdr->msg_type); + handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + + be16_to_cpu(hdr->data_len), + nmh->nh_func_data, &ret_data); + do_gettimeofday(&sc->sc_tv_func_stop); + +out_respond: + /* this destroys the hdr, so don't use it after this */ + mutex_lock(&sc->sc_send_lock); + ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr, + handler_status); + mutex_unlock(&sc->sc_send_lock); + hdr = NULL; + mlog(0, "sending
handler status %d, syserr %d returned %d\n", + handler_status, syserr, ret); + + if (nmh) { + BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL); + if (nmh->nh_post_func) + (nmh->nh_post_func)(handler_status, nmh->nh_func_data, + ret_data); + } + +out: + if (nmh) + o2net_handler_put(nmh); + return ret; +} + +static int o2net_check_handshake(struct o2net_sock_container *sc) +{ + struct o2net_handshake *hand = page_address(sc->sc_page); + struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); + + if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { + mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol " + "version %llu but %llu is required, disconnecting\n", + SC_NODEF_ARGS(sc), + (unsigned long long)be64_to_cpu(hand->protocol_version), + O2NET_PROTOCOL_VERSION); + + /* don't bother reconnecting if it's the wrong version. */ + o2net_ensure_shutdown(nn, sc, -ENOTCONN); + return -1; + } + + /* + * Ensure timeouts are consistent with other nodes, otherwise + * we can end up with one node thinking that the other must be down, + * but isn't. This can ultimately cause corruption. + */ + if (be32_to_cpu(hand->o2net_idle_timeout_ms) != + o2net_idle_timeout()) { + mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of " + "%u ms, but we use %u ms locally. disconnecting\n", + SC_NODEF_ARGS(sc), + be32_to_cpu(hand->o2net_idle_timeout_ms), + o2net_idle_timeout()); + o2net_ensure_shutdown(nn, sc, -ENOTCONN); + return -1; + } + + if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != + o2net_keepalive_delay()) { + mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of " + "%u ms, but we use %u ms locally. disconnecting\n", + SC_NODEF_ARGS(sc), + be32_to_cpu(hand->o2net_keepalive_delay_ms), + o2net_keepalive_delay()); + o2net_ensure_shutdown(nn, sc, -ENOTCONN); + return -1; + } + + if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != + O2HB_MAX_WRITE_TIMEOUT_MS) { + mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of " + "%u ms, but we use %u ms locally. disconnecting\n", + SC_NODEF_ARGS(sc), + be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), + O2HB_MAX_WRITE_TIMEOUT_MS); + o2net_ensure_shutdown(nn, sc, -ENOTCONN); + return -1; + } + + sc->sc_handshake_ok = 1; + + spin_lock(&nn->nn_lock); + /* set valid and queue the idle timers only if it hasn't been + * shut down already */ + if (nn->nn_sc == sc) { + o2net_sc_reset_idle_timer(sc); + atomic_set(&nn->nn_timeout, 0); + o2net_set_nn_state(nn, sc, 1, 0); + } + spin_unlock(&nn->nn_lock); + + /* shift everything up as though it wasn't there */ + sc->sc_page_off -= sizeof(struct o2net_handshake); + if (sc->sc_page_off) + memmove(hand, hand + 1, sc->sc_page_off); + + return 0; +} + +/* this demuxes the queued rx bytes into header or payload bits and calls + * handlers as each full message is read off the socket.
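 + * As a minimal sketch of what this loop consumes (the names are just the fields of struct o2net_msg in tcp.h, nothing new), the receive buffer fills up as + * + *	struct o2net_msg hdr;                   (read first) + *	u8 payload[be16_to_cpu(hdr.data_len)];  (then data_len more bytes) + * + * with sc->sc_page_off tracking how far into that header-plus-payload image the socket reads have progressed.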
it returns -error, + * == 0 eof, or > 0 for progress made.*/ +static int o2net_advance_rx(struct o2net_sock_container *sc) +{ + struct o2net_msg *hdr; + int ret = 0; + void *data; + size_t datalen; + + sclog(sc, "receiving\n"); + do_gettimeofday(&sc->sc_tv_advance_start); + + if (unlikely(sc->sc_handshake_ok == 0)) { + if (sc->sc_page_off < sizeof(struct o2net_handshake)) { + data = page_address(sc->sc_page) + sc->sc_page_off; + datalen = sizeof(struct o2net_handshake) - sc->sc_page_off; + ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen); + if (ret > 0) + sc->sc_page_off += ret; + } + + if (sc->sc_page_off == sizeof(struct o2net_handshake)) { + o2net_check_handshake(sc); + if (unlikely(sc->sc_handshake_ok == 0)) + ret = -EPROTO; + } + goto out; + } + + /* do we need more header? */ + if (sc->sc_page_off < sizeof(struct o2net_msg)) { + data = page_address(sc->sc_page) + sc->sc_page_off; + datalen = sizeof(struct o2net_msg) - sc->sc_page_off; + ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen); + if (ret > 0) { + sc->sc_page_off += ret; + /* only swab incoming here.. we can + * only get here once as we cross from + * being under to over */ + if (sc->sc_page_off == sizeof(struct o2net_msg)) { + hdr = page_address(sc->sc_page); + if (be16_to_cpu(hdr->data_len) > + O2NET_MAX_PAYLOAD_BYTES) + ret = -EOVERFLOW; + } + } + if (ret <= 0) + goto out; + } + + if (sc->sc_page_off < sizeof(struct o2net_msg)) { + /* oof, still don't have a header */ + goto out; + } + + /* this was swabbed above when we first read it */ + hdr = page_address(sc->sc_page); + + msglog(hdr, "at page_off %zu\n", sc->sc_page_off); + + /* do we need more payload? */ + if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) { + /* need more payload */ + data = page_address(sc->sc_page) + sc->sc_page_off; + datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) - + sc->sc_page_off; + ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen); + if (ret > 0) + sc->sc_page_off += ret; + if (ret <= 0) + goto out; + } + + if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) { + /* we can only get here once, the first time we read + * the payload.. so set ret to progress if the handler + * works out. after calling this the message is toast */ + ret = o2net_process_message(sc, hdr); + if (ret == 0) + ret = 1; + sc->sc_page_off = 0; + } + +out: + sclog(sc, "ret = %d\n", ret); + do_gettimeofday(&sc->sc_tv_advance_stop); + return ret; +} + +/* this work func is triggered by data ready. it reads until it can read no + * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing + * our work the work struct will be marked and we'll be called again. */ +static void o2net_rx_until_empty(struct work_struct *work) +{ + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, sc_rx_work); + int ret; + + do { + ret = o2net_advance_rx(sc); + } while (ret > 0); + + if (ret <= 0 && ret != -EAGAIN) { + struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); + sclog(sc, "saw error %d, closing\n", ret); + /* not permanent so read failed handshake can retry */ + o2net_ensure_shutdown(nn, sc, 0); + } + + sc_put(sc); +} + +static int o2net_set_nodelay(struct socket *sock) +{ + int ret, val = 1; + mm_segment_t oldfs; + + oldfs = get_fs(); + set_fs(KERNEL_DS); + + /* + * Dear unsuspecting programmer, + * + * Don't use sock_setsockopt() for SOL_TCP.
It doesn't check its level + * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will + * silently turn into SO_DEBUG. + * + * Yours, + * Keeper of hilariously fragile interfaces. + */ + ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, + (char __user *)&val, sizeof(val)); + + set_fs(oldfs); + return ret; +} + +static void o2net_initialize_handshake(void) +{ + o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32( + O2HB_MAX_WRITE_TIMEOUT_MS); + o2net_hand->o2net_idle_timeout_ms = cpu_to_be32(o2net_idle_timeout()); + o2net_hand->o2net_keepalive_delay_ms = cpu_to_be32( + o2net_keepalive_delay()); + o2net_hand->o2net_reconnect_delay_ms = cpu_to_be32( + o2net_reconnect_delay()); +} + +/* ------------------------------------------------------------ */ + +/* called when a connect completes and after a sock is accepted. the + * rx path will see the response and mark the sc valid */ +static void o2net_sc_connect_completed(struct work_struct *work) +{ + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, + sc_connect_work); + + mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", + (unsigned long long)O2NET_PROTOCOL_VERSION, + (unsigned long long)be64_to_cpu(o2net_hand->connector_id)); + + o2net_initialize_handshake(); + o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand)); + sc_put(sc); +} + +/* this is called as a work_struct func. */ +static void o2net_sc_send_keep_req(struct work_struct *work) +{ + struct o2net_sock_container *sc = + container_of(work, struct o2net_sock_container, + sc_keepalive_work.work); + + o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); + sc_put(sc); +} + +/* socket shutdown does a del_timer_sync against this as it tears down. + * we can't start this timer until we've got to the point in sc buildup + * where shutdown is going to be involved */ +static void o2net_idle_timer(unsigned long data) +{ + struct o2net_sock_container *sc = (struct o2net_sock_container *)data; + struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); + struct timeval now; + + do_gettimeofday(&now); + + printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " + "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), + o2net_idle_timeout() / 1000, + o2net_idle_timeout() % 1000); + mlog(ML_NOTICE, "here are some times that might help debug the " + "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " + "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", + sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, + now.tv_sec, (long) now.tv_usec, + sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, + sc->sc_tv_advance_start.tv_sec, + (long) sc->sc_tv_advance_start.tv_usec, + sc->sc_tv_advance_stop.tv_sec, + (long) sc->sc_tv_advance_stop.tv_usec, + sc->sc_msg_key, sc->sc_msg_type, + sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec, + sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec); + + /* + * Initialize the nn_timeout so that the next connection attempt + * will continue in o2net_start_connect. 
+ */ + atomic_set(&nn->nn_timeout, 1); + + o2net_sc_queue_work(sc, &sc->sc_shutdown_work); +} + +static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc) +{ + o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); + o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work, + msecs_to_jiffies(o2net_keepalive_delay())); + do_gettimeofday(&sc->sc_tv_timer); + mod_timer(&sc->sc_idle_timeout, + jiffies + msecs_to_jiffies(o2net_idle_timeout())); +} + +static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) +{ + /* Only push out an existing timer */ + if (timer_pending(&sc->sc_idle_timeout)) + o2net_sc_reset_idle_timer(sc); +} + +/* this work func is kicked whenever a path sets the nn state which doesn't + * have valid set. This includes seeing hb come up, losing a connection, + * having a connect attempt fail, etc. This centralizes the logic which decides + * if a connect attempt should be made or if we should give up and all future + * transmit attempts should fail */ +static void o2net_start_connect(struct work_struct *work) +{ + struct o2net_node *nn = + container_of(work, struct o2net_node, nn_connect_work.work); + struct o2net_sock_container *sc = NULL; + struct o2nm_node *node = NULL, *mynode = NULL; + struct socket *sock = NULL; + struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; + int ret = 0, stop; + unsigned int timeout; + + /* if we're greater we initiate tx, otherwise we accept */ + if (o2nm_this_node() <= o2net_num_from_nn(nn)) + goto out; + + /* watch for racing with tearing a node down */ + node = o2nm_get_node_by_num(o2net_num_from_nn(nn)); + if (node == NULL) { + ret = 0; + goto out; + } + + mynode = o2nm_get_node_by_num(o2nm_this_node()); + if (mynode == NULL) { + ret = 0; + goto out; + } + + spin_lock(&nn->nn_lock); + /* + * see if we already have one pending or have given up. + * For nn_timeout, it is set when we close the connection + * because of the idle time out. So it means that we have + * at least connected to that node successfully once, + * now try to connect to it again. 
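 + * Reading the check below as a sketch of the intent: stop if a socket already exists or a persistent error is recorded, unless that error is the -ENOTCONN left behind by the idle timeout (nn_timeout != 0), in which case fall through and retry the connect.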
+ */ + timeout = atomic_read(&nn->nn_timeout); + stop = (nn->nn_sc || + (nn->nn_persistent_error && + (nn->nn_persistent_error != -ENOTCONN || timeout == 0))); + spin_unlock(&nn->nn_lock); + if (stop) + goto out; + + nn->nn_last_connect_attempt = jiffies; + + sc = sc_alloc(node); + if (sc == NULL) { + mlog(0, "couldn't allocate sc\n"); + ret = -ENOMEM; + goto out; + } + + ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); + if (ret < 0) { + mlog(0, "can't create socket: %d\n", ret); + goto out; + } + sc->sc_sock = sock; /* freed by sc_kref_release */ + + sock->sk->sk_allocation = GFP_ATOMIC; + + myaddr.sin_family = AF_INET; + myaddr.sin_addr.s_addr = mynode->nd_ipv4_address; + myaddr.sin_port = htons(0); /* any port */ + + ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr, + sizeof(myaddr)); + if (ret) { + mlog(ML_ERROR, "bind failed with %d at address %u.%u.%u.%u\n", + ret, NIPQUAD(mynode->nd_ipv4_address)); + goto out; + } + + ret = o2net_set_nodelay(sc->sc_sock); + if (ret) { + mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); + goto out; + } + + o2net_register_callbacks(sc->sc_sock->sk, sc); + + spin_lock(&nn->nn_lock); + /* handshake completion will set nn->nn_sc_valid */ + o2net_set_nn_state(nn, sc, 0, 0); + spin_unlock(&nn->nn_lock); + + remoteaddr.sin_family = AF_INET; + remoteaddr.sin_addr.s_addr = node->nd_ipv4_address; + remoteaddr.sin_port = node->nd_ipv4_port; + + ret = sc->sc_sock->ops->connect(sc->sc_sock, + (struct sockaddr *)&remoteaddr, + sizeof(remoteaddr), + O_NONBLOCK); + if (ret == -EINPROGRESS) + ret = 0; + +out: + if (ret) { + mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed " + "with errno %d\n", SC_NODEF_ARGS(sc), ret); + /* 0 err so that another will be queued and attempted + * from set_nn_state */ + if (sc) + o2net_ensure_shutdown(nn, sc, 0); + } + if (sc) + sc_put(sc); + if (node) + o2nm_node_put(node); + if (mynode) + o2nm_node_put(mynode); + + return; +} + +static void o2net_connect_expired(struct work_struct *work) +{ + struct o2net_node *nn = + container_of(work, struct o2net_node, nn_connect_expired.work); + + spin_lock(&nn->nn_lock); + if (!nn->nn_sc_valid) { + mlog(ML_ERROR, "no connection established with node %u after " + "%u.%u seconds, giving up and returning errors.\n", + o2net_num_from_nn(nn), + o2net_idle_timeout() / 1000, + o2net_idle_timeout() % 1000); + + o2net_set_nn_state(nn, NULL, 0, -ENOTCONN); + } + spin_unlock(&nn->nn_lock); +} + +static void o2net_still_up(struct work_struct *work) +{ + struct o2net_node *nn = + container_of(work, struct o2net_node, nn_still_up.work); + + o2quo_hb_still_up(o2net_num_from_nn(nn)); +} + +/* ------------------------------------------------------------ */ + +void o2net_disconnect_node(struct o2nm_node *node) +{ + struct o2net_node *nn = o2net_nn_from_num(node->nd_num); + + /* don't reconnect until it's heartbeating again */ + spin_lock(&nn->nn_lock); + atomic_set(&nn->nn_timeout, 0); + o2net_set_nn_state(nn, NULL, 0, -ENOTCONN); + spin_unlock(&nn->nn_lock); + + if (o2net_wq) { + cancel_delayed_work(&nn->nn_connect_expired); + cancel_delayed_work(&nn->nn_connect_work); + cancel_delayed_work(&nn->nn_still_up); + flush_workqueue(o2net_wq); + } +} + +static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num, + void *data) +{ + o2quo_hb_down(node_num); + + if (node_num != o2nm_this_node()) + o2net_disconnect_node(node); + + BUG_ON(atomic_read(&o2net_connected_peers) < 0); +} + +static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num, + void *data) +{ 
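+	/* registered below in o2net_register_hb_callbacks() as the O2HB_NODE_UP_CB handler; it runs once heartbeat first sees a node */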
+ struct o2net_node *nn = o2net_nn_from_num(node_num); + + o2quo_hb_up(node_num); + + /* ensure an immediate connect attempt */ + nn->nn_last_connect_attempt = jiffies - + (msecs_to_jiffies(o2net_reconnect_delay()) + 1); + + if (node_num != o2nm_this_node()) { + /* believe it or not, accept and node heartbeating testing + * can succeed for this node before we got here.. so + * only use set_nn_state to clear the persistent error + * if that hasn't already happened */ + spin_lock(&nn->nn_lock); + atomic_set(&nn->nn_timeout, 0); + if (nn->nn_persistent_error) + o2net_set_nn_state(nn, NULL, 0, 0); + spin_unlock(&nn->nn_lock); + } +} + +void o2net_unregister_hb_callbacks(void) +{ + o2hb_unregister_callback(NULL, &o2net_hb_up); + o2hb_unregister_callback(NULL, &o2net_hb_down); +} + +int o2net_register_hb_callbacks(void) +{ + int ret; + + o2hb_setup_callback(&o2net_hb_down, O2HB_NODE_DOWN_CB, + o2net_hb_node_down_cb, NULL, O2NET_HB_PRI); + o2hb_setup_callback(&o2net_hb_up, O2HB_NODE_UP_CB, + o2net_hb_node_up_cb, NULL, O2NET_HB_PRI); + + ret = o2hb_register_callback(NULL, &o2net_hb_up); + if (ret == 0) + ret = o2hb_register_callback(NULL, &o2net_hb_down); + + if (ret) + o2net_unregister_hb_callbacks(); + + return ret; +} + +/* ------------------------------------------------------------ */ + +static int o2net_accept_one(struct socket *sock) +{ + int ret, slen; + struct sockaddr_in sin; + struct socket *new_sock = NULL; + struct o2nm_node *node = NULL; + struct o2net_sock_container *sc = NULL; + struct o2net_node *nn; + + BUG_ON(sock == NULL); + ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, + sock->sk->sk_protocol, &new_sock); + if (ret) + goto out; + + new_sock->type = sock->type; + new_sock->ops = sock->ops; + ret = sock->ops->accept(sock, new_sock, O_NONBLOCK); + if (ret < 0) + goto out; + + new_sock->sk->sk_allocation = GFP_ATOMIC; + + ret = o2net_set_nodelay(new_sock); + if (ret) { + mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret); + goto out; + } + + slen = sizeof(sin); + ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin, + &slen, 1); + if (ret < 0) + goto out; + + node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); + if (node == NULL) { + mlog(ML_NOTICE, "attempt to connect from unknown node at " + "%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr), + ntohs(sin.sin_port)); + ret = -EINVAL; + goto out; + } + + if (o2nm_this_node() > node->nd_num) { + mlog(ML_NOTICE, "unexpected connect attempted from a lower " + "numbered node '%s' at " "%u.%u.%u.%u:%d with num %u\n", + node->nd_name, NIPQUAD(sin.sin_addr.s_addr), + ntohs(sin.sin_port), node->nd_num); + ret = -EINVAL; + goto out; + } + + /* this happens all the time when the other node sees our heartbeat + * and tries to connect before we see their heartbeat */ + if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) { + mlog(ML_CONN, "attempt to connect from node '%s' at " + "%u.%u.%u.%u:%d but it isn't heartbeating\n", + node->nd_name, NIPQUAD(sin.sin_addr.s_addr), + ntohs(sin.sin_port)); + ret = -EINVAL; + goto out; + } + + nn = o2net_nn_from_num(node->nd_num); + + spin_lock(&nn->nn_lock); + if (nn->nn_sc) + ret = -EBUSY; + else + ret = 0; + spin_unlock(&nn->nn_lock); + if (ret) { + mlog(ML_NOTICE, "attempt to connect from node '%s' at " + "%u.%u.%u.%u:%d but it already has an open connection\n", + node->nd_name, NIPQUAD(sin.sin_addr.s_addr), + ntohs(sin.sin_port)); + goto out; + } + + sc = sc_alloc(node); + if (sc == NULL) { + ret = -ENOMEM; + goto out; + } + + sc->sc_sock = new_sock; +
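+	/* the sc owns the socket from here on; clearing new_sock keeps the shared exit path below from sock_release()ing a socket that sc_kref_release is now responsible for */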
new_sock = NULL; + + spin_lock(&nn->nn_lock); + atomic_set(&nn->nn_timeout, 0); + o2net_set_nn_state(nn, sc, 0, 0); + spin_unlock(&nn->nn_lock); + + o2net_register_callbacks(sc->sc_sock->sk, sc); + o2net_sc_queue_work(sc, &sc->sc_rx_work); + + o2net_initialize_handshake(); + o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand)); + +out: + if (new_sock) + sock_release(new_sock); + if (node) + o2nm_node_put(node); + if (sc) + sc_put(sc); + return ret; +} + +static void o2net_accept_many(struct work_struct *work) +{ + struct socket *sock = o2net_listen_sock; + while (o2net_accept_one(sock) == 0) + cond_resched(); +} + +static void o2net_listen_data_ready(struct sock *sk, int bytes) +{ + void (*ready)(struct sock *sk, int bytes); + + read_lock(&sk->sk_callback_lock); + ready = sk->sk_user_data; + if (ready == NULL) { /* check for teardown race */ + ready = sk->sk_data_ready; + goto out; + } + + /* ->sk_data_ready is also called for a newly established child socket + * before it has been accepted and the acceptor has set up their + * data_ready.. we only want to queue listen work for our listening + * socket */ + if (sk->sk_state == TCP_LISTEN) { + mlog(ML_TCP, "bytes: %d\n", bytes); + queue_work(o2net_wq, &o2net_listen_work); + } + +out: + read_unlock(&sk->sk_callback_lock); + ready(sk, bytes); +} + +static int o2net_open_listening_sock(__be32 addr, __be16 port) +{ + struct socket *sock = NULL; + int ret; + struct sockaddr_in sin = { + .sin_family = PF_INET, + .sin_addr = { .s_addr = addr }, + .sin_port = port, + }; + + ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); + if (ret < 0) { + mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret); + goto out; + } + + sock->sk->sk_allocation = GFP_ATOMIC; + + write_lock_bh(&sock->sk->sk_callback_lock); + sock->sk->sk_user_data = sock->sk->sk_data_ready; + sock->sk->sk_data_ready = o2net_listen_data_ready; + write_unlock_bh(&sock->sk->sk_callback_lock); + + o2net_listen_sock = sock; + INIT_WORK(&o2net_listen_work, o2net_accept_many); + + sock->sk->sk_reuse = 1; + ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); + if (ret < 0) { + mlog(ML_ERROR, "unable to bind socket at %u.%u.%u.%u:%u, " + "ret=%d\n", NIPQUAD(addr), ntohs(port), ret); + goto out; + } + + ret = sock->ops->listen(sock, 64); + if (ret < 0) { + mlog(ML_ERROR, "unable to listen on %u.%u.%u.%u:%u, ret=%d\n", + NIPQUAD(addr), ntohs(port), ret); + } + +out: + if (ret) { + o2net_listen_sock = NULL; + if (sock) + sock_release(sock); + } + return ret; +} + +/* + * called from node manager when we should bring up our network listening + * socket. node manager handles all the serialization to only call this + * once and to match it with o2net_stop_listening(). note, + * o2nm_this_node() doesn't work yet as we're being called while it + * is being set up. + */ +int o2net_start_listening(struct o2nm_node *node) +{ + int ret = 0; + + BUG_ON(o2net_wq != NULL); + BUG_ON(o2net_listen_sock != NULL); + + mlog(ML_KTHREAD, "starting o2net thread...\n"); + o2net_wq = create_singlethread_workqueue("o2net"); + if (o2net_wq == NULL) { + mlog(ML_ERROR, "unable to launch o2net thread\n"); + return -ENOMEM; /* ? 
*/ + } + + ret = o2net_open_listening_sock(node->nd_ipv4_address, + node->nd_ipv4_port); + if (ret) { + destroy_workqueue(o2net_wq); + o2net_wq = NULL; + } else + o2quo_conn_up(node->nd_num); + + return ret; +} + +/* again, o2nm_this_node() doesn't work here as we're involved in + * tearing it down */ +void o2net_stop_listening(struct o2nm_node *node) +{ + struct socket *sock = o2net_listen_sock; + size_t i; + + BUG_ON(o2net_wq == NULL); + BUG_ON(o2net_listen_sock == NULL); + + /* stop the listening socket from generating work */ + write_lock_bh(&sock->sk->sk_callback_lock); + sock->sk->sk_data_ready = sock->sk->sk_user_data; + sock->sk->sk_user_data = NULL; + write_unlock_bh(&sock->sk->sk_callback_lock); + + for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) { + struct o2nm_node *node = o2nm_get_node_by_num(i); + if (node) { + o2net_disconnect_node(node); + o2nm_node_put(node); + } + } + + /* finish all work and tear down the work queue */ + mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n"); + destroy_workqueue(o2net_wq); + o2net_wq = NULL; + + sock_release(o2net_listen_sock); + o2net_listen_sock = NULL; + + o2quo_conn_err(node->nd_num); +} + +/* ------------------------------------------------------------ */ + +int o2net_init(void) +{ + unsigned long i; + + o2quo_init(); + + if (o2net_debugfs_init()) + return -ENOMEM; + + o2net_hand = kzalloc(sizeof(struct o2net_handshake), GFP_KERNEL); + o2net_keep_req = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL); + o2net_keep_resp = kzalloc(sizeof(struct o2net_msg), GFP_KERNEL); + if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) { + kfree(o2net_hand); + kfree(o2net_keep_req); + kfree(o2net_keep_resp); + return -ENOMEM; + } + + o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION); + o2net_hand->connector_id = cpu_to_be64(1); + + o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC); + o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC); + + for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) { + struct o2net_node *nn = o2net_nn_from_num(i); + + atomic_set(&nn->nn_timeout, 0); + spin_lock_init(&nn->nn_lock); + INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); + INIT_DELAYED_WORK(&nn->nn_connect_expired, + o2net_connect_expired); + INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); + /* until we see hb from a node we'll return einval */ + nn->nn_persistent_error = -ENOTCONN; + init_waitqueue_head(&nn->nn_sc_wq); + idr_init(&nn->nn_status_idr); + INIT_LIST_HEAD(&nn->nn_status_list); + } + + return 0; +} + +void o2net_exit(void) +{ + o2quo_exit(); + kfree(o2net_hand); + kfree(o2net_keep_req); + kfree(o2net_keep_resp); + o2net_debugfs_exit(); +} diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h new file mode 100644 index 0000000..fd6179e --- /dev/null +++ b/fs/ocfs2/cluster/tcp.h @@ -0,0 +1,152 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * tcp.h + * + * Function prototypes + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + +#ifndef O2CLUSTER_TCP_H +#define O2CLUSTER_TCP_H + +#include <linux/socket.h> +#ifdef __KERNEL__ +#include <net/sock.h> +#include <linux/tcp.h> +#else +#include <sys/socket.h> +#endif +#include <linux/inet.h> +#include <linux/in.h> + +struct o2net_msg +{ + __be16 magic; + __be16 data_len; + __be16 msg_type; + __be16 pad1; + __be32 sys_status; + __be32 status; + __be32 key; + __be32 msg_num; + __u8 buf[0]; +}; + +typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +typedef void (o2net_post_msg_handler_func)(int status, void *data, + void *ret_data); + +#define O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct o2net_msg)) + +/* same as hb delay, we're waiting for another node to recognize our hb */ +#define O2NET_RECONNECT_DELAY_MS_DEFAULT 2000 + +#define O2NET_KEEPALIVE_DELAY_MS_DEFAULT 2000 +#define O2NET_IDLE_TIMEOUT_MS_DEFAULT 30000 + + +/* TODO: figure this out.... */ +static inline int o2net_link_down(int err, struct socket *sock) +{ + if (sock) { + if (sock->sk->sk_state != TCP_ESTABLISHED && + sock->sk->sk_state != TCP_CLOSE_WAIT) + return 1; + } + + if (err >= 0) + return 0; + switch (err) { + /* ????????????????????????? */ + case -ERESTARTSYS: + case -EBADF: + /* When the server has died, an ICMP port unreachable + * message prompts ECONNREFUSED. */ + case -ECONNREFUSED: + case -ENOTCONN: + case -ECONNRESET: + case -EPIPE: + return 1; + } + return 0; +} + +enum { + O2NET_DRIVER_UNINITED, + O2NET_DRIVER_READY, +}; + +int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len, + u8 target_node, int *status); +int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec, + size_t veclen, u8 target_node, int *status); + +int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, + o2net_msg_handler_func *func, void *data, + o2net_post_msg_handler_func *post_func, + struct list_head *unreg_list); +void o2net_unregister_handler_list(struct list_head *list); + +struct o2nm_node; +int o2net_register_hb_callbacks(void); +void o2net_unregister_hb_callbacks(void); +int o2net_start_listening(struct o2nm_node *node); +void o2net_stop_listening(struct o2nm_node *node); +void o2net_disconnect_node(struct o2nm_node *node); +int o2net_num_connected_peers(void); + +int o2net_init(void); +void o2net_exit(void); + +struct o2net_send_tracking; +struct o2net_sock_container; + +#ifdef CONFIG_DEBUG_FS +int o2net_debugfs_init(void); +void o2net_debugfs_exit(void); +void o2net_debug_add_nst(struct o2net_send_tracking *nst); +void o2net_debug_del_nst(struct o2net_send_tracking *nst); +void o2net_debug_add_sc(struct o2net_sock_container *sc); +void o2net_debug_del_sc(struct o2net_sock_container *sc); +#else +static inline int o2net_debugfs_init(void) +{ + return 0; +} +static inline void o2net_debugfs_exit(void) +{ +} +static inline void o2net_debug_add_nst(struct o2net_send_tracking *nst) +{ +} +static inline void o2net_debug_del_nst(struct o2net_send_tracking *nst) +{ +} +static inline void o2net_debug_add_sc(struct o2net_sock_container *sc) +{ +} +static inline void o2net_debug_del_sc(struct o2net_sock_container *sc) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* O2CLUSTER_TCP_H */ diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h new file mode 100644 index 0000000..8d58cfe --- 
/dev/null +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -0,0 +1,233 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * Copyright (C) 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef O2CLUSTER_TCP_INTERNAL_H +#define O2CLUSTER_TCP_INTERNAL_H + +#define O2NET_MSG_MAGIC ((u16)0xfa55) +#define O2NET_MSG_STATUS_MAGIC ((u16)0xfa56) +#define O2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57) +#define O2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58) + +/* we're delaying our quorum decision so that heartbeat will have timed + * out truly dead nodes by the time we come around to making decisions + * on their number */ +#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) + +/* + * This version number represents quite a lot, unfortunately. It not + * only represents the raw network message protocol on the wire but also + * locking semantics of the file system using the protocol. It should + * be somewhere else, I'm sure, but right now it isn't. + * + * With version 11, we separate out the filesystem locking portion. The + * filesystem now has a major.minor version it negotiates. Version 11 + * introduces this negotiation to the o2dlm protocol, and as such the + * version here in tcp_internal.h should not need to be bumped for + * filesystem locking changes. + * + * New in version 11 + * - Negotiation of filesystem locking in the dlm join. + * + * New in version 10: + * - Meta/data locks combined + * + * New in version 9: + * - All votes removed + * + * New in version 8: + * - Replace delete inode votes with a cluster lock + * + * New in version 7: + * - DLM join domain includes the live nodemap + * + * New in version 6: + * - DLM lockres remote refcount fixes. + * + * New in version 5: + * - Network timeout checking protocol + * + * New in version 4: + * - Remove i_generation from lock names for better stat performance. + * + * New in version 3: + * - Replace dentry votes with a cluster lock + * + * New in version 2: + * - full 64 bit i_size in the metadata lock lvbs + * - introduction of "rw" lock and pushing meta/data locking down + */ +#define O2NET_PROTOCOL_VERSION 11ULL +struct o2net_handshake { + __be64 protocol_version; + __be64 connector_id; + __be32 o2hb_heartbeat_timeout_ms; + __be32 o2net_idle_timeout_ms; + __be32 o2net_keepalive_delay_ms; + __be32 o2net_reconnect_delay_ms; +}; + +struct o2net_node { + /* this is never called from int/bh */ + spinlock_t nn_lock; + + /* set the moment an sc is allocated and a connect is started */ + struct o2net_sock_container *nn_sc; + /* _valid is only set after the handshake passes and tx can happen */ + unsigned nn_sc_valid:1; + /* if this is set tx just returns it */ + int nn_persistent_error; + /* It is only set to 1 after the idle time out. 
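 + * (o2net_idle_timer() sets it, o2net_start_connect() reads it so that a reconnect can proceed past the persistent -ENOTCONN, and it is cleared again once a handshake completes, heartbeat returns, or the node is disconnected.)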
*/ + atomic_t nn_timeout; + + /* threads waiting for an sc to arrive wait on the wq for generation + * to increase. it is increased when a connecting socket succeeds + * or fails or when an accepted socket is attached. */ + wait_queue_head_t nn_sc_wq; + + struct idr nn_status_idr; + struct list_head nn_status_list; + + /* connects are attempted from when heartbeat comes up until either hb + * goes down, the node is unconfigured, no connect attempts succeed + * before O2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work + * is queued from set_nn_state both from hb up and from itself if a + * connect attempt fails and so can be self-arming. shutdown is + * careful to first mark the nn such that no connects will be attempted + * before canceling delayed connect work and flushing the queue. */ + struct delayed_work nn_connect_work; + unsigned long nn_last_connect_attempt; + + /* this is queued as nodes come up and is canceled when a connection is + * established. this expiring gives up on the node and errors out + * transmits */ + struct delayed_work nn_connect_expired; + + /* after we give up on a socket we wait a while before deciding + * that it is still heartbeating and that we should do some + * quorum work */ + struct delayed_work nn_still_up; +}; + +struct o2net_sock_container { + struct kref sc_kref; + /* the next two are valid for the lifetime of the sc */ + struct socket *sc_sock; + struct o2nm_node *sc_node; + + /* all of these sc work structs hold refs on the sc while they are + * queued. they should not be able to ref a freed sc. the teardown + * race is with o2net_wq destruction in o2net_stop_listening() */ + + /* rx and connect work are generated from socket callbacks. sc + * shutdown removes the callbacks and then flushes the work queue */ + struct work_struct sc_rx_work; + struct work_struct sc_connect_work; + /* shutdown work is triggered in two ways. the simple way is + * for a code path to call ensure_shutdown which gets a lock, removes + * the sc from the nn, and queues the work. in this case the + * work is single-shot. the work is also queued from a sock + * callback, though, and in this case the work will find the sc + * still on the nn and will call ensure_shutdown itself.. this + * ends up triggering the shutdown work again, though nothing + * will be done in that second iteration. so work queue teardown + * has to be careful to remove the sc from the nn before waiting + * on the work queue so that the shutdown work doesn't remove the + * sc and rearm itself.
+ */ + struct work_struct sc_shutdown_work; + + struct timer_list sc_idle_timeout; + struct delayed_work sc_keepalive_work; + + unsigned sc_handshake_ok:1; + + struct page *sc_page; + size_t sc_page_off; + + /* original handlers for the sockets */ + void (*sc_state_change)(struct sock *sk); + void (*sc_data_ready)(struct sock *sk, int bytes); +#ifdef CONFIG_DEBUG_FS + struct list_head sc_net_debug_item; +#endif + struct timeval sc_tv_timer; + struct timeval sc_tv_data_ready; + struct timeval sc_tv_advance_start; + struct timeval sc_tv_advance_stop; + struct timeval sc_tv_func_start; + struct timeval sc_tv_func_stop; + u32 sc_msg_key; + u16 sc_msg_type; + + struct mutex sc_send_lock; +}; + +struct o2net_msg_handler { + struct rb_node nh_node; + u32 nh_max_len; + u32 nh_msg_type; + u32 nh_key; + o2net_msg_handler_func *nh_func; + o2net_msg_handler_func *nh_func_data; + o2net_post_msg_handler_func + *nh_post_func; + struct kref nh_kref; + struct list_head nh_unregister_item; +}; + +enum o2net_system_error { + O2NET_ERR_NONE = 0, + O2NET_ERR_NO_HNDLR, + O2NET_ERR_OVERFLOW, + O2NET_ERR_DIED, + O2NET_ERR_MAX +}; + +struct o2net_status_wait { + enum o2net_system_error ns_sys_status; + s32 ns_status; + int ns_id; + wait_queue_head_t ns_wq; + struct list_head ns_node_item; +}; + +#ifdef CONFIG_DEBUG_FS +/* just for state dumps */ +struct o2net_send_tracking { + struct list_head st_net_debug_item; + struct task_struct *st_task; + struct o2net_sock_container *st_sc; + u32 st_id; + u32 st_msg_type; + u32 st_msg_key; + u8 st_node; + struct timeval st_sock_time; + struct timeval st_send_time; + struct timeval st_status_time; +}; +#else +struct o2net_send_tracking { + u32 dummy; +}; +#endif /* CONFIG_DEBUG_FS */ + +#endif /* O2CLUSTER_TCP_INTERNAL_H */ diff --git a/fs/ocfs2/cluster/ver.c b/fs/ocfs2/cluster/ver.c new file mode 100644 index 0000000..a56eee6 --- /dev/null +++ b/fs/ocfs2/cluster/ver.c @@ -0,0 +1,42 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ver.c + * + * version string + * + * Copyright (C) 2002, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/module.h> +#include <linux/kernel.h> + +#include "ver.h" + +#define CLUSTER_BUILD_VERSION "1.5.0" + +#define VERSION_STR "OCFS2 Node Manager " CLUSTER_BUILD_VERSION + +void cluster_print_version(void) +{ + printk(KERN_INFO "%s\n", VERSION_STR); +} + +MODULE_DESCRIPTION(VERSION_STR); + +MODULE_VERSION(CLUSTER_BUILD_VERSION); diff --git a/fs/ocfs2/cluster/ver.h b/fs/ocfs2/cluster/ver.h new file mode 100644 index 0000000..32554c3 --- /dev/null +++ b/fs/ocfs2/cluster/ver.h @@ -0,0 +1,31 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ver.h + * + * Function prototypes + * + * Copyright (C) 2005 Oracle. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef O2CLUSTER_VER_H +#define O2CLUSTER_VER_H + +void cluster_print_version(void); + +#endif /* O2CLUSTER_VER_H */ diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c new file mode 100644 index 0000000..b1cc7c3 --- /dev/null +++ b/fs/ocfs2/dcache.c @@ -0,0 +1,425 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dcache.c + * + * dentry cache handling code + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/namei.h> + +#define MLOG_MASK_PREFIX ML_DCACHE +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dcache.h" +#include "dlmglue.h" +#include "file.h" +#include "inode.h" + + +static int ocfs2_dentry_revalidate(struct dentry *dentry, + struct nameidata *nd) +{ + struct inode *inode = dentry->d_inode; + int ret = 0; /* if all else fails, just return false */ + struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); + + mlog_entry("(0x%p, '%.*s')\n", dentry, + dentry->d_name.len, dentry->d_name.name); + + /* Never trust a negative dentry - force a new lookup. */ + if (inode == NULL) { + mlog(0, "negative dentry: %.*s\n", dentry->d_name.len, + dentry->d_name.name); + goto bail; + } + + BUG_ON(!osb); + + if (inode == osb->root_inode || is_bad_inode(inode)) + goto bail; + + spin_lock(&OCFS2_I(inode)->ip_lock); + /* did we or someone else delete this inode? */ + if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { + spin_unlock(&OCFS2_I(inode)->ip_lock); + mlog(0, "inode (%llu) deleted, returning false\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + goto bail; + } + spin_unlock(&OCFS2_I(inode)->ip_lock); + + /* + * We don't need a cluster lock to test this because once an + * inode nlink hits zero, it never goes back. 
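 + * (Returning 0 from here, per the d_revalidate() contract, simply makes the VFS throw the dentry away and repeat the lookup, so it is the safe, if slower, answer.)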
+ */ + if (inode->i_nlink == 0) { + mlog(0, "Inode %llu orphaned, returning false " + "dir = %d\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + S_ISDIR(inode->i_mode)); + goto bail; + } + + ret = 1; + +bail: + mlog_exit(ret); + + return ret; +} + +static int ocfs2_match_dentry(struct dentry *dentry, + u64 parent_blkno, + int skip_unhashed) +{ + struct inode *parent; + + /* + * ocfs2_lookup() does a d_splice_alias() _before_ attaching + * to the lock data, so we skip those here, otherwise + * ocfs2_dentry_attach_lock() will get its original dentry + * back. + */ + if (!dentry->d_fsdata) + return 0; + + if (!dentry->d_parent) + return 0; + + if (skip_unhashed && d_unhashed(dentry)) + return 0; + + parent = dentry->d_parent->d_inode; + /* Negative parent dentry? */ + if (!parent) + return 0; + + /* Name is in a different directory. */ + if (OCFS2_I(parent)->ip_blkno != parent_blkno) + return 0; + + return 1; +} + +/* + * Walk the inode alias list, and find a dentry which has a given + * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it + * is looking for a dentry_lock reference. The downconvert thread is + * looking to unhash aliases, so we allow it to skip any that already + * have that property. + */ +struct dentry *ocfs2_find_local_alias(struct inode *inode, + u64 parent_blkno, + int skip_unhashed) +{ + struct list_head *p; + struct dentry *dentry = NULL; + + spin_lock(&dcache_lock); + + list_for_each(p, &inode->i_dentry) { + dentry = list_entry(p, struct dentry, d_alias); + + if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { + mlog(0, "dentry found: %.*s\n", + dentry->d_name.len, dentry->d_name.name); + + dget_locked(dentry); + break; + } + + dentry = NULL; + } + + spin_unlock(&dcache_lock); + + return dentry; +} + +DEFINE_SPINLOCK(dentry_attach_lock); + +/* + * Attach this dentry to a cluster lock. + * + * Dentry locks cover all links in a given directory to a particular + * inode. We do this so that ocfs2 can build a lock name which all + * nodes in the cluster can agree on at all times. Shoving full names + * in the cluster lock won't work due to size restrictions. Covering + * links inside of a directory is a good compromise because it still + * allows us to use the parent directory lock to synchronize + * operations. + * + * Call this function with the parent dir semaphore and the parent dir + * cluster lock held. + * + * The dir semaphore will protect us from having to worry about + * concurrent processes on our node trying to attach a lock at the + * same time. + * + * The dir cluster lock (held at either PR or EX mode) protects us + * from unlink and rename on other nodes. + * + * A dput() can happen asynchronously due to pruning, so we cover + * attaching and detaching the dentry lock with a + * dentry_attach_lock. + * + * A node which has done lookup on a name retains a protected read + * lock until final dput. If the user requests an unlink or rename, + * the protected read is upgraded to an exclusive lock. Other nodes + * who have seen the dentry will then be informed that they need to + * downgrade their lock, which will involve d_delete on the + * dentry. This happens in ocfs2_dentry_convert_worker().
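 + * Concretely, as a sketch of the idea rather than the exact lock name format (ocfs2_dentry_lock_res_init() in dlmglue.c builds the real name), the lock is keyed on the (parent directory blkno, inode blkno) pair: + * + *	ocfs2_dentry_lock_res_init(dl, parent_blkno, inode); + * + * so every link to an inode from one directory shares a single dentry lock and the lock name stays a fixed size no matter how long the names get.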
+ */ +int ocfs2_dentry_attach_lock(struct dentry *dentry, + struct inode *inode, + u64 parent_blkno) +{ + int ret; + struct dentry *alias; + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + + mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n", + dentry->d_name.len, dentry->d_name.name, + (unsigned long long)parent_blkno, dl); + + /* + * Negative dentry. We ignore these for now. + * + * XXX: Could we improve ocfs2_dentry_revalidate() by + * tracking these? + */ + if (!inode) + return 0; + + if (dl) { + mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno, + " \"%.*s\": old parent: %llu, new: %llu\n", + dentry->d_name.len, dentry->d_name.name, + (unsigned long long)parent_blkno, + (unsigned long long)dl->dl_parent_blkno); + return 0; + } + + alias = ocfs2_find_local_alias(inode, parent_blkno, 0); + if (alias) { + /* + * Great, an alias exists, which means we must have a + * dentry lock already. We can just grab the lock off + * the alias and add it to the list. + * + * We're depending here on the fact that this dentry + * was found and exists in the dcache and so must have + * a reference to the dentry_lock because we can't + * race creates. Final dput() cannot happen on it + * since we have it pinned, so our reference is safe. + */ + dl = alias->d_fsdata; + mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n", + (unsigned long long)parent_blkno, + (unsigned long long)OCFS2_I(inode)->ip_blkno); + + mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno, + " \"%.*s\": old parent: %llu, new: %llu\n", + dentry->d_name.len, dentry->d_name.name, + (unsigned long long)parent_blkno, + (unsigned long long)dl->dl_parent_blkno); + + mlog(0, "Found: %s\n", dl->dl_lockres.l_name); + + goto out_attach; + } + + /* + * There are no other aliases + */ + dl = kmalloc(sizeof(*dl), GFP_NOFS); + if (!dl) { + ret = -ENOMEM; + mlog_errno(ret); + return ret; + } + + dl->dl_count = 0; + /* + * Does this have to happen below, for all attaches, in case + * the struct inode gets blown away by the downconvert thread? + */ + dl->dl_inode = igrab(inode); + dl->dl_parent_blkno = parent_blkno; + ocfs2_dentry_lock_res_init(dl, parent_blkno, inode); + +out_attach: + spin_lock(&dentry_attach_lock); + dentry->d_fsdata = dl; + dl->dl_count++; + spin_unlock(&dentry_attach_lock); + + /* + * This actually gets us our PRMODE level lock. From now on, + * we'll have a notification if one of these names is + * destroyed on another node. + */ + ret = ocfs2_dentry_lock(dentry, 0); + if (!ret) + ocfs2_dentry_unlock(dentry, 0); + else + mlog_errno(ret); + + dput(alias); + + return ret; +} + +/* + * ocfs2_dentry_iput() and friends. + * + * At this point, our particular dentry is detached from the inode's + * alias list, so there's no way that the locking code can find it. + * + * The interesting stuff happens when we determine that our lock needs + * to go away because this is the last subdir alias in the + * system. This function needs to handle a couple things: + * + * 1) Synchronizing lock shutdown with the downconvert threads. This + * is already handled for us via the lockres release drop function + * called in ocfs2_release_dentry_lock() + * + * 2) A race may occur when we're doing our lock shutdown and + * another process wants to create a new dentry lock. Right now we + * let them race, which means that for a very short while, this + * node might have two locks on a lock resource. This should not be a + * problem though because one of them is in the process of being + * thrown out.
+ */ +static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb, + struct ocfs2_dentry_lock *dl) +{ + iput(dl->dl_inode); + ocfs2_simple_drop_lockres(osb, &dl->dl_lockres); + ocfs2_lock_res_free(&dl->dl_lockres); + kfree(dl); +} + +void ocfs2_dentry_lock_put(struct ocfs2_super *osb, + struct ocfs2_dentry_lock *dl) +{ + int unlock = 0; + + BUG_ON(dl->dl_count == 0); + + spin_lock(&dentry_attach_lock); + dl->dl_count--; + unlock = !dl->dl_count; + spin_unlock(&dentry_attach_lock); + + if (unlock) + ocfs2_drop_dentry_lock(osb, dl); +} + +static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode) +{ + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + + if (!dl) { + /* + * No dentry lock is ok if we're disconnected or + * unhashed. + */ + if (!(dentry->d_flags & DCACHE_DISCONNECTED) && + !d_unhashed(dentry)) { + unsigned long long ino = 0ULL; + if (inode) + ino = (unsigned long long)OCFS2_I(inode)->ip_blkno; + mlog(ML_ERROR, "Dentry is missing cluster lock. " + "inode: %llu, d_flags: 0x%x, d_name: %.*s\n", + ino, dentry->d_flags, dentry->d_name.len, + dentry->d_name.name); + } + + goto out; + } + + mlog_bug_on_msg(dl->dl_count == 0, "dentry: %.*s, count: %u\n", + dentry->d_name.len, dentry->d_name.name, + dl->dl_count); + + ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl); + +out: + iput(inode); +} + +/* + * d_move(), but keep the locks in sync. + * + * When we are done, "dentry" will have the parent dir and name of + * "target", which will be thrown away. + * + * We manually update the lock of "dentry" if need be. + * + * "target" doesn't have its dentry lock touched - we allow the later + * dput() to handle this for us. + * + * This is called during ocfs2_rename(), while holding parent + * directory locks. The dentries have already been deleted on other + * nodes via ocfs2_remote_dentry_delete(). + * + * Normally, the VFS handles the d_move() for the file system, after + * the ->rename() callback. OCFS2 wants to handle this internally, so + * the new lock can be created atomically with respect to the cluster. + */ +void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target, + struct inode *old_dir, struct inode *new_dir) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb); + struct inode *inode = dentry->d_inode; + + /* + * Move within the same directory, so the actual lock info won't + * change. + * + * XXX: Is there any advantage to dropping the lock here? + */ + if (old_dir == new_dir) + goto out_move; + + ocfs2_dentry_lock_put(osb, dentry->d_fsdata); + + dentry->d_fsdata = NULL; + ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno); + if (ret) + mlog_errno(ret); + +out_move: + d_move(dentry, target); +} + +struct dentry_operations ocfs2_dentry_ops = { + .d_revalidate = ocfs2_dentry_revalidate, + .d_iput = ocfs2_dentry_iput, +}; diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h new file mode 100644 index 0000000..c091c34 --- /dev/null +++ b/fs/ocfs2/dcache.h @@ -0,0 +1,58 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dcache.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_DCACHE_H +#define OCFS2_DCACHE_H + +extern struct dentry_operations ocfs2_dentry_ops; + +struct ocfs2_dentry_lock { + unsigned int dl_count; + u64 dl_parent_blkno; + + /* + * The ocfs2_dentry_lock keeps an inode reference until + * dl_lockres has been destroyed. This is usually done in + * ->d_iput() anyway, so there should be minimal impact. + */ + struct inode *dl_inode; + struct ocfs2_lock_res dl_lockres; +}; + +int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode, + u64 parent_blkno); + +void ocfs2_dentry_lock_put(struct ocfs2_super *osb, + struct ocfs2_dentry_lock *dl); + +struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno, + int skip_unhashed); + +void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target, + struct inode *old_dir, struct inode *new_dir); + +extern spinlock_t dentry_attach_lock; + +#endif /* OCFS2_DCACHE_H */ diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c new file mode 100644 index 0000000..026e6eb --- /dev/null +++ b/fs/ocfs2/dir.c @@ -0,0 +1,1799 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dir.c + * + * Creates, reads, walks and deletes directory-nodes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * Portions of this code from linux/fs/ext3/dir.c + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/fs/minix/dir.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA.
+ */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> + +#define MLOG_MASK_PREFIX ML_NAMEI +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dir.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "file.h" +#include "inode.h" +#include "journal.h" +#include "namei.h" +#include "suballoc.h" +#include "super.h" +#include "uptodate.h" + +#include "buffer_head_io.h" + +#define NAMEI_RA_CHUNKS 2 +#define NAMEI_RA_BLOCKS 4 +#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) +#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b)) + +static unsigned char ocfs2_filetype_table[] = { + DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK +}; + +static int ocfs2_extend_dir(struct ocfs2_super *osb, + struct inode *dir, + struct buffer_head *parent_fe_bh, + unsigned int blocks_wanted, + struct buffer_head **new_de_bh); +static int ocfs2_do_extend_dir(struct super_block *sb, + handle_t *handle, + struct inode *dir, + struct buffer_head *parent_fe_bh, + struct ocfs2_alloc_context *data_ac, + struct ocfs2_alloc_context *meta_ac, + struct buffer_head **new_bh); + +static struct buffer_head *ocfs2_bread(struct inode *inode, + int block, int *err, int reada) +{ + struct buffer_head *bh = NULL; + int tmperr; + u64 p_blkno; + int readflags = 0; + + if (reada) + readflags |= OCFS2_BH_READAHEAD; + + if (((u64)block << inode->i_sb->s_blocksize_bits) >= + i_size_read(inode)) { + BUG_ON(!reada); + return NULL; + } + + down_read(&OCFS2_I(inode)->ip_alloc_sem); + tmperr = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, + NULL); + up_read(&OCFS2_I(inode)->ip_alloc_sem); + if (tmperr < 0) { + mlog_errno(tmperr); + goto fail; + } + + tmperr = ocfs2_read_blocks(inode, p_blkno, 1, &bh, readflags); + if (tmperr < 0) + goto fail; + + tmperr = 0; + + *err = 0; + return bh; + +fail: + brelse(bh); + bh = NULL; + + *err = -EIO; + return NULL; +} + +/* + * bh passed here can be an inode block or a dir data block, depending + * on the inode inline data flag. + */ +static int ocfs2_check_dir_entry(struct inode * dir, + struct ocfs2_dir_entry * de, + struct buffer_head * bh, + unsigned long offset) +{ + const char *error_msg = NULL; + const int rlen = le16_to_cpu(de->rec_len); + + if (rlen < OCFS2_DIR_REC_LEN(1)) + error_msg = "rec_len is smaller than minimal"; + else if (rlen % 4 != 0) + error_msg = "rec_len % 4 != 0"; + else if (rlen < OCFS2_DIR_REC_LEN(de->name_len)) + error_msg = "rec_len is too small for name_len"; + else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) + error_msg = "directory entry across blocks"; + + if (error_msg != NULL) + mlog(ML_ERROR, "bad entry in directory #%llu: %s - " + "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n", + (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg, + offset, (unsigned long long)le64_to_cpu(de->inode), rlen, + de->name_len); + return error_msg == NULL ? 
1 : 0; +} + +static inline int ocfs2_match(int len, + const char * const name, + struct ocfs2_dir_entry *de) +{ + if (len != de->name_len) + return 0; + if (!de->inode) + return 0; + return !memcmp(name, de->name, len); +} + +/* + * Returns 0 if not found, -1 on failure, and 1 on success + */ +static int inline ocfs2_search_dirblock(struct buffer_head *bh, + struct inode *dir, + const char *name, int namelen, + unsigned long offset, + char *first_de, + unsigned int bytes, + struct ocfs2_dir_entry **res_dir) +{ + struct ocfs2_dir_entry *de; + char *dlimit, *de_buf; + int de_len; + int ret = 0; + + mlog_entry_void(); + + de_buf = first_de; + dlimit = de_buf + bytes; + + while (de_buf < dlimit) { + /* this code is executed quadratically often */ + /* do minimal checking `by hand' */ + + de = (struct ocfs2_dir_entry *) de_buf; + + if (de_buf + namelen <= dlimit && + ocfs2_match(namelen, name, de)) { + /* found a match - just to be sure, do a full check */ + if (!ocfs2_check_dir_entry(dir, de, bh, offset)) { + ret = -1; + goto bail; + } + *res_dir = de; + ret = 1; + goto bail; + } + + /* prevent looping on a bad block */ + de_len = le16_to_cpu(de->rec_len); + if (de_len <= 0) { + ret = -1; + goto bail; + } + + de_buf += de_len; + offset += de_len; + } + +bail: + mlog_exit(ret); + return ret; +} + +static struct buffer_head *ocfs2_find_entry_id(const char *name, + int namelen, + struct inode *dir, + struct ocfs2_dir_entry **res_dir) +{ + int ret, found; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_inline_data *data; + + ret = ocfs2_read_block(dir, OCFS2_I(dir)->ip_blkno, &di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + di = (struct ocfs2_dinode *)di_bh->b_data; + data = &di->id2.i_data; + + found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0, + data->id_data, i_size_read(dir), res_dir); + if (found == 1) + return di_bh; + + brelse(di_bh); +out: + return NULL; +} + +static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen, + struct inode *dir, + struct ocfs2_dir_entry **res_dir) +{ + struct super_block *sb; + struct buffer_head *bh_use[NAMEI_RA_SIZE]; + struct buffer_head *bh, *ret = NULL; + unsigned long start, block, b; + int ra_max = 0; /* Number of bh's in the readahead + buffer, bh_use[] */ + int ra_ptr = 0; /* Current index into readahead + buffer */ + int num = 0; + int nblocks, i, err; + + mlog_entry_void(); + + sb = dir->i_sb; + + nblocks = i_size_read(dir) >> sb->s_blocksize_bits; + start = OCFS2_I(dir)->ip_dir_start_lookup; + if (start >= nblocks) + start = 0; + block = start; + +restart: + do { + /* + * We deal with the read-ahead logic here. + */ + if (ra_ptr >= ra_max) { + /* Refill the readahead buffer */ + ra_ptr = 0; + b = block; + for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) { + /* + * Terminate if we reach the end of the + * directory and must wrap, or if our + * search has finished at this block. + */ + if (b >= nblocks || (num && block == start)) { + bh_use[ra_max] = NULL; + break; + } + num++; + + bh = ocfs2_bread(dir, b++, &err, 1); + bh_use[ra_max] = bh; + } + } + if ((bh = bh_use[ra_ptr++]) == NULL) + goto next; + if (ocfs2_read_block(dir, block, &bh)) { + /* read error, skip block & hope for the best. + * ocfs2_read_block() has released the bh. 
*/ + ocfs2_error(dir->i_sb, "reading directory %llu, " + "offset %lu\n", + (unsigned long long)OCFS2_I(dir)->ip_blkno, + block); + goto next; + } + i = ocfs2_search_dirblock(bh, dir, name, namelen, + block << sb->s_blocksize_bits, + bh->b_data, sb->s_blocksize, + res_dir); + if (i == 1) { + OCFS2_I(dir)->ip_dir_start_lookup = block; + ret = bh; + goto cleanup_and_exit; + } else { + brelse(bh); + if (i < 0) + goto cleanup_and_exit; + } + next: + if (++block >= nblocks) + block = 0; + } while (block != start); + + /* + * If the directory has grown while we were searching, then + * search the last part of the directory before giving up. + */ + block = nblocks; + nblocks = i_size_read(dir) >> sb->s_blocksize_bits; + if (block < nblocks) { + start = 0; + goto restart; + } + +cleanup_and_exit: + /* Clean up the read-ahead blocks */ + for (; ra_ptr < ra_max; ra_ptr++) + brelse(bh_use[ra_ptr]); + + mlog_exit_ptr(ret); + return ret; +} + +/* + * Try to find an entry of the provided name within 'dir'. + * + * If nothing was found, NULL is returned. Otherwise, a buffer_head + * and pointer to the dir entry are passed back. + * + * Caller can NOT assume anything about the contents of the + * buffer_head - it is passed back only so that it can be passed into + * any one of the manipulation functions (add entry, delete entry, + * etc). As an example, bh in the extent directory case is a data + * block, in the inline-data case it actually points to an inode. + */ +struct buffer_head *ocfs2_find_entry(const char *name, int namelen, + struct inode *dir, + struct ocfs2_dir_entry **res_dir) +{ + *res_dir = NULL; + + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return ocfs2_find_entry_id(name, namelen, dir, res_dir); + + return ocfs2_find_entry_el(name, namelen, dir, res_dir); +} + +/* + * Update inode number and type of a previously found directory entry. + */ +int ocfs2_update_entry(struct inode *dir, handle_t *handle, + struct buffer_head *de_bh, struct ocfs2_dir_entry *de, + struct inode *new_entry_inode) +{ + int ret; + + /* + * The same code works fine for both inline-data and extent + * based directories, so no need to split this up. 
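+ *
+ * A note on the sequence that follows, since it recurs all over this
+ * file: the buffer is handed to the journal via ocfs2_journal_access()
+ * before any bytes are changed, so the journal layer can make whatever
+ * copy of the old data it needs, and ocfs2_journal_dirty() is called
+ * once the update is complete.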
+ */ + + ret = ocfs2_journal_access(handle, dir, de_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno); + ocfs2_set_de_type(de, new_entry_inode->i_mode); + + ocfs2_journal_dirty(handle, de_bh); + +out: + return ret; +} + +static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, + struct ocfs2_dir_entry *de_del, + struct buffer_head *bh, char *first_de, + unsigned int bytes) +{ + struct ocfs2_dir_entry *de, *pde; + int i, status = -ENOENT; + + mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh); + + i = 0; + pde = NULL; + de = (struct ocfs2_dir_entry *) first_de; + while (i < bytes) { + if (!ocfs2_check_dir_entry(dir, de, bh, i)) { + status = -EIO; + mlog_errno(status); + goto bail; + } + if (de == de_del) { + status = ocfs2_journal_access(handle, dir, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + status = -EIO; + mlog_errno(status); + goto bail; + } + if (pde) + le16_add_cpu(&pde->rec_len, + le16_to_cpu(de->rec_len)); + else + de->inode = 0; + dir->i_version++; + status = ocfs2_journal_dirty(handle, bh); + goto bail; + } + i += le16_to_cpu(de->rec_len); + pde = de; + de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len)); + } +bail: + mlog_exit(status); + return status; +} + +static inline int ocfs2_delete_entry_id(handle_t *handle, + struct inode *dir, + struct ocfs2_dir_entry *de_del, + struct buffer_head *bh) +{ + int ret; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_inline_data *data; + + ret = ocfs2_read_block(dir, OCFS2_I(dir)->ip_blkno, &di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + di = (struct ocfs2_dinode *)di_bh->b_data; + data = &di->id2.i_data; + + ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data, + i_size_read(dir)); + + brelse(di_bh); +out: + return ret; +} + +static inline int ocfs2_delete_entry_el(handle_t *handle, + struct inode *dir, + struct ocfs2_dir_entry *de_del, + struct buffer_head *bh) +{ + return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data, + bh->b_size); +} + +/* + * ocfs2_delete_entry deletes a directory entry by merging it with the + * previous entry + */ +int ocfs2_delete_entry(handle_t *handle, + struct inode *dir, + struct ocfs2_dir_entry *de_del, + struct buffer_head *bh) +{ + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return ocfs2_delete_entry_id(handle, dir, de_del, bh); + + return ocfs2_delete_entry_el(handle, dir, de_del, bh); +} + +/* + * Check whether 'de' has enough room to hold an entry of + * 'new_rec_len' bytes. + */ +static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de, + unsigned int new_rec_len) +{ + unsigned int de_really_used; + + /* Check whether this is an empty record with enough space */ + if (le64_to_cpu(de->inode) == 0 && + le16_to_cpu(de->rec_len) >= new_rec_len) + return 1; + + /* + * Record might have free space at the end which we can + * use. + */ + de_really_used = OCFS2_DIR_REC_LEN(de->name_len); + if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len)) + return 1; + + return 0; +} + +/* we don't always have a dentry for what we want to add, so people + * like orphan dir can call this instead. + * + * If you pass me insert_bh, I'll skip the search of the other dir + * blocks and put the record in there. 
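+ *
+ * A minimal sketch of the expected calling sequence (hypothetical
+ * caller; cluster locking, transaction setup and error handling
+ * elided):
+ *
+ *	status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
+ *					      name, namelen, &insert_bh);
+ *	if (!status)
+ *		status = __ocfs2_add_entry(handle, dir, name, namelen,
+ *					   inode, blkno, parent_fe_bh,
+ *					   insert_bh);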
+ */ +int __ocfs2_add_entry(handle_t *handle, + struct inode *dir, + const char *name, int namelen, + struct inode *inode, u64 blkno, + struct buffer_head *parent_fe_bh, + struct buffer_head *insert_bh) +{ + unsigned long offset; + unsigned short rec_len; + struct ocfs2_dir_entry *de, *de1; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data; + struct super_block *sb = dir->i_sb; + int retval, status; + unsigned int size = sb->s_blocksize; + char *data_start = insert_bh->b_data; + + mlog_entry_void(); + + if (!namelen) + return -EINVAL; + + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + data_start = di->id2.i_data.id_data; + size = i_size_read(dir); + + BUG_ON(insert_bh != parent_fe_bh); + } + + rec_len = OCFS2_DIR_REC_LEN(namelen); + offset = 0; + de = (struct ocfs2_dir_entry *) data_start; + while (1) { + BUG_ON((char *)de >= (size + data_start)); + + /* These checks should've already been passed by the + * prepare function, but I guess we can leave them + * here anyway. */ + if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) { + retval = -ENOENT; + goto bail; + } + if (ocfs2_match(namelen, name, de)) { + retval = -EEXIST; + goto bail; + } + + if (ocfs2_dirent_would_fit(de, rec_len)) { + dir->i_mtime = dir->i_ctime = CURRENT_TIME; + retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh); + if (retval < 0) { + mlog_errno(retval); + goto bail; + } + + status = ocfs2_journal_access(handle, dir, insert_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + /* By now the buffer is marked for journaling */ + offset += le16_to_cpu(de->rec_len); + if (le64_to_cpu(de->inode)) { + de1 = (struct ocfs2_dir_entry *)((char *) de + + OCFS2_DIR_REC_LEN(de->name_len)); + de1->rec_len = + cpu_to_le16(le16_to_cpu(de->rec_len) - + OCFS2_DIR_REC_LEN(de->name_len)); + de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); + de = de1; + } + de->file_type = OCFS2_FT_UNKNOWN; + if (blkno) { + de->inode = cpu_to_le64(blkno); + ocfs2_set_de_type(de, inode->i_mode); + } else + de->inode = 0; + de->name_len = namelen; + memcpy(de->name, name, namelen); + + dir->i_version++; + status = ocfs2_journal_dirty(handle, insert_bh); + retval = 0; + goto bail; + } + offset += le16_to_cpu(de->rec_len); + de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len)); + } + + /* when you think about it, the assert above should prevent us + * from ever getting here. */ + retval = -ENOSPC; +bail: + + mlog_exit(retval); + return retval; +} + +static int ocfs2_dir_foreach_blk_id(struct inode *inode, + u64 *f_version, + loff_t *f_pos, void *priv, + filldir_t filldir, int *filldir_err) +{ + int ret, i, filldir_ret; + unsigned long offset = *f_pos; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_inline_data *data; + struct ocfs2_dir_entry *de; + + ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &di_bh); + if (ret) { + mlog(ML_ERROR, "Unable to read inode block for dir %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + goto out; + } + + di = (struct ocfs2_dinode *)di_bh->b_data; + data = &di->id2.i_data; + + while (*f_pos < i_size_read(inode)) { +revalidate: + /* If the dir block has changed since the last call to + * readdir(2), then we might be pointing to an invalid + * dirent right now. Scan from the start of the block + * to make sure. 
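+ * (i_version is bumped on every directory update, see
+ * __ocfs2_delete_entry() and __ocfs2_add_entry() above, so comparing
+ * the caller's cached *f_version against inode->i_version is a cheap
+ * way to detect a concurrent modification.)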
*/ + if (*f_version != inode->i_version) { + for (i = 0; i < i_size_read(inode) && i < offset; ) { + de = (struct ocfs2_dir_entry *) + (data->id_data + i); + /* It's too expensive to do a full + * dirent test each time round this + * loop, but we do have to test at + * least that it is non-zero. A + * failure will be detected in the + * dirent test below. */ + if (le16_to_cpu(de->rec_len) < + OCFS2_DIR_REC_LEN(1)) + break; + i += le16_to_cpu(de->rec_len); + } + *f_pos = offset = i; + *f_version = inode->i_version; + } + + de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos); + if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) { + /* On error, skip the f_pos to the end. */ + *f_pos = i_size_read(inode); + goto out; + } + offset += le16_to_cpu(de->rec_len); + if (le64_to_cpu(de->inode)) { + /* We might block in the next section + * if the data destination is + * currently swapped out. So, use a + * version stamp to detect whether or + * not the directory has been modified + * during the copy operation. + */ + u64 version = *f_version; + unsigned char d_type = DT_UNKNOWN; + + if (de->file_type < OCFS2_FT_MAX) + d_type = ocfs2_filetype_table[de->file_type]; + + filldir_ret = filldir(priv, de->name, + de->name_len, + *f_pos, + le64_to_cpu(de->inode), + d_type); + if (filldir_ret) { + if (filldir_err) + *filldir_err = filldir_ret; + break; + } + if (version != *f_version) + goto revalidate; + } + *f_pos += le16_to_cpu(de->rec_len); + } + +out: + brelse(di_bh); + + return 0; +} + +static int ocfs2_dir_foreach_blk_el(struct inode *inode, + u64 *f_version, + loff_t *f_pos, void *priv, + filldir_t filldir, int *filldir_err) +{ + int error = 0; + unsigned long offset, blk, last_ra_blk = 0; + int i, stored; + struct buffer_head * bh, * tmp; + struct ocfs2_dir_entry * de; + int err; + struct super_block * sb = inode->i_sb; + unsigned int ra_sectors = 16; + + stored = 0; + bh = NULL; + + offset = (*f_pos) & (sb->s_blocksize - 1); + + while (!error && !stored && *f_pos < i_size_read(inode)) { + blk = (*f_pos) >> sb->s_blocksize_bits; + bh = ocfs2_bread(inode, blk, &err, 0); + if (!bh) { + mlog(ML_ERROR, + "directory #%llu contains a hole at offset %lld\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + *f_pos); + *f_pos += sb->s_blocksize - offset; + continue; + } + + /* The idea here is to begin with 8k read-ahead and to stay + * 4k ahead of our current position. + * + * TODO: Use the pagecache for this. We just need to + * make sure it's cluster-safe... */ + if (!last_ra_blk + || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) { + for (i = ra_sectors >> (sb->s_blocksize_bits - 9); + i > 0; i--) { + tmp = ocfs2_bread(inode, ++blk, &err, 1); + brelse(tmp); + } + last_ra_blk = blk; + ra_sectors = 8; + } + +revalidate: + /* If the dir block has changed since the last call to + * readdir(2), then we might be pointing to an invalid + * dirent right now. Scan from the start of the block + * to make sure. */ + if (*f_version != inode->i_version) { + for (i = 0; i < sb->s_blocksize && i < offset; ) { + de = (struct ocfs2_dir_entry *) (bh->b_data + i); + /* It's too expensive to do a full + * dirent test each time round this + * loop, but we do have to test at + * least that it is non-zero. A + * failure will be detected in the + * dirent test below. 
*/ + if (le16_to_cpu(de->rec_len) < + OCFS2_DIR_REC_LEN(1)) + break; + i += le16_to_cpu(de->rec_len); + } + offset = i; + *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1)) + | offset; + *f_version = inode->i_version; + } + + while (!error && *f_pos < i_size_read(inode) + && offset < sb->s_blocksize) { + de = (struct ocfs2_dir_entry *) (bh->b_data + offset); + if (!ocfs2_check_dir_entry(inode, de, bh, offset)) { + /* On error, skip the f_pos to the + next block. */ + *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1; + brelse(bh); + goto out; + } + offset += le16_to_cpu(de->rec_len); + if (le64_to_cpu(de->inode)) { + /* We might block in the next section + * if the data destination is + * currently swapped out. So, use a + * version stamp to detect whether or + * not the directory has been modified + * during the copy operation. + */ + unsigned long version = *f_version; + unsigned char d_type = DT_UNKNOWN; + + if (de->file_type < OCFS2_FT_MAX) + d_type = ocfs2_filetype_table[de->file_type]; + error = filldir(priv, de->name, + de->name_len, + *f_pos, + le64_to_cpu(de->inode), + d_type); + if (error) { + if (filldir_err) + *filldir_err = error; + break; + } + if (version != *f_version) + goto revalidate; + stored ++; + } + *f_pos += le16_to_cpu(de->rec_len); + } + offset = 0; + brelse(bh); + } + + stored = 0; +out: + return stored; +} + +static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version, + loff_t *f_pos, void *priv, filldir_t filldir, + int *filldir_err) +{ + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv, + filldir, filldir_err); + + return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir, + filldir_err); +} + +/* + * This is intended to be called from inside other kernel functions, + * so we fake some arguments. + */ +int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, + filldir_t filldir) +{ + int ret = 0, filldir_err = 0; + u64 version = inode->i_version; + + while (*f_pos < i_size_read(inode)) { + ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv, + filldir, &filldir_err); + if (ret || filldir_err) + break; + } + + if (ret > 0) + ret = -EIO; + + return 0; +} + +/* + * ocfs2_readdir() + * + */ +int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) +{ + int error = 0; + struct inode *inode = filp->f_path.dentry->d_inode; + int lock_level = 0; + + mlog_entry("dirino=%llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + + error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); + if (lock_level && error >= 0) { + /* We release EX lock which used to update atime + * and get PR lock again to reduce contention + * on commonly accessed directories. */ + ocfs2_inode_unlock(inode, 1); + lock_level = 0; + error = ocfs2_inode_lock(inode, NULL, 0); + } + if (error < 0) { + if (error != -ENOENT) + mlog_errno(error); + /* we haven't got any yet, so propagate the error. */ + goto bail_nolock; + } + + error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos, + dirent, filldir, NULL); + + ocfs2_inode_unlock(inode, lock_level); + +bail_nolock: + mlog_exit(error); + + return error; +} + +/* + * NOTE: this should always be called with parent dir i_mutex taken. 
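+ *
+ * A hypothetical lookup with that lock held, via the
+ * ocfs2_lookup_ino_from_name() wrapper defined below (error handling
+ * elided):
+ *
+ *	u64 blkno;
+ *	status = ocfs2_lookup_ino_from_name(dir, name, namelen, &blkno);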
+ */ +int ocfs2_find_files_on_disk(const char *name, + int namelen, + u64 *blkno, + struct inode *inode, + struct buffer_head **dirent_bh, + struct ocfs2_dir_entry **dirent) +{ + int status = -ENOENT; + + mlog_entry("(name=%.*s, blkno=%p, inode=%p, dirent_bh=%p, dirent=%p)\n", + namelen, name, blkno, inode, dirent_bh, dirent); + + *dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent); + if (!*dirent_bh || !*dirent) { + status = -ENOENT; + goto leave; + } + + *blkno = le64_to_cpu((*dirent)->inode); + + status = 0; +leave: + if (status < 0) { + *dirent = NULL; + brelse(*dirent_bh); + *dirent_bh = NULL; + } + + mlog_exit(status); + return status; +} + +/* + * Convenience function for callers which just want the block number + * mapped to a name and don't require the full dirent info, etc. + */ +int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, + int namelen, u64 *blkno) +{ + int ret; + struct buffer_head *bh = NULL; + struct ocfs2_dir_entry *dirent = NULL; + + ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &bh, &dirent); + brelse(bh); + + return ret; +} + +/* Check for a name within a directory. + * + * Return 0 if the name does not exist + * Return -EEXIST if the directory contains the name + * + * Callers should have i_mutex + a cluster lock on dir + */ +int ocfs2_check_dir_for_entry(struct inode *dir, + const char *name, + int namelen) +{ + int ret; + struct buffer_head *dirent_bh = NULL; + struct ocfs2_dir_entry *dirent = NULL; + + mlog_entry("dir %llu, name '%.*s'\n", + (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); + + ret = -EEXIST; + dirent_bh = ocfs2_find_entry(name, namelen, dir, &dirent); + if (dirent_bh) + goto bail; + + ret = 0; +bail: + brelse(dirent_bh); + + mlog_exit(ret); + return ret; +} + +struct ocfs2_empty_dir_priv { + unsigned seen_dot; + unsigned seen_dot_dot; + unsigned seen_other; +}; +static int ocfs2_empty_dir_filldir(void *priv, const char *name, int name_len, + loff_t pos, u64 ino, unsigned type) +{ + struct ocfs2_empty_dir_priv *p = priv; + + /* + * Check the positions of "." and ".." records to be sure + * they're in the correct place. + */ + if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) { + p->seen_dot = 1; + return 0; + } + + if (name_len == 2 && !strncmp("..", name, 2) && + pos == OCFS2_DIR_REC_LEN(1)) { + p->seen_dot_dot = 1; + return 0; + } + + p->seen_other = 1; + return 1; +} +/* + * routine to check that the specified directory is empty (for rmdir) + * + * Returns 1 if dir is empty, zero otherwise. + */ +int ocfs2_empty_dir(struct inode *inode) +{ + int ret; + loff_t start = 0; + struct ocfs2_empty_dir_priv priv; + + memset(&priv, 0, sizeof(priv)); + + ret = ocfs2_dir_foreach(inode, &start, &priv, ocfs2_empty_dir_filldir); + if (ret) + mlog_errno(ret); + + if (!priv.seen_dot || !priv.seen_dot_dot) { + mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + /* + * XXX: Is it really safe to allow an unlink to continue? 
+ */ + return 1; + } + + return !priv.seen_other; +} + +static void ocfs2_fill_initial_dirents(struct inode *inode, + struct inode *parent, + char *start, unsigned int size) +{ + struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start; + + de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); + de->name_len = 1; + de->rec_len = + cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); + strcpy(de->name, "."); + ocfs2_set_de_type(de, S_IFDIR); + + de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len)); + de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno); + de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1)); + de->name_len = 2; + strcpy(de->name, ".."); + ocfs2_set_de_type(de, S_IFDIR); +} + +/* + * This works together with code in ocfs2_mknod_locked() which sets + * the inline-data flag and initializes the inline-data section. + */ +static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb, + handle_t *handle, + struct inode *parent, + struct inode *inode, + struct buffer_head *di_bh) +{ + int ret; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_inline_data *data = &di->id2.i_data; + unsigned int size = le16_to_cpu(data->id_count); + + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_fill_initial_dirents(inode, parent, data->id_data, size); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + i_size_write(inode, size); + inode->i_nlink = 2; + inode->i_blocks = ocfs2_inode_sector_count(inode); + + ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + +static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, + handle_t *handle, + struct inode *parent, + struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_alloc_context *data_ac) +{ + int status; + struct buffer_head *new_bh = NULL; + + mlog_entry_void(); + + status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh, + data_ac, NULL, &new_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + ocfs2_set_new_buffer_uptodate(inode, new_bh); + + status = ocfs2_journal_access(handle, inode, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + memset(new_bh->b_data, 0, osb->sb->s_blocksize); + + ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, + osb->sb->s_blocksize); + + status = ocfs2_journal_dirty(handle, new_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + i_size_write(inode, inode->i_sb->s_blocksize); + inode->i_nlink = 2; + inode->i_blocks = ocfs2_inode_sector_count(inode); + status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = 0; +bail: + brelse(new_bh); + + mlog_exit(status); + return status; +} + +int ocfs2_fill_new_dir(struct ocfs2_super *osb, + handle_t *handle, + struct inode *parent, + struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_alloc_context *data_ac) +{ + BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL); + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh); + + return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh, + data_ac); +} + +static void ocfs2_expand_last_dirent(char *start, unsigned int old_size, + unsigned int new_size) +{ + struct ocfs2_dir_entry *de; + struct ocfs2_dir_entry *prev_de; + char *de_buf,
*limit; + unsigned int bytes = new_size - old_size; + + limit = start + old_size; + de_buf = start; + de = (struct ocfs2_dir_entry *)de_buf; + do { + prev_de = de; + de_buf += le16_to_cpu(de->rec_len); + de = (struct ocfs2_dir_entry *)de_buf; + } while (de_buf < limit); + + le16_add_cpu(&prev_de->rec_len, bytes); +} + +/* + * We allocate enough clusters to fulfill "blocks_wanted", but set + * i_size to exactly one block. Ocfs2_extend_dir() will handle the + * rest automatically for us. + * + * *first_block_bh is a pointer to the 1st data block allocated to the + * directory. + */ +static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, + unsigned int blocks_wanted, + struct buffer_head **first_block_bh) +{ + int ret, credits = OCFS2_INLINE_TO_EXTENTS_CREDITS; + u32 alloc, bit_off, len; + struct super_block *sb = dir->i_sb; + u64 blkno, bytes = blocks_wanted << sb->s_blocksize_bits; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + struct ocfs2_inode_info *oi = OCFS2_I(dir); + struct ocfs2_alloc_context *data_ac; + struct buffer_head *dirdata_bh = NULL; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + handle_t *handle; + struct ocfs2_extent_tree et; + + ocfs2_init_dinode_extent_tree(&et, dir, di_bh); + + alloc = ocfs2_clusters_for_bytes(sb, bytes); + + /* + * We should never need more than 2 clusters for this - + * maximum dirent size is far less than one block. In fact, + * the only time we'd need more than one cluster is if + * blocksize == clustersize and the dirent won't fit in the + * extra space that the expansion to a single block gives. As + * of today, that only happens on 4k/4k file systems. + */ + BUG_ON(alloc > 2); + + ret = ocfs2_reserve_clusters(osb, alloc, &data_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + down_write(&oi->ip_alloc_sem); + + /* + * Prepare for worst case allocation scenario of two separate + * extents. + */ + if (alloc == 2) + credits += OCFS2_SUBALLOC_ALLOC; + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out_sem; + } + + /* + * Try to claim as many clusters as the bitmap can give though + * if we only get one now, that's enough to continue. The rest + * will be claimed after the conversion to extents. + */ + ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * Operations are carefully ordered so that we set up the new + * data block first. The conversion from inline data to + * extents follows. + */ + blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off); + dirdata_bh = sb_getblk(sb, blkno); + if (!dirdata_bh) { + ret = -EIO; + mlog_errno(ret); + goto out_commit; + } + + ocfs2_set_new_buffer_uptodate(dir, dirdata_bh); + + ret = ocfs2_journal_access(handle, dir, dirdata_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir)); + memset(dirdata_bh->b_data + i_size_read(dir), 0, + sb->s_blocksize - i_size_read(dir)); + ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), + sb->s_blocksize); + + ret = ocfs2_journal_dirty(handle, dirdata_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * Set extent, i_size, etc on the directory. After this, the + * inode should contain the same exact dirents as before and + * be fully accessible from system calls. 
+ * + * We let the later dirent insert modify c/mtime - to the user + * the data hasn't changed. + */ + ret = ocfs2_journal_access(handle, dir, di_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + spin_unlock(&oi->ip_lock); + + ocfs2_dinode_new_extent_list(dir, di); + + i_size_write(dir, sb->s_blocksize); + dir->i_mtime = dir->i_ctime = CURRENT_TIME; + + di->i_size = cpu_to_le64(sb->s_blocksize); + di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec); + di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec); + + /* + * This should never fail as our extent list is empty and all + * related blocks have been journaled already. + */ + ret = ocfs2_insert_extent(osb, handle, dir, &et, 0, blkno, len, + 0, NULL); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * Set i_blocks after the extent insert for the most up to + * date ip_clusters value. + */ + dir->i_blocks = ocfs2_inode_sector_count(dir); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * We asked for two clusters, but only got one in the 1st + * pass. Claim the 2nd cluster as a separate extent. + */ + if (alloc > len) { + ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, + &len); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off); + + ret = ocfs2_insert_extent(osb, handle, dir, &et, 1, + blkno, len, 0, NULL); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + } + + *first_block_bh = dirdata_bh; + dirdata_bh = NULL; + +out_commit: + ocfs2_commit_trans(osb, handle); + +out_sem: + up_write(&oi->ip_alloc_sem); + +out: + if (data_ac) + ocfs2_free_alloc_context(data_ac); + + brelse(dirdata_bh); + + return ret; +} + +/* returns a bh of the 1st new block in the allocation. */ +static int ocfs2_do_extend_dir(struct super_block *sb, + handle_t *handle, + struct inode *dir, + struct buffer_head *parent_fe_bh, + struct ocfs2_alloc_context *data_ac, + struct ocfs2_alloc_context *meta_ac, + struct buffer_head **new_bh) +{ + int status; + int extend; + u64 p_blkno, v_blkno; + + spin_lock(&OCFS2_I(dir)->ip_lock); + extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)); + spin_unlock(&OCFS2_I(dir)->ip_lock); + + if (extend) { + u32 offset = OCFS2_I(dir)->ip_clusters; + + status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset, + 1, 0, parent_fe_bh, handle, + data_ac, meta_ac, NULL); + BUG_ON(status == -EAGAIN); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir)); + status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + *new_bh = sb_getblk(sb, p_blkno); + if (!*new_bh) { + status = -EIO; + mlog_errno(status); + goto bail; + } + status = 0; +bail: + mlog_exit(status); + return status; +} + +/* + * Assumes you already have a cluster lock on the directory. + * + * 'blocks_wanted' is only used if we have an inline directory which + * is to be turned into an extent based one. The size of the dirent to + * insert might be larger than the space gained by growing to just one + * block, so we may have to grow the inode by two blocks in that case. 
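+ *
+ * A worked example of that calculation (OCFS2_DIR_REC_LEN() rounds 12
+ * bytes of fixed dirent header plus the name up to a multiple of 4):
+ * on a 4096 byte block, an inline directory of i_size 4000 whose last
+ * entry has rec_len 16 gains 16 + (4096 - 4000) = 112 bytes of room
+ * after expansion. Inserting a 20 character name needs
+ * OCFS2_DIR_REC_LEN(20) = 32 bytes on top of the 16 still needed by
+ * that last entry, so 48 <= 112 and one block suffices; only when the
+ * sum exceeds the available room does ocfs2_find_dir_space_id() set
+ * blocks_wanted to 2.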
+ */ +static int ocfs2_extend_dir(struct ocfs2_super *osb, + struct inode *dir, + struct buffer_head *parent_fe_bh, + unsigned int blocks_wanted, + struct buffer_head **new_de_bh) +{ + int status = 0; + int credits, num_free_extents, drop_alloc_sem = 0; + loff_t dir_i_size; + struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data; + struct ocfs2_extent_list *el = &fe->id2.i_list; + struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *meta_ac = NULL; + handle_t *handle = NULL; + struct buffer_head *new_bh = NULL; + struct ocfs2_dir_entry * de; + struct super_block *sb = osb->sb; + struct ocfs2_extent_tree et; + + mlog_entry_void(); + + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + status = ocfs2_expand_inline_dir(dir, parent_fe_bh, + blocks_wanted, &new_bh); + if (status) { + mlog_errno(status); + goto bail; + } + + if (blocks_wanted == 1) { + /* + * If the new dirent will fit inside the space + * created by pushing out to one block, then + * we can complete the operation + * here. Otherwise we have to expand i_size + * and format the 2nd block below. + */ + BUG_ON(new_bh == NULL); + goto bail_bh; + } + + /* + * Get rid of 'new_bh' - we want to format the 2nd + * data block and return that instead. + */ + brelse(new_bh); + new_bh = NULL; + + dir_i_size = i_size_read(dir); + credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS; + goto do_extend; + } + + dir_i_size = i_size_read(dir); + mlog(0, "extending dir %llu (i_size = %lld)\n", + (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size); + + /* dir->i_size is always block aligned. */ + spin_lock(&OCFS2_I(dir)->ip_lock); + if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) { + spin_unlock(&OCFS2_I(dir)->ip_lock); + ocfs2_init_dinode_extent_tree(&et, dir, parent_fe_bh); + num_free_extents = ocfs2_num_free_extents(osb, dir, &et); + if (num_free_extents < 0) { + status = num_free_extents; + mlog_errno(status); + goto bail; + } + + if (!num_free_extents) { + status = ocfs2_reserve_new_metadata(osb, el, &meta_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + } + + status = ocfs2_reserve_clusters(osb, 1, &data_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + credits = ocfs2_calc_extend_credits(sb, el, 1); + } else { + spin_unlock(&OCFS2_I(dir)->ip_lock); + credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS; + } + +do_extend: + down_write(&OCFS2_I(dir)->ip_alloc_sem); + drop_alloc_sem = 1; + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto bail; + } + + status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh, + data_ac, meta_ac, &new_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + ocfs2_set_new_buffer_uptodate(dir, new_bh); + + status = ocfs2_journal_access(handle, dir, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + memset(new_bh->b_data, 0, sb->s_blocksize); + de = (struct ocfs2_dir_entry *) new_bh->b_data; + de->inode = 0; + de->rec_len = cpu_to_le16(sb->s_blocksize); + status = ocfs2_journal_dirty(handle, new_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + dir_i_size += dir->i_sb->s_blocksize; + i_size_write(dir, dir_i_size); + dir->i_blocks = ocfs2_inode_sector_count(dir); + status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + +bail_bh: + 
*new_de_bh = new_bh; + get_bh(*new_de_bh); +bail: + if (drop_alloc_sem) + up_write(&OCFS2_I(dir)->ip_alloc_sem); + if (handle) + ocfs2_commit_trans(osb, handle); + + if (data_ac) + ocfs2_free_alloc_context(data_ac); + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + + brelse(new_bh); + + mlog_exit(status); + return status; +} + +static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh, + const char *name, int namelen, + struct buffer_head **ret_de_bh, + unsigned int *blocks_wanted) +{ + int ret; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_dir_entry *de, *last_de = NULL; + char *de_buf, *limit; + unsigned long offset = 0; + unsigned int rec_len, new_rec_len; + + de_buf = di->id2.i_data.id_data; + limit = de_buf + i_size_read(dir); + rec_len = OCFS2_DIR_REC_LEN(namelen); + + while (de_buf < limit) { + de = (struct ocfs2_dir_entry *)de_buf; + + if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) { + ret = -ENOENT; + goto out; + } + if (ocfs2_match(namelen, name, de)) { + ret = -EEXIST; + goto out; + } + if (ocfs2_dirent_would_fit(de, rec_len)) { + /* Ok, we found a spot. Return this bh and let + * the caller actually fill it in. */ + *ret_de_bh = di_bh; + get_bh(*ret_de_bh); + ret = 0; + goto out; + } + + last_de = de; + de_buf += le16_to_cpu(de->rec_len); + offset += le16_to_cpu(de->rec_len); + } + + /* + * We're going to require expansion of the directory - figure + * out how many blocks we'll need so that a place for the + * dirent can be found. + */ + *blocks_wanted = 1; + new_rec_len = le16_to_cpu(last_de->rec_len) + (dir->i_sb->s_blocksize - i_size_read(dir)); + if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len))) + *blocks_wanted = 2; + + ret = -ENOSPC; +out: + return ret; +} + +static int ocfs2_find_dir_space_el(struct inode *dir, const char *name, + int namelen, struct buffer_head **ret_de_bh) +{ + unsigned long offset; + struct buffer_head *bh = NULL; + unsigned short rec_len; + struct ocfs2_dir_entry *de; + struct super_block *sb = dir->i_sb; + int status; + + bh = ocfs2_bread(dir, 0, &status, 0); + if (!bh) { + mlog_errno(status); + goto bail; + } + + rec_len = OCFS2_DIR_REC_LEN(namelen); + offset = 0; + de = (struct ocfs2_dir_entry *) bh->b_data; + while (1) { + if ((char *)de >= sb->s_blocksize + bh->b_data) { + brelse(bh); + bh = NULL; + + if (i_size_read(dir) <= offset) { + /* + * Caller will have to expand this + * directory. + */ + status = -ENOSPC; + goto bail; + } + bh = ocfs2_bread(dir, + offset >> sb->s_blocksize_bits, + &status, + 0); + if (!bh) { + mlog_errno(status); + goto bail; + } + /* move to next block */ + de = (struct ocfs2_dir_entry *) bh->b_data; + } + if (!ocfs2_check_dir_entry(dir, de, bh, offset)) { + status = -ENOENT; + goto bail; + } + if (ocfs2_match(namelen, name, de)) { + status = -EEXIST; + goto bail; + } + if (ocfs2_dirent_would_fit(de, rec_len)) { + /* Ok, we found a spot. Return this bh and let + * the caller actually fill it in. 
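+ * (ocfs2_dirent_would_fit() accepts either an unused record whose
+ * rec_len covers the new entry, or a live record whose rec_len leaves
+ * at least the new entry's OCFS2_DIR_REC_LEN() of slack beyond its
+ * own.)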
*/ + *ret_de_bh = bh; + get_bh(*ret_de_bh); + status = 0; + goto bail; + } + offset += le16_to_cpu(de->rec_len); + de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len)); + } + + status = 0; +bail: + brelse(bh); + + mlog_exit(status); + return status; +} + +int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, + struct inode *dir, + struct buffer_head *parent_fe_bh, + const char *name, + int namelen, + struct buffer_head **ret_de_bh) +{ + int ret; + unsigned int blocks_wanted = 1; + struct buffer_head *bh = NULL; + + mlog(0, "getting ready to insert namelen %d into dir %llu\n", + namelen, (unsigned long long)OCFS2_I(dir)->ip_blkno); + + *ret_de_bh = NULL; + + if (!namelen) { + ret = -EINVAL; + mlog_errno(ret); + goto out; + } + + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name, + namelen, &bh, &blocks_wanted); + } else + ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh); + + if (ret && ret != -ENOSPC) { + mlog_errno(ret); + goto out; + } + + if (ret == -ENOSPC) { + /* + * We have to expand the directory to add this name. + */ + BUG_ON(bh); + + ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted, + &bh); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + + BUG_ON(!bh); + } + + *ret_de_bh = bh; + bh = NULL; +out: + brelse(bh); + return ret; +} diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h new file mode 100644 index 0000000..ce48b90 --- /dev/null +++ b/fs/ocfs2/dir.h @@ -0,0 +1,86 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dir.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef OCFS2_DIR_H +#define OCFS2_DIR_H + +struct buffer_head *ocfs2_find_entry(const char *name, + int namelen, + struct inode *dir, + struct ocfs2_dir_entry **res_dir); +int ocfs2_delete_entry(handle_t *handle, + struct inode *dir, + struct ocfs2_dir_entry *de_del, + struct buffer_head *bh); +int __ocfs2_add_entry(handle_t *handle, + struct inode *dir, + const char *name, int namelen, + struct inode *inode, u64 blkno, + struct buffer_head *parent_fe_bh, + struct buffer_head *insert_bh); +static inline int ocfs2_add_entry(handle_t *handle, + struct dentry *dentry, + struct inode *inode, u64 blkno, + struct buffer_head *parent_fe_bh, + struct buffer_head *insert_bh) +{ + return __ocfs2_add_entry(handle, dentry->d_parent->d_inode, + dentry->d_name.name, dentry->d_name.len, + inode, blkno, parent_fe_bh, insert_bh); +} +int ocfs2_update_entry(struct inode *dir, handle_t *handle, + struct buffer_head *de_bh, struct ocfs2_dir_entry *de, + struct inode *new_entry_inode); + +int ocfs2_check_dir_for_entry(struct inode *dir, + const char *name, + int namelen); +int ocfs2_empty_dir(struct inode *inode); +int ocfs2_find_files_on_disk(const char *name, + int namelen, + u64 *blkno, + struct inode *inode, + struct buffer_head **dirent_bh, + struct ocfs2_dir_entry **dirent); +int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, + int namelen, u64 *blkno); +int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir); +int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, + filldir_t filldir); +int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, + struct inode *dir, + struct buffer_head *parent_fe_bh, + const char *name, + int namelen, + struct buffer_head **ret_de_bh); +struct ocfs2_alloc_context; +int ocfs2_fill_new_dir(struct ocfs2_super *osb, + handle_t *handle, + struct inode *parent, + struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_alloc_context *data_ac); + +#endif /* OCFS2_DIR_H */ diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile new file mode 100644 index 0000000..1903613 --- /dev/null +++ b/fs/ocfs2/dlm/Makefile @@ -0,0 +1,8 @@ +EXTRA_CFLAGS += -Ifs/ocfs2 + +obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o ocfs2_dlmfs.o + +ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \ + dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o + +ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h new file mode 100644 index 0000000..b5786a7 --- /dev/null +++ b/fs/ocfs2/dlm/dlmapi.h @@ -0,0 +1,220 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmapi.h + * + * externally exported dlm interfaces + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + +#ifndef DLMAPI_H +#define DLMAPI_H + +struct dlm_lock; +struct dlm_ctxt; + +/* NOTE: changes made to this enum should be reflected in dlmdebug.c */ +enum dlm_status { + DLM_NORMAL = 0, /* 0: request in progress */ + DLM_GRANTED, /* 1: request granted */ + DLM_DENIED, /* 2: request denied */ + DLM_DENIED_NOLOCKS, /* 3: request denied, out of system resources */ + DLM_WORKING, /* 4: async request in progress */ + DLM_BLOCKED, /* 5: lock request blocked */ + DLM_BLOCKED_ORPHAN, /* 6: lock request blocked by an orphan lock */ + DLM_DENIED_GRACE_PERIOD, /* 7: topological change in progress */ + DLM_SYSERR, /* 8: system error */ + DLM_NOSUPPORT, /* 9: unsupported */ + DLM_CANCELGRANT, /* 10: can't cancel convert: already granted */ + DLM_IVLOCKID, /* 11: bad lockid */ + DLM_SYNC, /* 12: synchronous request granted */ + DLM_BADTYPE, /* 13: bad resource type */ + DLM_BADRESOURCE, /* 14: bad resource handle */ + DLM_MAXHANDLES, /* 15: no more resource handles */ + DLM_NOCLINFO, /* 16: can't contact cluster manager */ + DLM_NOLOCKMGR, /* 17: can't contact lock manager */ + DLM_NOPURGED, /* 18: can't contact purge daemon */ + DLM_BADARGS, /* 19: bad api args */ + DLM_VOID, /* 20: no status */ + DLM_NOTQUEUED, /* 21: NOQUEUE was specified and request failed */ + DLM_IVBUFLEN, /* 22: invalid resource name length */ + DLM_CVTUNGRANT, /* 23: attempted to convert ungranted lock */ + DLM_BADPARAM, /* 24: invalid lock mode specified */ + DLM_VALNOTVALID, /* 25: value block has been invalidated */ + DLM_REJECTED, /* 26: request rejected, unrecognized client */ + DLM_ABORT, /* 27: blocked lock request cancelled */ + DLM_CANCEL, /* 28: conversion request cancelled */ + DLM_IVRESHANDLE, /* 29: invalid resource handle */ + DLM_DEADLOCK, /* 30: deadlock recovery refused this request */ + DLM_DENIED_NOASTS, /* 31: failed to allocate AST */ + DLM_FORWARD, /* 32: request must wait for primary's response */ + DLM_TIMEOUT, /* 33: timeout value for lock has expired */ + DLM_IVGROUPID, /* 34: invalid group specification */ + DLM_VERS_CONFLICT, /* 35: version conflicts prevent request handling */ + DLM_BAD_DEVICE_PATH, /* 36: Locks device does not exist or path wrong */ + DLM_NO_DEVICE_PERMISSION, /* 37: Client has insufficient perms for device */ + DLM_NO_CONTROL_DEVICE, /* 38: Cannot set options on opened device */ + + DLM_RECOVERING, /* 39: extension, allows caller to fail a lock + request if it is being recovered */ + DLM_MIGRATING, /* 40: extension, allows caller to fail a lock + request if it is being migrated */ + DLM_MAXSTATS, /* 41: upper limit for return code validation */ +}; + +/* for pretty-printing dlm_status error messages */ +const char *dlm_errmsg(enum dlm_status err); +/* for pretty-printing dlm_status error names */ +const char *dlm_errname(enum dlm_status err); + +/* Eventually the DLM will use standard errno values, but in the + * meantime this lets us track dlm errors as they bubble up. When we + * bring its error reporting into line with the rest of the stack, + * these can just be replaced with calls to mlog_errno.
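+ *
+ * For example, a caller holding an enum dlm_status st might log it
+ * with both the symbolic name and the message:
+ *
+ *	mlog(ML_ERROR, "%s: %s\n", dlm_errname(st), dlm_errmsg(st));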
*/ +#define dlm_error(st) do { \ + if ((st) != DLM_RECOVERING && \ + (st) != DLM_MIGRATING && \ + (st) != DLM_FORWARD) \ + mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ +} while (0) + +#define DLM_LKSB_UNUSED1 0x01 +#define DLM_LKSB_PUT_LVB 0x02 +#define DLM_LKSB_GET_LVB 0x04 +#define DLM_LKSB_UNUSED2 0x08 +#define DLM_LKSB_UNUSED3 0x10 +#define DLM_LKSB_UNUSED4 0x20 +#define DLM_LKSB_UNUSED5 0x40 +#define DLM_LKSB_UNUSED6 0x80 + +#define DLM_LVB_LEN 64 + +/* Callers are only allowed access to the lvb and status members of + * this struct. */ +struct dlm_lockstatus { + enum dlm_status status; + u32 flags; + struct dlm_lock *lockid; + char lvb[DLM_LVB_LEN]; +}; + +/* Valid lock modes. */ +#define LKM_IVMODE (-1) /* invalid mode */ +#define LKM_NLMODE 0 /* null lock */ +#define LKM_CRMODE 1 /* concurrent read unsupported */ +#define LKM_CWMODE 2 /* concurrent write unsupported */ +#define LKM_PRMODE 3 /* protected read */ +#define LKM_PWMODE 4 /* protected write unsupported */ +#define LKM_EXMODE 5 /* exclusive */ +#define LKM_MAXMODE 5 +#define LKM_MODEMASK 0xff + +/* Flags passed to dlmlock and dlmunlock: + * reserved: flags used by the "real" dlm + * only a few are supported by this dlm + * (U) = unsupported by ocfs2 dlm */ +#define LKM_ORPHAN 0x00000010 /* this lock is orphanable (U) */ +#define LKM_PARENTABLE 0x00000020 /* this lock was orphaned (U) */ +#define LKM_BLOCK 0x00000040 /* blocking lock request (U) */ +#define LKM_LOCAL 0x00000080 /* local lock request */ +#define LKM_VALBLK 0x00000100 /* lock value block request */ +#define LKM_NOQUEUE 0x00000200 /* non-blocking request */ +#define LKM_CONVERT 0x00000400 /* conversion request */ +#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */ +#define LKM_UNLOCK 0x00001000 /* deallocate this lock */ +#define LKM_CANCEL 0x00002000 /* cancel conversion request */ +#define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */ +#define LKM_INVVALBLK 0x00008000 /* invalidate lock value block */ +#define LKM_SYNCSTS 0x00010000 /* return synchronous status if poss (U) */ +#define LKM_TIMEOUT 0x00020000 /* lock request contains timeout (U) */ +#define LKM_SNGLDLCK 0x00040000 /* request can self-deadlock (U) */ +#define LKM_FINDLOCAL 0x00080000 /* find local lock request (U) */ +#define LKM_PROC_OWNED 0x00100000 /* owned by process, not group (U) */ +#define LKM_XID 0x00200000 /* use transaction id for deadlock (U) */ +#define LKM_XID_CONFLICT 0x00400000 /* do not allow lock inheritance (U) */ +#define LKM_FORCE 0x00800000 /* force unlock flag */ +#define LKM_REVVALBLK 0x01000000 /* temporary solution: re-validate + lock value block (U) */ +/* unused */ +#define LKM_UNUSED1 0x00000001 /* unused */ +#define LKM_UNUSED2 0x00000002 /* unused */ +#define LKM_UNUSED3 0x00000004 /* unused */ +#define LKM_UNUSED4 0x00000008 /* unused */ +#define LKM_UNUSED5 0x02000000 /* unused */ +#define LKM_UNUSED6 0x04000000 /* unused */ +#define LKM_UNUSED7 0x08000000 /* unused */ + +/* ocfs2 extensions: internal only + * should never be used by caller */ +#define LKM_MIGRATION 0x10000000 /* extension: lockres is to be migrated + to another node */ +#define LKM_PUT_LVB 0x20000000 /* extension: lvb is being passed + should be applied to lockres */ +#define LKM_GET_LVB 0x40000000 /* extension: lvb should be copied + from lockres when lock is granted */ +#define LKM_RECOVERY 0x80000000 /* extension: flag for recovery lock + used to avoid recovery rwsem */ + + +typedef void (dlm_astlockfunc_t)(void *); +typedef void
(dlm_bastlockfunc_t)(void *, int); +typedef void (dlm_astunlockfunc_t)(void *, enum dlm_status); + +enum dlm_status dlmlock(struct dlm_ctxt *dlm, + int mode, + struct dlm_lockstatus *lksb, + int flags, + const char *name, + int namelen, + dlm_astlockfunc_t *ast, + void *data, + dlm_bastlockfunc_t *bast); + +enum dlm_status dlmunlock(struct dlm_ctxt *dlm, + struct dlm_lockstatus *lksb, + int flags, + dlm_astunlockfunc_t *unlockast, + void *data); + +struct dlm_protocol_version { + u8 pv_major; + u8 pv_minor; +}; +struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key, + struct dlm_protocol_version *fs_proto); + +void dlm_unregister_domain(struct dlm_ctxt *dlm); + +void dlm_print_one_lock(struct dlm_lock *lockid); + +typedef void (dlm_eviction_func)(int, void *); +struct dlm_eviction_cb { + struct list_head ec_item; + dlm_eviction_func *ec_func; + void *ec_data; +}; +void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb, + dlm_eviction_func *f, + void *data); +void dlm_register_eviction_cb(struct dlm_ctxt *dlm, + struct dlm_eviction_cb *cb); +void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb); + +#endif /* DLMAPI_H */ diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c new file mode 100644 index 0000000..644bee5 --- /dev/null +++ b/fs/ocfs2/dlm/dlmast.c @@ -0,0 +1,474 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmast.c + * + * AST and BAST functionality for local and remote nodes + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/random.h> +#include <linux/blkdev.h> +#include <linux/socket.h> +#include <linux/inet.h> +#include <linux/spinlock.h> + + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" + +#define MLOG_MASK_PREFIX ML_DLM +#include "cluster/masklog.h" + +static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + struct dlm_lock *lock); +static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); + +/* Should be called as an ast gets queued to see if the new + * lock level will obsolete a pending bast. + * For example, if dlm_thread queued a bast for an EX lock that + * was blocking another EX, but before sending the bast the + * lock owner downconverted to NL, the bast is now obsolete. + * Only the ast should be sent. + * This is needed because the lock and convert paths can queue + * asts out-of-band (not waiting for dlm_thread) in order to + * allow for LKM_NOQUEUE to get immediate responses. 
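+ *
+ * The decision below, summarized by the newly granted mode:
+ *
+ *	EX granted -> still blocks every other mode, keep the bast
+ *	NL granted -> blocks nothing, cancel the bast
+ *	PR granted -> blocks only EX, cancel unless the highest
+ *		      blocked mode is EX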
*/ +static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) +{ + assert_spin_locked(&dlm->ast_lock); + assert_spin_locked(&lock->spinlock); + + if (lock->ml.highest_blocked == LKM_IVMODE) + return 0; + BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); + + if (lock->bast_pending && + list_empty(&lock->bast_list)) + /* old bast already sent, ok */ + return 0; + + if (lock->ml.type == LKM_EXMODE) + /* EX blocks anything left, any bast still valid */ + return 0; + else if (lock->ml.type == LKM_NLMODE) + /* NL blocks nothing, no reason to send any bast, cancel it */ + return 1; + else if (lock->ml.highest_blocked != LKM_EXMODE) + /* PR only blocks EX */ + return 1; + + return 0; +} + +static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) +{ + mlog_entry_void(); + + BUG_ON(!dlm); + BUG_ON(!lock); + + assert_spin_locked(&dlm->ast_lock); + if (!list_empty(&lock->ast_list)) { + mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n", + lock->ast_pending, lock->ml.type); + BUG(); + } + BUG_ON(!list_empty(&lock->ast_list)); + if (lock->ast_pending) + mlog(0, "lock has an ast getting flushed right now\n"); + + /* putting lock on list, add a ref */ + dlm_lock_get(lock); + spin_lock(&lock->spinlock); + + /* check to see if this ast obsoletes the bast */ + if (dlm_should_cancel_bast(dlm, lock)) { + struct dlm_lock_resource *res = lock->lockres; + mlog(0, "%s: cancelling bast for %.*s\n", + dlm->name, res->lockname.len, res->lockname.name); + lock->bast_pending = 0; + list_del_init(&lock->bast_list); + lock->ml.highest_blocked = LKM_IVMODE; + /* removing lock from list, remove a ref. guaranteed + * this won't be the last ref because of the get above, + * so res->spinlock will not be taken here */ + dlm_lock_put(lock); + /* free up the reserved bast that we are cancelling. + * guaranteed that this will not be the last reserved + * ast because *both* an ast and a bast were reserved + * to get to this point. 
the res->spinlock will not be + * taken here */ + dlm_lockres_release_ast(dlm, res); + } + list_add_tail(&lock->ast_list, &dlm->pending_asts); + lock->ast_pending = 1; + spin_unlock(&lock->spinlock); +} + +void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) +{ + mlog_entry_void(); + + BUG_ON(!dlm); + BUG_ON(!lock); + + spin_lock(&dlm->ast_lock); + __dlm_queue_ast(dlm, lock); + spin_unlock(&dlm->ast_lock); +} + + +static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) +{ + mlog_entry_void(); + + BUG_ON(!dlm); + BUG_ON(!lock); + assert_spin_locked(&dlm->ast_lock); + + BUG_ON(!list_empty(&lock->bast_list)); + if (lock->bast_pending) + mlog(0, "lock has a bast getting flushed right now\n"); + + /* putting lock on list, add a ref */ + dlm_lock_get(lock); + spin_lock(&lock->spinlock); + list_add_tail(&lock->bast_list, &dlm->pending_basts); + lock->bast_pending = 1; + spin_unlock(&lock->spinlock); +} + +void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) +{ + mlog_entry_void(); + + BUG_ON(!dlm); + BUG_ON(!lock); + + spin_lock(&dlm->ast_lock); + __dlm_queue_bast(dlm, lock); + spin_unlock(&dlm->ast_lock); +} + +static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + struct dlm_lockstatus *lksb = lock->lksb; + BUG_ON(!lksb); + + /* only updates if this node masters the lockres */ + if (res->owner == dlm->node_num) { + + spin_lock(&res->spinlock); + /* check the lksb flags for the direction */ + if (lksb->flags & DLM_LKSB_GET_LVB) { + mlog(0, "getting lvb from lockres for %s node\n", + lock->ml.node == dlm->node_num ? "master" : + "remote"); + memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); + } + /* Do nothing for lvb put requests - they should be done in + * place when the lock is downconverted - otherwise we risk + * racing gets and puts which could result in old lvb data + * being propagated. We leave the put flag set and clear it + * here. In the future we might want to clear it at the time + * the put is actually done. 
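+ *
+ * So the only copy performed here is the "get" direction, and only
+ * on the node that masters the resource; condensed (locking elided):
+ *
+ *   if (res->owner == dlm->node_num &&
+ *       (lksb->flags & DLM_LKSB_GET_LVB))
+ *           memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);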
+ */ + spin_unlock(&res->spinlock); + } + + /* reset any lvb flags on the lksb */ + lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); +} + +void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + dlm_astlockfunc_t *fn; + struct dlm_lockstatus *lksb; + + mlog_entry_void(); + + lksb = lock->lksb; + fn = lock->ast; + BUG_ON(lock->ml.node != dlm->node_num); + + dlm_update_lvb(dlm, res, lock); + (*fn)(lock->astdata); +} + + +int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + int ret; + struct dlm_lockstatus *lksb; + int lksbflags; + + mlog_entry_void(); + + lksb = lock->lksb; + BUG_ON(lock->ml.node == dlm->node_num); + + lksbflags = lksb->flags; + dlm_update_lvb(dlm, res, lock); + + /* lock request came from another node + * go do the ast over there */ + ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags); + return ret; +} + +void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + struct dlm_lock *lock, int blocked_type) +{ + dlm_bastlockfunc_t *fn = lock->bast; + + mlog_entry_void(); + BUG_ON(lock->ml.node != dlm->node_num); + + (*fn)(lock->astdata, blocked_type); +} + + + +int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + int ret; + unsigned int locklen; + struct dlm_ctxt *dlm = data; + struct dlm_lock_resource *res = NULL; + struct dlm_lock *lock = NULL; + struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; + char *name; + struct list_head *iter, *head=NULL; + u64 cookie; + u32 flags; + + if (!dlm_grab(dlm)) { + dlm_error(DLM_REJECTED); + return DLM_REJECTED; + } + + mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), + "Domain %s not fully joined!\n", dlm->name); + + name = past->name; + locklen = past->namelen; + cookie = be64_to_cpu(past->cookie); + flags = be32_to_cpu(past->flags); + + if (locklen > DLM_LOCKID_NAME_MAX) { + ret = DLM_IVBUFLEN; + mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n"); + goto leave; + } + + if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == + (LKM_PUT_LVB|LKM_GET_LVB)) { + mlog(ML_ERROR, "both PUT and GET lvb specified\n"); + ret = DLM_BADARGS; + goto leave; + } + + mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : + (flags & LKM_GET_LVB ? "get lvb" : "none")); + + mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type); + + if (past->type != DLM_AST && + past->type != DLM_BAST) { + mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu" + "name=%.*s\n", past->type, + dlm_get_lock_cookie_node(cookie), + dlm_get_lock_cookie_seq(cookie), + locklen, name); + ret = DLM_IVLOCKID; + goto leave; + } + + res = dlm_lookup_lockres(dlm, name, locklen); + if (!res) { + mlog(0, "got %sast for unknown lockres! " + "cookie=%u:%llu, name=%.*s, namelen=%u\n", + past->type == DLM_AST ? 
"" : "b", + dlm_get_lock_cookie_node(cookie), + dlm_get_lock_cookie_seq(cookie), + locklen, name, locklen); + ret = DLM_IVLOCKID; + goto leave; + } + + /* cannot get a proxy ast message if this node owns it */ + BUG_ON(res->owner == dlm->node_num); + + mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name); + + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_RECOVERING) { + mlog(0, "responding with DLM_RECOVERING!\n"); + ret = DLM_RECOVERING; + goto unlock_out; + } + if (res->state & DLM_LOCK_RES_MIGRATING) { + mlog(0, "responding with DLM_MIGRATING!\n"); + ret = DLM_MIGRATING; + goto unlock_out; + } + /* try convert queue for both ast/bast */ + head = &res->converting; + lock = NULL; + list_for_each(iter, head) { + lock = list_entry (iter, struct dlm_lock, list); + if (be64_to_cpu(lock->ml.cookie) == cookie) + goto do_ast; + } + + /* if not on convert, try blocked for ast, granted for bast */ + if (past->type == DLM_AST) + head = &res->blocked; + else + head = &res->granted; + + list_for_each(iter, head) { + lock = list_entry (iter, struct dlm_lock, list); + if (be64_to_cpu(lock->ml.cookie) == cookie) + goto do_ast; + } + + mlog(0, "got %sast for unknown lock! cookie=%u:%llu, " + "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b", + dlm_get_lock_cookie_node(cookie), + dlm_get_lock_cookie_seq(cookie), + locklen, name, locklen); + + ret = DLM_NORMAL; +unlock_out: + spin_unlock(&res->spinlock); + goto leave; + +do_ast: + ret = DLM_NORMAL; + if (past->type == DLM_AST) { + /* do not alter lock refcount. switching lists. */ + list_move_tail(&lock->list, &res->granted); + mlog(0, "ast: adding to granted list... type=%d, " + "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); + if (lock->ml.convert_type != LKM_IVMODE) { + lock->ml.type = lock->ml.convert_type; + lock->ml.convert_type = LKM_IVMODE; + } else { + // should already be there.... 
+ } + + lock->lksb->status = DLM_NORMAL; + + /* if we requested the lvb, fetch it into our lksb now */ + if (flags & LKM_GET_LVB) { + BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB)); + memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN); + } + } + spin_unlock(&res->spinlock); + + if (past->type == DLM_AST) + dlm_do_local_ast(dlm, res, lock); + else + dlm_do_local_bast(dlm, res, lock, past->blocked_type); + +leave: + + if (res) + dlm_lockres_put(res); + + dlm_put(dlm); + return ret; +} + + + +int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + struct dlm_lock *lock, int msg_type, + int blocked_type, int flags) +{ + int ret = 0; + struct dlm_proxy_ast past; + struct kvec vec[2]; + size_t veclen = 1; + int status; + + mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n", + res->lockname.len, res->lockname.name, lock->ml.node, + msg_type, blocked_type); + + memset(&past, 0, sizeof(struct dlm_proxy_ast)); + past.node_idx = dlm->node_num; + past.type = msg_type; + past.blocked_type = blocked_type; + past.namelen = res->lockname.len; + memcpy(past.name, res->lockname.name, past.namelen); + past.cookie = lock->ml.cookie; + + vec[0].iov_len = sizeof(struct dlm_proxy_ast); + vec[0].iov_base = &past; + if (flags & DLM_LKSB_GET_LVB) { + mlog(0, "returning requested LVB data\n"); + be32_add_cpu(&past.flags, LKM_GET_LVB); + vec[1].iov_len = DLM_LVB_LEN; + vec[1].iov_base = lock->lksb->lvb; + veclen++; + } + + ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, + lock->ml.node, &status); + if (ret < 0) + mlog_errno(ret); + else { + if (status == DLM_RECOVERING) { + mlog(ML_ERROR, "sent AST to node %u, it thinks this " + "node is dead!\n", lock->ml.node); + BUG(); + } else if (status == DLM_MIGRATING) { + mlog(ML_ERROR, "sent AST to node %u, it returned " + "DLM_MIGRATING!\n", lock->ml.node); + BUG(); + } else if (status != DLM_NORMAL && status != DLM_IVLOCKID) { + mlog(ML_ERROR, "AST to node %u returned %d!\n", + lock->ml.node, status); + /* ignore it */ + } + ret = 0; + } + return ret; +} diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h new file mode 100644 index 0000000..d5a86fb --- /dev/null +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -0,0 +1,1112 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmcommon.h + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + +#ifndef DLMCOMMON_H +#define DLMCOMMON_H + +#include <linux/kref.h> + +#define DLM_HB_NODE_DOWN_PRI (0xf000000) +#define DLM_HB_NODE_UP_PRI (0x8000000) + +#define DLM_LOCKID_NAME_MAX 32 + +#define DLM_DOMAIN_NAME_MAX_LEN 255 +#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES +#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes +#define DLM_THREAD_MS 200 // flush at least every 200 ms + +#define DLM_HASH_SIZE_DEFAULT (1 << 14) +#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE +# define DLM_HASH_PAGES 1 +#else +# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE) +#endif +#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head)) +#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE) + +/* Intended to make it easier for us to switch out hash functions */ +#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l) + +enum dlm_mle_type { + DLM_MLE_BLOCK, + DLM_MLE_MASTER, + DLM_MLE_MIGRATION +}; + +struct dlm_lock_name { + u8 len; + u8 name[DLM_LOCKID_NAME_MAX]; +}; + +struct dlm_master_list_entry { + struct list_head list; + struct list_head hb_events; + struct dlm_ctxt *dlm; + spinlock_t spinlock; + wait_queue_head_t wq; + atomic_t woken; + struct kref mle_refs; + int inuse; + unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + u8 master; + u8 new_master; + enum dlm_mle_type type; + struct o2hb_callback_func mle_hb_up; + struct o2hb_callback_func mle_hb_down; + union { + struct dlm_lock_resource *res; + struct dlm_lock_name name; + } u; +}; + +enum dlm_ast_type { + DLM_AST = 0, + DLM_BAST, + DLM_ASTUNLOCK +}; + + +#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \ + LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \ + LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE) + +#define DLM_RECOVERY_LOCK_NAME "$RECOVERY" +#define DLM_RECOVERY_LOCK_NAME_LEN 9 + +static inline int dlm_is_recovery_lock(const char *lock_name, int name_len) +{ + if (name_len == DLM_RECOVERY_LOCK_NAME_LEN && + memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0) + return 1; + return 0; +} + +#define DLM_RECO_STATE_ACTIVE 0x0001 +#define DLM_RECO_STATE_FINALIZE 0x0002 + +struct dlm_recovery_ctxt +{ + struct list_head resources; + struct list_head received; + struct list_head node_data; + u8 new_master; + u8 dead_node; + u16 state; + unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + wait_queue_head_t event; +}; + +enum dlm_ctxt_state { + DLM_CTXT_NEW = 0, + DLM_CTXT_JOINED, + DLM_CTXT_IN_SHUTDOWN, + DLM_CTXT_LEAVING, +}; + +struct dlm_ctxt +{ + struct list_head list; + struct hlist_head **lockres_hash; + struct list_head dirty_list; + struct list_head purge_list; + struct list_head pending_asts; + struct list_head pending_basts; + struct list_head tracking_list; + unsigned int purge_count; + spinlock_t spinlock; + spinlock_t ast_lock; + char *name; + u8 node_num; + u32 key; + u8 joining_node; + wait_queue_head_t dlm_join_events; + unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + struct dlm_recovery_ctxt reco; + spinlock_t master_lock; + struct list_head master_list; + struct list_head mle_hb_events; + + /* these give a really vague idea of the system load */ + atomic_t local_resources; + atomic_t remote_resources; + atomic_t unknown_resources; + + struct 
dlm_debug_ctxt *dlm_debug_ctxt; + struct dentry *dlm_debugfs_subroot; + + /* NOTE: Next three are protected by dlm_domain_lock */ + struct kref dlm_refs; + enum dlm_ctxt_state dlm_state; + unsigned int num_joins; + + struct o2hb_callback_func dlm_hb_up; + struct o2hb_callback_func dlm_hb_down; + struct task_struct *dlm_thread_task; + struct task_struct *dlm_reco_thread_task; + struct workqueue_struct *dlm_worker; + wait_queue_head_t dlm_thread_wq; + wait_queue_head_t dlm_reco_thread_wq; + wait_queue_head_t ast_wq; + wait_queue_head_t migration_wq; + + struct work_struct dispatched_work; + struct list_head work_list; + spinlock_t work_lock; + struct list_head dlm_domain_handlers; + struct list_head dlm_eviction_callbacks; + + /* The filesystem specifies this at domain registration. We + * cache it here to know what to tell other nodes. */ + struct dlm_protocol_version fs_locking_proto; + /* This is the inter-dlm communication version */ + struct dlm_protocol_version dlm_locking_proto; +}; + +static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i) +{ + return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE); +} + +/* these keventd work queue items are for less-frequently + * called functions that cannot be directly called from the + * net message handlers for some reason, usually because + * they need to send net messages of their own. */ +void dlm_dispatch_work(struct work_struct *work); + +struct dlm_lock_resource; +struct dlm_work_item; + +typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *); + +struct dlm_request_all_locks_priv +{ + u8 reco_master; + u8 dead_node; +}; + +struct dlm_mig_lockres_priv +{ + struct dlm_lock_resource *lockres; + u8 real_master; + u8 extra_ref; +}; + +struct dlm_assert_master_priv +{ + struct dlm_lock_resource *lockres; + u8 request_from; + u32 flags; + unsigned ignore_higher:1; +}; + +struct dlm_deref_lockres_priv +{ + struct dlm_lock_resource *deref_res; + u8 deref_node; +}; + +struct dlm_work_item +{ + struct list_head list; + dlm_workfunc_t *func; + struct dlm_ctxt *dlm; + void *data; + union { + struct dlm_request_all_locks_priv ral; + struct dlm_mig_lockres_priv ml; + struct dlm_assert_master_priv am; + struct dlm_deref_lockres_priv dl; + } u; +}; + +static inline void dlm_init_work_item(struct dlm_ctxt *dlm, + struct dlm_work_item *i, + dlm_workfunc_t *f, void *data) +{ + memset(i, 0, sizeof(*i)); + i->func = f; + INIT_LIST_HEAD(&i->list); + i->data = data; + i->dlm = dlm; /* must have already done a dlm_grab on this! */ +} + + + +static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm, + u8 node) +{ + assert_spin_locked(&dlm->spinlock); + + dlm->joining_node = node; + wake_up(&dlm->dlm_join_events); +} + +#define DLM_LOCK_RES_UNINITED 0x00000001 +#define DLM_LOCK_RES_RECOVERING 0x00000002 +#define DLM_LOCK_RES_READY 0x00000004 +#define DLM_LOCK_RES_DIRTY 0x00000008 +#define DLM_LOCK_RES_IN_PROGRESS 0x00000010 +#define DLM_LOCK_RES_MIGRATING 0x00000020 +#define DLM_LOCK_RES_DROPPING_REF 0x00000040 +#define DLM_LOCK_RES_BLOCK_DIRTY 0x00001000 +#define DLM_LOCK_RES_SETREF_INPROG 0x00002000 + +/* max milliseconds to wait to sync up a network failure with a node death */ +#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000) + +#define DLM_PURGE_INTERVAL_MS (8 * 1000) + +struct dlm_lock_resource +{ + /* WARNING: Please see the comment in dlm_init_lockres before + * adding fields here. 
*/ + struct hlist_node hash_node; + struct qstr lockname; + struct kref refs; + + /* + * Please keep granted, converting, and blocked in this order, + * as some funcs want to iterate over all lists. + * + * All four lists are protected by the hash's reference. + */ + struct list_head granted; + struct list_head converting; + struct list_head blocked; + struct list_head purge; + + /* + * These two lists require you to hold an additional reference + * while they are on the list. + */ + struct list_head dirty; + struct list_head recovering; // dlm_recovery_ctxt.resources list + + /* Added during init and removed during release */ + struct list_head tracking; /* dlm->tracking_list */ + + /* unused lock resources have their last_used stamped and are + * put on a list for the dlm thread to run. */ + unsigned long last_used; + + unsigned migration_pending:1; + atomic_t asts_reserved; + spinlock_t spinlock; + wait_queue_head_t wq; + u8 owner; //node which owns the lock resource, or unknown + u16 state; + char lvb[DLM_LVB_LEN]; + unsigned int inflight_locks; + unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; +}; + +struct dlm_migratable_lock +{ + __be64 cookie; + + /* these 3 are just padding for the in-memory structure, but + * list and flags are actually used when sent over the wire */ + __be16 pad1; + u8 list; // 0=granted, 1=converting, 2=blocked + u8 flags; + + s8 type; + s8 convert_type; + s8 highest_blocked; + u8 node; +}; // 16 bytes + +struct dlm_lock +{ + struct dlm_migratable_lock ml; + + struct list_head list; + struct list_head ast_list; + struct list_head bast_list; + struct dlm_lock_resource *lockres; + spinlock_t spinlock; + struct kref lock_refs; + + // ast and bast must be callable while holding a spinlock! + dlm_astlockfunc_t *ast; + dlm_bastlockfunc_t *bast; + void *astdata; + struct dlm_lockstatus *lksb; + unsigned ast_pending:1, + bast_pending:1, + convert_pending:1, + lock_pending:1, + cancel_pending:1, + unlock_pending:1, + lksb_kernel_allocated:1; +}; + + +#define DLM_LKSB_UNUSED1 0x01 +#define DLM_LKSB_PUT_LVB 0x02 +#define DLM_LKSB_GET_LVB 0x04 +#define DLM_LKSB_UNUSED2 0x08 +#define DLM_LKSB_UNUSED3 0x10 +#define DLM_LKSB_UNUSED4 0x20 +#define DLM_LKSB_UNUSED5 0x40 +#define DLM_LKSB_UNUSED6 0x80 + + +enum dlm_lockres_list { + DLM_GRANTED_LIST = 0, + DLM_CONVERTING_LIST, + DLM_BLOCKED_LIST +}; + +static inline int dlm_lvb_is_empty(char *lvb) +{ + int i; + for (i=0; i<DLM_LVB_LEN; i++) + if (lvb[i]) + return 0; + return 1; +} + +static inline struct list_head * +dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) +{ + struct list_head *ret = NULL; + if (idx == DLM_GRANTED_LIST) + ret = &res->granted; + else if (idx == DLM_CONVERTING_LIST) + ret = &res->converting; + else if (idx == DLM_BLOCKED_LIST) + ret = &res->blocked; + else + BUG(); + return ret; +} + + + + +struct dlm_node_iter +{ + unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + int curnode; +}; + + +enum { + DLM_MASTER_REQUEST_MSG = 500, + DLM_UNUSED_MSG1, /* 501 */ + DLM_ASSERT_MASTER_MSG, /* 502 */ + DLM_CREATE_LOCK_MSG, /* 503 */ + DLM_CONVERT_LOCK_MSG, /* 504 */ + DLM_PROXY_AST_MSG, /* 505 */ + DLM_UNLOCK_LOCK_MSG, /* 506 */ + DLM_DEREF_LOCKRES_MSG, /* 507 */ + DLM_MIGRATE_REQUEST_MSG, /* 508 */ + DLM_MIG_LOCKRES_MSG, /* 509 */ + DLM_QUERY_JOIN_MSG, /* 510 */ + DLM_ASSERT_JOINED_MSG, /* 511 */ + DLM_CANCEL_JOIN_MSG, /* 512 */ + DLM_EXIT_DOMAIN_MSG, /* 513 */ + DLM_MASTER_REQUERY_MSG, /* 514 */ + DLM_LOCK_REQUEST_MSG, /* 515 */ + DLM_RECO_DATA_DONE_MSG, /* 516 */ + 
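+ /* (note: these numbers identify message types on the wire, so
+  * existing values must stay stable; gaps such as DLM_UNUSED_MSG1
+  * are kept as placeholders for the same reason) */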
DLM_BEGIN_RECO_MSG, /* 517 */ + DLM_FINALIZE_RECO_MSG /* 518 */ +}; + +struct dlm_reco_node_data +{ + int state; + u8 node_num; + struct list_head list; +}; + +enum { + DLM_RECO_NODE_DATA_DEAD = -1, + DLM_RECO_NODE_DATA_INIT = 0, + DLM_RECO_NODE_DATA_REQUESTING, + DLM_RECO_NODE_DATA_REQUESTED, + DLM_RECO_NODE_DATA_RECEIVING, + DLM_RECO_NODE_DATA_DONE, + DLM_RECO_NODE_DATA_FINALIZE_SENT, +}; + + +enum { + DLM_MASTER_RESP_NO = 0, + DLM_MASTER_RESP_YES, + DLM_MASTER_RESP_MAYBE, + DLM_MASTER_RESP_ERROR +}; + + +struct dlm_master_request +{ + u8 node_idx; + u8 namelen; + __be16 pad1; + __be32 flags; + + u8 name[O2NM_MAX_NAME_LEN]; +}; + +#define DLM_ASSERT_RESPONSE_REASSERT 0x00000001 +#define DLM_ASSERT_RESPONSE_MASTERY_REF 0x00000002 + +#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001 +#define DLM_ASSERT_MASTER_REQUERY 0x00000002 +#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004 +struct dlm_assert_master +{ + u8 node_idx; + u8 namelen; + __be16 pad1; + __be32 flags; + + u8 name[O2NM_MAX_NAME_LEN]; +}; + +#define DLM_MIGRATE_RESPONSE_MASTERY_REF 0x00000001 + +struct dlm_migrate_request +{ + u8 master; + u8 new_master; + u8 namelen; + u8 pad1; + __be32 pad2; + u8 name[O2NM_MAX_NAME_LEN]; +}; + +struct dlm_master_requery +{ + u8 pad1; + u8 pad2; + u8 node_idx; + u8 namelen; + __be32 pad3; + u8 name[O2NM_MAX_NAME_LEN]; +}; + +#define DLM_MRES_RECOVERY 0x01 +#define DLM_MRES_MIGRATION 0x02 +#define DLM_MRES_ALL_DONE 0x04 + +/* + * We would like to get one whole lockres into a single network + * message whenever possible. Generally speaking, there will be + * at most one dlm_lock on a lockres for each node in the cluster, + * plus (infrequently) any additional locks coming in from userdlm. + * + * struct _dlm_lockres_page + * { + * dlm_migratable_lockres mres; + * dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS]; + * u8 pad[DLM_MIG_LOCKRES_RESERVED]; + * }; + * + * from ../cluster/tcp.h + * NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg)) + * (roughly 4080 bytes) + * and sizeof(dlm_migratable_lockres) = 112 bytes + * and sizeof(dlm_migratable_lock) = 16 bytes + * + * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and + * DLM_MIG_LOCKRES_RESERVED=128 means we have this: + * + * (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) + + * sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED = + * NET_MAX_PAYLOAD_BYTES + * (240 * 16) + 112 + 128 = 4080 + * + * So a lockres would need more than 240 locks before it would + * use more than one network packet to recover. Not too bad. 
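+ *
+ * Those size assumptions could be verified at build time with the
+ * usual kernel idiom (an illustrative sketch):
+ *
+ *   BUILD_BUG_ON(sizeof(struct dlm_migratable_lock) != 16);
+ *   BUILD_BUG_ON(sizeof(struct dlm_migratable_lockres) != 112);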
+ */ +#define DLM_MAX_MIGRATABLE_LOCKS 240 + +struct dlm_migratable_lockres +{ + u8 master; + u8 lockname_len; + u8 num_locks; // locks sent in this structure + u8 flags; + __be32 total_locks; // locks to be sent for this migration cookie + __be64 mig_cookie; // cookie for this lockres migration + // or zero if not needed + // 16 bytes + u8 lockname[DLM_LOCKID_NAME_MAX]; + // 48 bytes + u8 lvb[DLM_LVB_LEN]; + // 112 bytes + struct dlm_migratable_lock ml[0]; // 16 bytes each, begins at byte 112 +}; +#define DLM_MIG_LOCKRES_MAX_LEN \ + (sizeof(struct dlm_migratable_lockres) + \ + (sizeof(struct dlm_migratable_lock) * \ + DLM_MAX_MIGRATABLE_LOCKS) ) + +/* from above, 128 bytes + * for some undetermined future use */ +#define DLM_MIG_LOCKRES_RESERVED (NET_MAX_PAYLOAD_BYTES - \ + DLM_MIG_LOCKRES_MAX_LEN) + +struct dlm_create_lock +{ + __be64 cookie; + + __be32 flags; + u8 pad1; + u8 node_idx; + s8 requested_type; + u8 namelen; + + u8 name[O2NM_MAX_NAME_LEN]; +}; + +struct dlm_convert_lock +{ + __be64 cookie; + + __be32 flags; + u8 pad1; + u8 node_idx; + s8 requested_type; + u8 namelen; + + u8 name[O2NM_MAX_NAME_LEN]; + + s8 lvb[0]; +}; +#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN) + +struct dlm_unlock_lock +{ + __be64 cookie; + + __be32 flags; + __be16 pad1; + u8 node_idx; + u8 namelen; + + u8 name[O2NM_MAX_NAME_LEN]; + + s8 lvb[0]; +}; +#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN) + +struct dlm_proxy_ast +{ + __be64 cookie; + + __be32 flags; + u8 node_idx; + u8 type; + u8 blocked_type; + u8 namelen; + + u8 name[O2NM_MAX_NAME_LEN]; + + s8 lvb[0]; +}; +#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) + +#define DLM_MOD_KEY (0x666c6172) +enum dlm_query_join_response_code { + JOIN_DISALLOW = 0, + JOIN_OK, + JOIN_OK_NO_MAP, + JOIN_PROTOCOL_MISMATCH, +}; + +struct dlm_query_join_packet { + u8 code; /* Response code. dlm_minor and fs_minor + are only valid if this is JOIN_OK */ + u8 dlm_minor; /* The minor version of the protocol the + dlm is speaking. */ + u8 fs_minor; /* The minor version of the protocol the + filesystem is speaking. 
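+ * (the four bytes of this packet are carried on the wire packed
+ * into a single u32; see union dlm_query_join_response below)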
*/ + u8 reserved; +}; + +union dlm_query_join_response { + u32 intval; + struct dlm_query_join_packet packet; +}; + +struct dlm_lock_request +{ + u8 node_idx; + u8 dead_node; + __be16 pad1; + __be32 pad2; +}; + +struct dlm_reco_data_done +{ + u8 node_idx; + u8 dead_node; + __be16 pad1; + __be32 pad2; + + /* unused for now */ + /* eventually we can use this to attempt + * lvb recovery based on each node's info */ + u8 reco_lvb[DLM_LVB_LEN]; +}; + +struct dlm_begin_reco +{ + u8 node_idx; + u8 dead_node; + __be16 pad1; + __be32 pad2; +}; + + +#define BITS_PER_BYTE 8 +#define BITS_TO_BYTES(bits) (((bits)+BITS_PER_BYTE-1)/BITS_PER_BYTE) + +struct dlm_query_join_request +{ + u8 node_idx; + u8 pad1[2]; + u8 name_len; + struct dlm_protocol_version dlm_proto; + struct dlm_protocol_version fs_proto; + u8 domain[O2NM_MAX_NAME_LEN]; + u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)]; +}; + +struct dlm_assert_joined +{ + u8 node_idx; + u8 pad1[2]; + u8 name_len; + u8 domain[O2NM_MAX_NAME_LEN]; +}; + +struct dlm_cancel_join +{ + u8 node_idx; + u8 pad1[2]; + u8 name_len; + u8 domain[O2NM_MAX_NAME_LEN]; +}; + +struct dlm_exit_domain +{ + u8 node_idx; + u8 pad1[3]; +}; + +struct dlm_finalize_reco +{ + u8 node_idx; + u8 dead_node; + u8 flags; + u8 pad1; + __be32 pad2; +}; + +struct dlm_deref_lockres +{ + u32 pad1; + u16 pad2; + u8 node_idx; + u8 namelen; + + u8 name[O2NM_MAX_NAME_LEN]; +}; + +static inline enum dlm_status +__dlm_lockres_state_to_status(struct dlm_lock_resource *res) +{ + enum dlm_status status = DLM_NORMAL; + + assert_spin_locked(&res->spinlock); + + if (res->state & DLM_LOCK_RES_RECOVERING) + status = DLM_RECOVERING; + else if (res->state & DLM_LOCK_RES_MIGRATING) + status = DLM_MIGRATING; + else if (res->state & DLM_LOCK_RES_IN_PROGRESS) + status = DLM_FORWARD; + + return status; +} + +static inline u8 dlm_get_lock_cookie_node(u64 cookie) +{ + u8 ret; + cookie >>= 56; + ret = (u8)(cookie & 0xffULL); + return ret; +} + +static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie) +{ + unsigned long long ret; + ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL; + return ret; +} + +struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, + struct dlm_lockstatus *lksb); +void dlm_lock_get(struct dlm_lock *lock); +void dlm_lock_put(struct dlm_lock *lock); + +void dlm_lock_attach_lockres(struct dlm_lock *lock, + struct dlm_lock_resource *res); + +int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); + +void dlm_revert_pending_convert(struct dlm_lock_resource *res, + struct dlm_lock *lock); +void dlm_revert_pending_lock(struct dlm_lock_resource *res, + struct dlm_lock *lock); + +int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +void dlm_commit_pending_cancel(struct dlm_lock_resource *res, + struct dlm_lock *lock); +void dlm_commit_pending_unlock(struct dlm_lock_resource *res, + struct dlm_lock *lock); + +int dlm_launch_thread(struct dlm_ctxt *dlm); +void dlm_complete_thread(struct dlm_ctxt *dlm); +int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); +void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); +void dlm_wait_for_recovery(struct dlm_ctxt *dlm); +void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); +int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); +int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 
node, int timeout); +int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); + +void dlm_put(struct dlm_ctxt *dlm); +struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); +int dlm_domain_fully_joined(struct dlm_ctxt *dlm); + +void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +static inline void dlm_lockres_get(struct dlm_lock_resource *res) +{ + /* This is called on every lookup, so it might be worth + * inlining. */ + kref_get(&res->refs); +} +void dlm_lockres_put(struct dlm_lock_resource *res); +void __dlm_unhash_lockres(struct dlm_lock_resource *res); +void __dlm_insert_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, + const char *name, + unsigned int len, + unsigned int hash); +struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, + const char *name, + unsigned int len, + unsigned int hash); +struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, + const char *name, + unsigned int len); + +int dlm_is_host_down(int errno); +void dlm_change_lockres_owner(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 owner); +struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, + const char *lockid, + int namelen, + int flags); +struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, + const char *name, + unsigned int namelen); + +#define dlm_lockres_set_refmap_bit(bit,res) \ + __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__) +#define dlm_lockres_clear_refmap_bit(bit,res) \ + __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__) + +static inline void __dlm_lockres_set_refmap_bit(int bit, + struct dlm_lock_resource *res, + const char *file, + int line) +{ + //printk("%s:%d:%.*s: setting bit %d\n", file, line, + // res->lockname.len, res->lockname.name, bit); + set_bit(bit, res->refmap); +} + +static inline void __dlm_lockres_clear_refmap_bit(int bit, + struct dlm_lock_resource *res, + const char *file, + int line) +{ + //printk("%s:%d:%.*s: clearing bit %d\n", file, line, + // res->lockname.len, res->lockname.name, bit); + clear_bit(bit, res->refmap); +} + +void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + const char *file, + int line); +void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + int new_lockres, + const char *file, + int line); +#define dlm_lockres_drop_inflight_ref(d,r) \ + __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__) +#define dlm_lockres_grab_inflight_ref(d,r) \ + __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__) +#define dlm_lockres_grab_inflight_ref_new(d,r) \ + __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__) + +void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); +void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); +void dlm_do_local_ast(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock); +int dlm_do_remote_ast(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock); +void dlm_do_local_bast(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + int blocked_type); +int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + int msg_type, + int blocked_type, int flags); +static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm, 
+ struct dlm_lock_resource *res, + struct dlm_lock *lock, + int blocked_type) +{ + return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST, + blocked_type, 0); +} + +static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + int flags) +{ + return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST, + 0, flags); +} + +void dlm_print_one_lock_resource(struct dlm_lock_resource *res); +void __dlm_print_one_lock_resource(struct dlm_lock_resource *res); + +u8 dlm_nm_this_node(struct dlm_ctxt *dlm); +void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); +void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); + + +int dlm_nm_init(struct dlm_ctxt *dlm); +int dlm_heartbeat_init(struct dlm_ctxt *dlm); +void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data); +void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data); + +int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); +int dlm_finish_migration(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 old_master); +void dlm_lockres_release_ast(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res); + +int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +void dlm_assert_master_post_handler(int status, void *data, void *ret_data); +int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + u8 nodenum, u8 *real_master); + + +int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + int ignore_higher, + u8 request_from, + u32 flags); + + +int dlm_send_one_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_migratable_lockres *mres, + u8 send_to, + u8 flags); +void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); + +/* will exit holding res->spinlock, but may drop in function */ +void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags); +void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags); + +/* will exit holding res->spinlock, but may drop in function */ +static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res) +{ + __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS| + DLM_LOCK_RES_RECOVERING| + DLM_LOCK_RES_MIGRATING)); +} + +/* create/destroy slab caches */ +int dlm_init_master_caches(void); +void dlm_destroy_master_caches(void); + +int dlm_init_lock_cache(void); +void dlm_destroy_lock_cache(void); + +int dlm_init_mle_cache(void); +void dlm_destroy_mle_cache(void); + +void 
dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up); +int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +void dlm_clean_master_list(struct dlm_ctxt *dlm, + u8 dead_node); +int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); +int __dlm_lockres_has_locks(struct dlm_lock_resource *res); +int __dlm_lockres_unused(struct dlm_lock_resource *res); + +static inline const char * dlm_lock_mode_name(int mode) +{ + switch (mode) { + case LKM_EXMODE: + return "EX"; + case LKM_PRMODE: + return "PR"; + case LKM_NLMODE: + return "NL"; + } + return "UNKNOWN"; +} + + +static inline int dlm_lock_compatible(int existing, int request) +{ + /* NO_LOCK compatible with all */ + if (request == LKM_NLMODE || + existing == LKM_NLMODE) + return 1; + + /* EX incompatible with all non-NO_LOCK */ + if (request == LKM_EXMODE) + return 0; + + /* request must be PR, which is compatible with PR */ + if (existing == LKM_PRMODE) + return 1; + + return 0; +} + +static inline int dlm_lock_on_list(struct list_head *head, + struct dlm_lock *lock) +{ + struct list_head *iter; + struct dlm_lock *tmplock; + + list_for_each(iter, head) { + tmplock = list_entry(iter, struct dlm_lock, list); + if (tmplock == lock) + return 1; + } + return 0; +} + + +static inline enum dlm_status dlm_err_to_dlm_status(int err) +{ + enum dlm_status ret; + if (err == -ENOMEM) + ret = DLM_SYSERR; + else if (err == -ETIMEDOUT || o2net_link_down(err, NULL)) + ret = DLM_NOLOCKMGR; + else if (err == -EINVAL) + ret = DLM_BADPARAM; + else if (err == -ENAMETOOLONG) + ret = DLM_IVBUFLEN; + else + ret = DLM_BADARGS; + return ret; +} + + +static inline void dlm_node_iter_init(unsigned long *map, + struct dlm_node_iter *iter) +{ + memcpy(iter->node_map, map, sizeof(iter->node_map)); + iter->curnode = -1; +} + +static inline int dlm_node_iter_next(struct dlm_node_iter *iter) +{ + int bit; + bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1); + if (bit >= O2NM_MAX_NODES) { + iter->curnode = O2NM_MAX_NODES; + return -ENOENT; + } + iter->curnode = bit; + return bit; +} + + + +#endif /* DLMCOMMON_H */ diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c new file mode 100644 index 0000000..75997b4 --- /dev/null +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -0,0 +1,548 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmconvert.c + * + * underlying calls for lock conversion + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/random.h> +#include <linux/blkdev.h> +#include <linux/socket.h> +#include <linux/inet.h> +#include <linux/spinlock.h> + + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" + +#include "dlmconvert.h" + +#define MLOG_MASK_PREFIX ML_DLM +#include "cluster/masklog.h" + +/* NOTE: __dlmconvert_master is the only function in here that + * needs a spinlock held on entry (res->spinlock) and it is the + * only one that holds a lock on exit (res->spinlock). + * All other functions in here need no locks and drop all of + * the locks that they acquire. */ +static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, + int type, int *call_ast, + int *kick_thread); +static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, int type); + +/* + * this is only called directly by dlmlock(), and only when the + * local node is the owner of the lockres + * locking: + * caller needs: none + * taken: takes and drops res->spinlock + * held on exit: none + * returns: see __dlmconvert_master + */ +enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, int type) +{ + int call_ast = 0, kick_thread = 0; + enum dlm_status status; + + spin_lock(&res->spinlock); + /* we are not in a network handler, this is fine */ + __dlm_wait_on_lockres(res); + __dlm_lockres_reserve_ast(res); + res->state |= DLM_LOCK_RES_IN_PROGRESS; + + status = __dlmconvert_master(dlm, res, lock, flags, type, + &call_ast, &kick_thread); + + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; + spin_unlock(&res->spinlock); + wake_up(&res->wq); + if (status != DLM_NORMAL && status != DLM_NOTQUEUED) + dlm_error(status); + + /* either queue the ast or release it */ + if (call_ast) + dlm_queue_ast(dlm, lock); + else + dlm_lockres_release_ast(dlm, res); + + if (kick_thread) + dlm_kick_thread(dlm, res); + + return status; +} + +/* performs lock conversion at the lockres master site + * locking: + * caller needs: res->spinlock + * taken: takes and drops lock->spinlock + * held on exit: res->spinlock + * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED + * call_ast: whether ast should be called for this lock + * kick_thread: whether dlm_kick_thread should be called + */ +static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, + int type, int *call_ast, + int *kick_thread) +{ + enum dlm_status status = DLM_NORMAL; + struct list_head *iter; + struct dlm_lock *tmplock=NULL; + + assert_spin_locked(&res->spinlock); + + mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n", + lock->ml.type, lock->ml.convert_type, type); + + spin_lock(&lock->spinlock); + + /* already converting? 
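+ * (LKM_IVMODE in ml.convert_type doubles as the "no conversion
+ * in progress" sentinel)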
*/ + if (lock->ml.convert_type != LKM_IVMODE) { + mlog(ML_ERROR, "attempted to convert a lock with a lock " + "conversion pending\n"); + status = DLM_DENIED; + goto unlock_exit; + } + + /* must be on grant queue to convert */ + if (!dlm_lock_on_list(&res->granted, lock)) { + mlog(ML_ERROR, "attempted to convert a lock not on grant " + "queue\n"); + status = DLM_DENIED; + goto unlock_exit; + } + + if (flags & LKM_VALBLK) { + switch (lock->ml.type) { + case LKM_EXMODE: + /* EX + LKM_VALBLK + convert == set lvb */ + mlog(0, "will set lvb: converting %s->%s\n", + dlm_lock_mode_name(lock->ml.type), + dlm_lock_mode_name(type)); + lock->lksb->flags |= DLM_LKSB_PUT_LVB; + break; + case LKM_PRMODE: + case LKM_NLMODE: + /* refetch if new level is not NL */ + if (type > LKM_NLMODE) { + mlog(0, "will fetch new value into " + "lvb: converting %s->%s\n", + dlm_lock_mode_name(lock->ml.type), + dlm_lock_mode_name(type)); + lock->lksb->flags |= DLM_LKSB_GET_LVB; + } else { + mlog(0, "will NOT fetch new value " + "into lvb: converting %s->%s\n", + dlm_lock_mode_name(lock->ml.type), + dlm_lock_mode_name(type)); + flags &= ~(LKM_VALBLK); + } + break; + } + } + + + /* in-place downconvert? */ + if (type <= lock->ml.type) + goto grant; + + /* upconvert from here on */ + status = DLM_NORMAL; + list_for_each(iter, &res->granted) { + tmplock = list_entry(iter, struct dlm_lock, list); + if (tmplock == lock) + continue; + if (!dlm_lock_compatible(tmplock->ml.type, type)) + goto switch_queues; + } + + list_for_each(iter, &res->converting) { + tmplock = list_entry(iter, struct dlm_lock, list); + if (!dlm_lock_compatible(tmplock->ml.type, type)) + goto switch_queues; + /* existing conversion requests take precedence */ + if (!dlm_lock_compatible(tmplock->ml.convert_type, type)) + goto switch_queues; + } + + /* fall thru to grant */ + +grant: + mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, + res->lockname.name, dlm_lock_mode_name(type)); + /* immediately grant the new lock type */ + lock->lksb->status = DLM_NORMAL; + if (lock->ml.node == dlm->node_num) + mlog(0, "doing in-place convert for nonlocal lock\n"); + lock->ml.type = type; + if (lock->lksb->flags & DLM_LKSB_PUT_LVB) + memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN); + + status = DLM_NORMAL; + *call_ast = 1; + goto unlock_exit; + +switch_queues: + if (flags & LKM_NOQUEUE) { + mlog(0, "failed to convert NOQUEUE lock %.*s from " + "%d to %d...\n", res->lockname.len, res->lockname.name, + lock->ml.type, type); + status = DLM_NOTQUEUED; + goto unlock_exit; + } + mlog(0, "res %.*s, queueing...\n", res->lockname.len, + res->lockname.name); + + lock->ml.convert_type = type; + /* do not alter lock refcount. switching lists. */ + list_move_tail(&lock->list, &res->converting); + +unlock_exit: + spin_unlock(&lock->spinlock); + if (status == DLM_DENIED) { + __dlm_print_one_lock_resource(res); + } + if (status == DLM_NORMAL) + *kick_thread = 1; + return status; +} + +void dlm_revert_pending_convert(struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + /* do not alter lock refcount. switching lists. 
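+ * a queued lock holds one list reference; list_move_tail keeps
+ * it on exactly one queue, so no get/put is needed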
*/ + list_move_tail(&lock->list, &res->granted); + lock->ml.convert_type = LKM_IVMODE; + lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); +} + +/* messages the master site to do lock conversion + * locking: + * caller needs: none + * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS + * held on exit: none + * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node + */ +enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, int type) +{ + enum dlm_status status; + + mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, + lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); + + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_RECOVERING) { + mlog(0, "bailing out early since res is RECOVERING " + "on secondary queue\n"); + /* __dlm_print_one_lock_resource(res); */ + status = DLM_RECOVERING; + goto bail; + } + /* will exit this call with spinlock held */ + __dlm_wait_on_lockres(res); + + if (lock->ml.convert_type != LKM_IVMODE) { + __dlm_print_one_lock_resource(res); + mlog(ML_ERROR, "converting a remote lock that is already " + "converting! (cookie=%u:%llu, conv=%d)\n", + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + lock->ml.convert_type); + status = DLM_DENIED; + goto bail; + } + res->state |= DLM_LOCK_RES_IN_PROGRESS; + /* move lock to local convert queue */ + /* do not alter lock refcount. switching lists. */ + list_move_tail(&lock->list, &res->converting); + lock->convert_pending = 1; + lock->ml.convert_type = type; + + if (flags & LKM_VALBLK) { + if (lock->ml.type == LKM_EXMODE) { + flags |= LKM_PUT_LVB; + lock->lksb->flags |= DLM_LKSB_PUT_LVB; + } else { + if (lock->ml.convert_type == LKM_NLMODE) + flags &= ~LKM_VALBLK; + else { + flags |= LKM_GET_LVB; + lock->lksb->flags |= DLM_LKSB_GET_LVB; + } + } + } + spin_unlock(&res->spinlock); + + /* no locks held here. + * need to wait for a reply as to whether it got queued or not. */ + status = dlm_send_remote_convert_request(dlm, res, lock, flags, type); + + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; + lock->convert_pending = 0; + /* if it failed, move it back to granted queue */ + if (status != DLM_NORMAL) { + if (status != DLM_NOTQUEUED) + dlm_error(status); + dlm_revert_pending_convert(res, lock); + } +bail: + spin_unlock(&res->spinlock); + + /* TODO: should this be a wake_one? 
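+ * (wake_up() wakes all non-exclusive waiters on res->wq; waking
+ * only one would require waiters to queue themselves exclusive)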
*/ + /* wake up any IN_PROGRESS waiters */ + wake_up(&res->wq); + + return status; +} + +/* sends DLM_CONVERT_LOCK_MSG to master site + * locking: + * caller needs: none + * taken: none + * held on exit: none + * returns: DLM_NOLOCKMGR, status from remote node + */ +static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, int type) +{ + struct dlm_convert_lock convert; + int tmpret; + enum dlm_status ret; + int status = 0; + struct kvec vec[2]; + size_t veclen = 1; + + mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); + + memset(&convert, 0, sizeof(struct dlm_convert_lock)); + convert.node_idx = dlm->node_num; + convert.requested_type = type; + convert.cookie = lock->ml.cookie; + convert.namelen = res->lockname.len; + convert.flags = cpu_to_be32(flags); + memcpy(convert.name, res->lockname.name, convert.namelen); + + vec[0].iov_len = sizeof(struct dlm_convert_lock); + vec[0].iov_base = &convert; + + if (flags & LKM_PUT_LVB) { + /* extra data to send if we are updating lvb */ + vec[1].iov_len = DLM_LVB_LEN; + vec[1].iov_base = lock->lksb->lvb; + veclen++; + } + + tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key, + vec, veclen, res->owner, &status); + if (tmpret >= 0) { + // successfully sent and received + ret = status; // this is already a dlm_status + if (ret == DLM_RECOVERING) { + mlog(0, "node %u returned DLM_RECOVERING from convert " + "message!\n", res->owner); + } else if (ret == DLM_MIGRATING) { + mlog(0, "node %u returned DLM_MIGRATING from convert " + "message!\n", res->owner); + } else if (ret == DLM_FORWARD) { + mlog(0, "node %u returned DLM_FORWARD from convert " + "message!\n", res->owner); + } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED) + dlm_error(ret); + } else { + mlog_errno(tmpret); + if (dlm_is_host_down(tmpret)) { + /* instead of logging the same network error over + * and over, sleep here and wait for the heartbeat + * to notice the node is dead. times out after 5s. */ + dlm_wait_for_node_death(dlm, res->owner, + DLM_NODE_DEATH_WAIT_MAX); + ret = DLM_RECOVERING; + mlog(0, "node %u died so returning DLM_RECOVERING " + "from convert message!\n", res->owner); + } else { + ret = dlm_err_to_dlm_status(tmpret); + } + } + + return ret; +} + +/* handler for DLM_CONVERT_LOCK_MSG on master site + * locking: + * caller needs: none + * taken: takes and drop res->spinlock + * held on exit: none + * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, + * status from __dlmconvert_master + */ +int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; + struct dlm_lock_resource *res = NULL; + struct list_head *iter; + struct dlm_lock *lock = NULL; + struct dlm_lockstatus *lksb; + enum dlm_status status = DLM_NORMAL; + u32 flags; + int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0; + + if (!dlm_grab(dlm)) { + dlm_error(DLM_REJECTED); + return DLM_REJECTED; + } + + mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), + "Domain %s not fully joined!\n", dlm->name); + + if (cnv->namelen > DLM_LOCKID_NAME_MAX) { + status = DLM_IVBUFLEN; + dlm_error(status); + goto leave; + } + + flags = be32_to_cpu(cnv->flags); + + if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == + (LKM_PUT_LVB|LKM_GET_LVB)) { + mlog(ML_ERROR, "both PUT and GET lvb specified\n"); + status = DLM_BADARGS; + goto leave; + } + + mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? 
"put lvb" : + (flags & LKM_GET_LVB ? "get lvb" : "none")); + + status = DLM_IVLOCKID; + res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen); + if (!res) { + dlm_error(status); + goto leave; + } + + spin_lock(&res->spinlock); + status = __dlm_lockres_state_to_status(res); + if (status != DLM_NORMAL) { + spin_unlock(&res->spinlock); + dlm_error(status); + goto leave; + } + list_for_each(iter, &res->granted) { + lock = list_entry(iter, struct dlm_lock, list); + if (lock->ml.cookie == cnv->cookie && + lock->ml.node == cnv->node_idx) { + dlm_lock_get(lock); + break; + } + lock = NULL; + } + spin_unlock(&res->spinlock); + if (!lock) { + status = DLM_IVLOCKID; + mlog(ML_ERROR, "did not find lock to convert on grant queue! " + "cookie=%u:%llu\n", + dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie))); + dlm_print_one_lock_resource(res); + goto leave; + } + + /* found the lock */ + lksb = lock->lksb; + + /* see if caller needed to get/put lvb */ + if (flags & LKM_PUT_LVB) { + BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); + lksb->flags |= DLM_LKSB_PUT_LVB; + memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN); + } else if (flags & LKM_GET_LVB) { + BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); + lksb->flags |= DLM_LKSB_GET_LVB; + } + + spin_lock(&res->spinlock); + status = __dlm_lockres_state_to_status(res); + if (status == DLM_NORMAL) { + __dlm_lockres_reserve_ast(res); + ast_reserved = 1; + res->state |= DLM_LOCK_RES_IN_PROGRESS; + status = __dlmconvert_master(dlm, res, lock, flags, + cnv->requested_type, + &call_ast, &kick_thread); + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; + wake = 1; + } + spin_unlock(&res->spinlock); + if (wake) + wake_up(&res->wq); + + if (status != DLM_NORMAL) { + if (status != DLM_NOTQUEUED) + dlm_error(status); + lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); + } + +leave: + if (lock) + dlm_lock_put(lock); + + /* either queue the ast or release it, if reserved */ + if (call_ast) + dlm_queue_ast(dlm, lock); + else if (ast_reserved) + dlm_lockres_release_ast(dlm, res); + + if (kick_thread) + dlm_kick_thread(dlm, res); + + if (res) + dlm_lockres_put(res); + + dlm_put(dlm); + + return status; +} diff --git a/fs/ocfs2/dlm/dlmconvert.h b/fs/ocfs2/dlm/dlmconvert.h new file mode 100644 index 0000000..b2e3677 --- /dev/null +++ b/fs/ocfs2/dlm/dlmconvert.h @@ -0,0 +1,35 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmconvert.h + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + +#ifndef DLMCONVERT_H +#define DLMCONVERT_H + +enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, int type); +enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags, int type); + +#endif diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c new file mode 100644 index 0000000..1b81dcb --- /dev/null +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -0,0 +1,1037 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmdebug.c + * + * debug functionality for the dlm + * + * Copyright (C) 2004, 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/sysctl.h> +#include <linux/spinlock.h> +#include <linux/debugfs.h> + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" +#include "dlmdomain.h" +#include "dlmdebug.h" + +#define MLOG_MASK_PREFIX ML_DLM +#include "cluster/masklog.h" + +static int stringify_lockname(const char *lockname, int locklen, char *buf, + int len); + +void dlm_print_one_lock_resource(struct dlm_lock_resource *res) +{ + spin_lock(&res->spinlock); + __dlm_print_one_lock_resource(res); + spin_unlock(&res->spinlock); +} + +static void dlm_print_lockres_refmap(struct dlm_lock_resource *res) +{ + int bit; + assert_spin_locked(&res->spinlock); + + printk(" refmap nodes: [ "); + bit = 0; + while (1) { + bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); + if (bit >= O2NM_MAX_NODES) + break; + printk("%u ", bit); + bit++; + } + printk("], inflight=%u\n", res->inflight_locks); +} + +static void __dlm_print_lock(struct dlm_lock *lock) +{ + spin_lock(&lock->spinlock); + + printk(" type=%d, conv=%d, node=%u, cookie=%u:%llu, " + "ref=%u, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c), " + "pending=(conv=%c,lock=%c,cancel=%c,unlock=%c)\n", + lock->ml.type, lock->ml.convert_type, lock->ml.node, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + atomic_read(&lock->lock_refs.refcount), + (list_empty(&lock->ast_list) ? 'y' : 'n'), + (lock->ast_pending ? 'y' : 'n'), + (list_empty(&lock->bast_list) ? 'y' : 'n'), + (lock->bast_pending ? 'y' : 'n'), + (lock->convert_pending ? 'y' : 'n'), + (lock->lock_pending ? 'y' : 'n'), + (lock->cancel_pending ? 'y' : 'n'), + (lock->unlock_pending ? 
'y' : 'n')); + + spin_unlock(&lock->spinlock); +} + +void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) +{ + struct list_head *iter2; + struct dlm_lock *lock; + char buf[DLM_LOCKID_NAME_MAX]; + + assert_spin_locked(&res->spinlock); + + stringify_lockname(res->lockname.name, res->lockname.len, + buf, sizeof(buf) - 1); + printk("lockres: %s, owner=%u, state=%u\n", + buf, res->owner, res->state); + printk(" last used: %lu, refcnt: %u, on purge list: %s\n", + res->last_used, atomic_read(&res->refs.refcount), + list_empty(&res->purge) ? "no" : "yes"); + printk(" on dirty list: %s, on reco list: %s, " + "migrating pending: %s\n", + list_empty(&res->dirty) ? "no" : "yes", + list_empty(&res->recovering) ? "no" : "yes", + res->migration_pending ? "yes" : "no"); + printk(" inflight locks: %d, asts reserved: %d\n", + res->inflight_locks, atomic_read(&res->asts_reserved)); + dlm_print_lockres_refmap(res); + printk(" granted queue:\n"); + list_for_each(iter2, &res->granted) { + lock = list_entry(iter2, struct dlm_lock, list); + __dlm_print_lock(lock); + } + printk(" converting queue:\n"); + list_for_each(iter2, &res->converting) { + lock = list_entry(iter2, struct dlm_lock, list); + __dlm_print_lock(lock); + } + printk(" blocked queue:\n"); + list_for_each(iter2, &res->blocked) { + lock = list_entry(iter2, struct dlm_lock, list); + __dlm_print_lock(lock); + } +} + +void dlm_print_one_lock(struct dlm_lock *lockid) +{ + dlm_print_one_lock_resource(lockid->lockres); +} +EXPORT_SYMBOL_GPL(dlm_print_one_lock); + +static const char *dlm_errnames[] = { + [DLM_NORMAL] = "DLM_NORMAL", + [DLM_GRANTED] = "DLM_GRANTED", + [DLM_DENIED] = "DLM_DENIED", + [DLM_DENIED_NOLOCKS] = "DLM_DENIED_NOLOCKS", + [DLM_WORKING] = "DLM_WORKING", + [DLM_BLOCKED] = "DLM_BLOCKED", + [DLM_BLOCKED_ORPHAN] = "DLM_BLOCKED_ORPHAN", + [DLM_DENIED_GRACE_PERIOD] = "DLM_DENIED_GRACE_PERIOD", + [DLM_SYSERR] = "DLM_SYSERR", + [DLM_NOSUPPORT] = "DLM_NOSUPPORT", + [DLM_CANCELGRANT] = "DLM_CANCELGRANT", + [DLM_IVLOCKID] = "DLM_IVLOCKID", + [DLM_SYNC] = "DLM_SYNC", + [DLM_BADTYPE] = "DLM_BADTYPE", + [DLM_BADRESOURCE] = "DLM_BADRESOURCE", + [DLM_MAXHANDLES] = "DLM_MAXHANDLES", + [DLM_NOCLINFO] = "DLM_NOCLINFO", + [DLM_NOLOCKMGR] = "DLM_NOLOCKMGR", + [DLM_NOPURGED] = "DLM_NOPURGED", + [DLM_BADARGS] = "DLM_BADARGS", + [DLM_VOID] = "DLM_VOID", + [DLM_NOTQUEUED] = "DLM_NOTQUEUED", + [DLM_IVBUFLEN] = "DLM_IVBUFLEN", + [DLM_CVTUNGRANT] = "DLM_CVTUNGRANT", + [DLM_BADPARAM] = "DLM_BADPARAM", + [DLM_VALNOTVALID] = "DLM_VALNOTVALID", + [DLM_REJECTED] = "DLM_REJECTED", + [DLM_ABORT] = "DLM_ABORT", + [DLM_CANCEL] = "DLM_CANCEL", + [DLM_IVRESHANDLE] = "DLM_IVRESHANDLE", + [DLM_DEADLOCK] = "DLM_DEADLOCK", + [DLM_DENIED_NOASTS] = "DLM_DENIED_NOASTS", + [DLM_FORWARD] = "DLM_FORWARD", + [DLM_TIMEOUT] = "DLM_TIMEOUT", + [DLM_IVGROUPID] = "DLM_IVGROUPID", + [DLM_VERS_CONFLICT] = "DLM_VERS_CONFLICT", + [DLM_BAD_DEVICE_PATH] = "DLM_BAD_DEVICE_PATH", + [DLM_NO_DEVICE_PERMISSION] = "DLM_NO_DEVICE_PERMISSION", + [DLM_NO_CONTROL_DEVICE ] = "DLM_NO_CONTROL_DEVICE ", + [DLM_RECOVERING] = "DLM_RECOVERING", + [DLM_MIGRATING] = "DLM_MIGRATING", + [DLM_MAXSTATS] = "DLM_MAXSTATS", +}; + +static const char *dlm_errmsgs[] = { + [DLM_NORMAL] = "request in progress", + [DLM_GRANTED] = "request granted", + [DLM_DENIED] = "request denied", + [DLM_DENIED_NOLOCKS] = "request denied, out of system resources", + [DLM_WORKING] = "async request in progress", + [DLM_BLOCKED] = "lock request blocked", + [DLM_BLOCKED_ORPHAN] = "lock request blocked by a orphan lock", + 
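+	/*
+	 * These two tables are indexed by enum dlm_status via designated
+	 * initializers, with DLM_MAXSTATS doubling as the catch-all entry
+	 * that dlm_errname()/dlm_errmsg() below fall back to for values
+	 * out of range.  Hypothetical use of those accessors:
+	 *
+	 *	mlog(ML_ERROR, "convert failed: %s (%s)\n",
+	 *	     dlm_errname(status), dlm_errmsg(status));
+	 *
+	 * which for DLM_IVLOCKID would log "DLM_IVLOCKID (bad lockid)".
+	 */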
[DLM_DENIED_GRACE_PERIOD] = "topological change in progress", + [DLM_SYSERR] = "system error", + [DLM_NOSUPPORT] = "unsupported", + [DLM_CANCELGRANT] = "can't cancel convert: already granted", + [DLM_IVLOCKID] = "bad lockid", + [DLM_SYNC] = "synchronous request granted", + [DLM_BADTYPE] = "bad resource type", + [DLM_BADRESOURCE] = "bad resource handle", + [DLM_MAXHANDLES] = "no more resource handles", + [DLM_NOCLINFO] = "can't contact cluster manager", + [DLM_NOLOCKMGR] = "can't contact lock manager", + [DLM_NOPURGED] = "can't contact purge daemon", + [DLM_BADARGS] = "bad api args", + [DLM_VOID] = "no status", + [DLM_NOTQUEUED] = "NOQUEUE was specified and request failed", + [DLM_IVBUFLEN] = "invalid resource name length", + [DLM_CVTUNGRANT] = "attempted to convert ungranted lock", + [DLM_BADPARAM] = "invalid lock mode specified", + [DLM_VALNOTVALID] = "value block has been invalidated", + [DLM_REJECTED] = "request rejected, unrecognized client", + [DLM_ABORT] = "blocked lock request cancelled", + [DLM_CANCEL] = "conversion request cancelled", + [DLM_IVRESHANDLE] = "invalid resource handle", + [DLM_DEADLOCK] = "deadlock recovery refused this request", + [DLM_DENIED_NOASTS] = "failed to allocate AST", + [DLM_FORWARD] = "request must wait for primary's response", + [DLM_TIMEOUT] = "timeout value for lock has expired", + [DLM_IVGROUPID] = "invalid group specification", + [DLM_VERS_CONFLICT] = "version conflicts prevent request handling", + [DLM_BAD_DEVICE_PATH] = "Locks device does not exist or path wrong", + [DLM_NO_DEVICE_PERMISSION] = "Client has insufficient perms for device", + [DLM_NO_CONTROL_DEVICE] = "Cannot set options on opened device ", + [DLM_RECOVERING] = "lock resource being recovered", + [DLM_MIGRATING] = "lock resource being migrated", + [DLM_MAXSTATS] = "invalid error number", +}; + +const char *dlm_errmsg(enum dlm_status err) +{ + if (err >= DLM_MAXSTATS || err < 0) + return dlm_errmsgs[DLM_MAXSTATS]; + return dlm_errmsgs[err]; +} +EXPORT_SYMBOL_GPL(dlm_errmsg); + +const char *dlm_errname(enum dlm_status err) +{ + if (err >= DLM_MAXSTATS || err < 0) + return dlm_errnames[DLM_MAXSTATS]; + return dlm_errnames[err]; +} +EXPORT_SYMBOL_GPL(dlm_errname); + +/* NOTE: This function converts a lockname into a string. It uses knowledge + * of the format of the lockname that should be outside the purview of the dlm. + * We are adding only to make dlm debugging slightly easier. + * + * For more on lockname formats, please refer to dlmglue.c and ocfs2_lockid.h. 
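+ *
+ * Concrete example (layout per dlmglue.c, summarized here as a reading
+ * aid): a dentry lock name carries 'N' in byte 0, the parent block
+ * number as hex text through byte 16 and, from byte 18 on, the child
+ * inode block number as a raw big-endian u64.  That binary tail is why
+ * the function below re-prints bytes 18+ with %08x instead of copying
+ * the name verbatim.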
+ */ +static int stringify_lockname(const char *lockname, int locklen, char *buf, + int len) +{ + int out = 0; + __be64 inode_blkno_be; + +#define OCFS2_DENTRY_LOCK_INO_START 18 + if (*lockname == 'N') { + memcpy((__be64 *)&inode_blkno_be, + (char *)&lockname[OCFS2_DENTRY_LOCK_INO_START], + sizeof(__be64)); + out += snprintf(buf + out, len - out, "%.*s%08x", + OCFS2_DENTRY_LOCK_INO_START - 1, lockname, + (unsigned int)be64_to_cpu(inode_blkno_be)); + } else + out += snprintf(buf + out, len - out, "%.*s", + locklen, lockname); + return out; +} + +static int stringify_nodemap(unsigned long *nodemap, int maxnodes, + char *buf, int len) +{ + int out = 0; + int i = -1; + + while ((i = find_next_bit(nodemap, maxnodes, i + 1)) < maxnodes) + out += snprintf(buf + out, len - out, "%d ", i); + + return out; +} + +static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len) +{ + int out = 0; + unsigned int namelen; + const char *name; + char *mle_type; + + if (mle->type != DLM_MLE_MASTER) { + namelen = mle->u.name.len; + name = mle->u.name.name; + } else { + namelen = mle->u.res->lockname.len; + name = mle->u.res->lockname.name; + } + + if (mle->type == DLM_MLE_BLOCK) + mle_type = "BLK"; + else if (mle->type == DLM_MLE_MASTER) + mle_type = "MAS"; + else + mle_type = "MIG"; + + out += stringify_lockname(name, namelen, buf + out, len - out); + out += snprintf(buf + out, len - out, + "\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n", + mle_type, mle->master, mle->new_master, + !list_empty(&mle->hb_events), + !!mle->inuse, + atomic_read(&mle->mle_refs.refcount)); + + out += snprintf(buf + out, len - out, "Maybe="); + out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES, + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); + + out += snprintf(buf + out, len - out, "Vote="); + out += stringify_nodemap(mle->vote_map, O2NM_MAX_NODES, + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); + + out += snprintf(buf + out, len - out, "Response="); + out += stringify_nodemap(mle->response_map, O2NM_MAX_NODES, + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); + + out += snprintf(buf + out, len - out, "Node="); + out += stringify_nodemap(mle->node_map, O2NM_MAX_NODES, + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); + + out += snprintf(buf + out, len - out, "\n"); + + return out; +} + +void dlm_print_one_mle(struct dlm_master_list_entry *mle) +{ + char *buf; + + buf = (char *) get_zeroed_page(GFP_NOFS); + if (buf) { + dump_mle(mle, buf, PAGE_SIZE - 1); + free_page((unsigned long)buf); + } +} + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *dlm_debugfs_root = NULL; + +#define DLM_DEBUGFS_DIR "o2dlm" +#define DLM_DEBUGFS_DLM_STATE "dlm_state" +#define DLM_DEBUGFS_LOCKING_STATE "locking_state" +#define DLM_DEBUGFS_MLE_STATE "mle_state" +#define DLM_DEBUGFS_PURGE_LIST "purge_list" + +/* begin - utils funcs */ +static void dlm_debug_free(struct kref *kref) +{ + struct dlm_debug_ctxt *dc; + + dc = container_of(kref, struct dlm_debug_ctxt, debug_refcnt); + + kfree(dc); +} + +static void dlm_debug_put(struct dlm_debug_ctxt *dc) +{ + if (dc) + kref_put(&dc->debug_refcnt, dlm_debug_free); +} + +static void dlm_debug_get(struct dlm_debug_ctxt *dc) +{ + kref_get(&dc->debug_refcnt); +} + +static struct debug_buffer *debug_buffer_allocate(void) +{ + struct debug_buffer *db = NULL; + + db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL); + if (!db) + goto bail; + + db->len = PAGE_SIZE; + db->buf = kmalloc(db->len, GFP_KERNEL); 
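+	/*
+	 * One page is the whole budget by design: each debug_buffer user
+	 * formats its dump once at open time (the printers deliberately
+	 * stop early rather than overrun) and debug_buffer_read() then
+	 * serves it from memory via simple_read_from_buffer().
+	 * Illustrative access from userspace, assuming debugfs is
+	 * mounted at the usual place:
+	 *
+	 *	cat /sys/kernel/debug/o2dlm/<domain>/purge_list
+	 */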
+ if (!db->buf) + goto bail; + + return db; +bail: + kfree(db); + return NULL; +} + +static ssize_t debug_buffer_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct debug_buffer *db = file->private_data; + + return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len); +} + +static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence) +{ + struct debug_buffer *db = file->private_data; + loff_t new = -1; + + switch (whence) { + case 0: + new = off; + break; + case 1: + new = file->f_pos + off; + break; + } + + if (new < 0 || new > db->len) + return -EINVAL; + + return (file->f_pos = new); +} + +static int debug_buffer_release(struct inode *inode, struct file *file) +{ + struct debug_buffer *db = (struct debug_buffer *)file->private_data; + + if (db) + kfree(db->buf); + kfree(db); + + return 0; +} +/* end - util funcs */ + +/* begin - purge list funcs */ +static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db) +{ + struct dlm_lock_resource *res; + int out = 0; + unsigned long total = 0; + + out += snprintf(db->buf + out, db->len - out, + "Dumping Purgelist for Domain: %s\n", dlm->name); + + spin_lock(&dlm->spinlock); + list_for_each_entry(res, &dlm->purge_list, purge) { + ++total; + if (db->len - out < 100) + continue; + spin_lock(&res->spinlock); + out += stringify_lockname(res->lockname.name, + res->lockname.len, + db->buf + out, db->len - out); + out += snprintf(db->buf + out, db->len - out, "\t%ld\n", + (jiffies - res->last_used)/HZ); + spin_unlock(&res->spinlock); + } + spin_unlock(&dlm->spinlock); + + out += snprintf(db->buf + out, db->len - out, + "Total on list: %ld\n", total); + + return out; +} + +static int debug_purgelist_open(struct inode *inode, struct file *file) +{ + struct dlm_ctxt *dlm = inode->i_private; + struct debug_buffer *db; + + db = debug_buffer_allocate(); + if (!db) + goto bail; + + db->len = debug_purgelist_print(dlm, db); + + file->private_data = db; + + return 0; +bail: + return -ENOMEM; +} + +static struct file_operations debug_purgelist_fops = { + .open = debug_purgelist_open, + .release = debug_buffer_release, + .read = debug_buffer_read, + .llseek = debug_buffer_llseek, +}; +/* end - purge list funcs */ + +/* begin - debug mle funcs */ +static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) +{ + struct dlm_master_list_entry *mle; + int out = 0; + unsigned long total = 0; + + out += snprintf(db->buf + out, db->len - out, + "Dumping MLEs for Domain: %s\n", dlm->name); + + spin_lock(&dlm->master_lock); + list_for_each_entry(mle, &dlm->master_list, list) { + ++total; + if (db->len - out < 200) + continue; + out += dump_mle(mle, db->buf + out, db->len - out); + } + spin_unlock(&dlm->master_lock); + + out += snprintf(db->buf + out, db->len - out, + "Total on list: %ld\n", total); + return out; +} + +static int debug_mle_open(struct inode *inode, struct file *file) +{ + struct dlm_ctxt *dlm = inode->i_private; + struct debug_buffer *db; + + db = debug_buffer_allocate(); + if (!db) + goto bail; + + db->len = debug_mle_print(dlm, db); + + file->private_data = db; + + return 0; +bail: + return -ENOMEM; +} + +static struct file_operations debug_mle_fops = { + .open = debug_mle_open, + .release = debug_buffer_release, + .read = debug_buffer_read, + .llseek = debug_buffer_llseek, +}; + +/* end - debug mle funcs */ + +/* begin - debug lockres funcs */ +static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len) +{ + int out; + +#define DEBUG_LOCK_VERSION 
1 + spin_lock(&lock->spinlock); + out = snprintf(buf, len, "LOCK:%d,%d,%d,%d,%d,%d:%lld,%d,%d,%d,%d,%d," + "%d,%d,%d,%d\n", + DEBUG_LOCK_VERSION, + list_type, lock->ml.type, lock->ml.convert_type, + lock->ml.node, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + !list_empty(&lock->ast_list), + !list_empty(&lock->bast_list), + lock->ast_pending, lock->bast_pending, + lock->convert_pending, lock->lock_pending, + lock->cancel_pending, lock->unlock_pending, + atomic_read(&lock->lock_refs.refcount)); + spin_unlock(&lock->spinlock); + + return out; +} + +static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len) +{ + struct dlm_lock *lock; + int i; + int out = 0; + + out += snprintf(buf + out, len - out, "NAME:"); + out += stringify_lockname(res->lockname.name, res->lockname.len, + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); + +#define DEBUG_LRES_VERSION 1 + out += snprintf(buf + out, len - out, + "LRES:%d,%d,%d,%ld,%d,%d,%d,%d,%d,%d,%d\n", + DEBUG_LRES_VERSION, + res->owner, res->state, res->last_used, + !list_empty(&res->purge), + !list_empty(&res->dirty), + !list_empty(&res->recovering), + res->inflight_locks, res->migration_pending, + atomic_read(&res->asts_reserved), + atomic_read(&res->refs.refcount)); + + /* refmap */ + out += snprintf(buf + out, len - out, "RMAP:"); + out += stringify_nodemap(res->refmap, O2NM_MAX_NODES, + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); + + /* lvb */ + out += snprintf(buf + out, len - out, "LVBX:"); + for (i = 0; i < DLM_LVB_LEN; i++) + out += snprintf(buf + out, len - out, + "%02x", (unsigned char)res->lvb[i]); + out += snprintf(buf + out, len - out, "\n"); + + /* granted */ + list_for_each_entry(lock, &res->granted, list) + out += dump_lock(lock, 0, buf + out, len - out); + + /* converting */ + list_for_each_entry(lock, &res->converting, list) + out += dump_lock(lock, 1, buf + out, len - out); + + /* blocked */ + list_for_each_entry(lock, &res->blocked, list) + out += dump_lock(lock, 2, buf + out, len - out); + + out += snprintf(buf + out, len - out, "\n"); + + return out; +} + +static void *lockres_seq_start(struct seq_file *m, loff_t *pos) +{ + struct debug_lockres *dl = m->private; + struct dlm_ctxt *dlm = dl->dl_ctxt; + struct dlm_lock_resource *res = NULL; + + spin_lock(&dlm->spinlock); + + if (dl->dl_res) { + list_for_each_entry(res, &dl->dl_res->tracking, tracking) { + if (dl->dl_res) { + dlm_lockres_put(dl->dl_res); + dl->dl_res = NULL; + } + if (&res->tracking == &dlm->tracking_list) { + mlog(0, "End of list found, %p\n", res); + dl = NULL; + break; + } + dlm_lockres_get(res); + dl->dl_res = res; + break; + } + } else { + if (!list_empty(&dlm->tracking_list)) { + list_for_each_entry(res, &dlm->tracking_list, tracking) + break; + dlm_lockres_get(res); + dl->dl_res = res; + } else + dl = NULL; + } + + if (dl) { + spin_lock(&dl->dl_res->spinlock); + dump_lockres(dl->dl_res, dl->dl_buf, dl->dl_len - 1); + spin_unlock(&dl->dl_res->spinlock); + } + + spin_unlock(&dlm->spinlock); + + return dl; +} + +static void lockres_seq_stop(struct seq_file *m, void *v) +{ +} + +static void *lockres_seq_next(struct seq_file *m, void *v, loff_t *pos) +{ + return NULL; +} + +static int lockres_seq_show(struct seq_file *s, void *v) +{ + struct debug_lockres *dl = (struct debug_lockres *)v; + + seq_printf(s, "%s", dl->dl_buf); + + return 0; +} + +static struct seq_operations debug_lockres_ops = { + .start = lockres_seq_start, + .stop = 
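+	/*
+	 * seq_file quirk worth noting: ->next always returns NULL, so the
+	 * seq core re-invokes ->start for every record.  lockres_seq_start()
+	 * is therefore the real iterator: it holds a reference on the last
+	 * lockres shown (dl->dl_res), finds its successor on the domain's
+	 * tracking_list, and formats exactly one lockres per call.
+	 */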
lockres_seq_stop, + .next = lockres_seq_next, + .show = lockres_seq_show, +}; + +static int debug_lockres_open(struct inode *inode, struct file *file) +{ + struct dlm_ctxt *dlm = inode->i_private; + int ret = -ENOMEM; + struct seq_file *seq; + struct debug_lockres *dl = NULL; + + dl = kzalloc(sizeof(struct debug_lockres), GFP_KERNEL); + if (!dl) { + mlog_errno(ret); + goto bail; + } + + dl->dl_len = PAGE_SIZE; + dl->dl_buf = kmalloc(dl->dl_len, GFP_KERNEL); + if (!dl->dl_buf) { + mlog_errno(ret); + goto bail; + } + + ret = seq_open(file, &debug_lockres_ops); + if (ret) { + mlog_errno(ret); + goto bail; + } + + seq = (struct seq_file *) file->private_data; + seq->private = dl; + + dlm_grab(dlm); + dl->dl_ctxt = dlm; + + return 0; +bail: + if (dl) + kfree(dl->dl_buf); + kfree(dl); + return ret; +} + +static int debug_lockres_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = (struct seq_file *)file->private_data; + struct debug_lockres *dl = (struct debug_lockres *)seq->private; + + if (dl->dl_res) + dlm_lockres_put(dl->dl_res); + dlm_put(dl->dl_ctxt); + kfree(dl->dl_buf); + return seq_release_private(inode, file); +} + +static struct file_operations debug_lockres_fops = { + .open = debug_lockres_open, + .release = debug_lockres_release, + .read = seq_read, + .llseek = seq_lseek, +}; +/* end - debug lockres funcs */ + +/* begin - debug state funcs */ +static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) +{ + int out = 0; + struct dlm_reco_node_data *node; + char *state; + int lres, rres, ures, tres; + + lres = atomic_read(&dlm->local_resources); + rres = atomic_read(&dlm->remote_resources); + ures = atomic_read(&dlm->unknown_resources); + tres = lres + rres + ures; + + spin_lock(&dlm->spinlock); + + switch (dlm->dlm_state) { + case DLM_CTXT_NEW: + state = "NEW"; break; + case DLM_CTXT_JOINED: + state = "JOINED"; break; + case DLM_CTXT_IN_SHUTDOWN: + state = "SHUTDOWN"; break; + case DLM_CTXT_LEAVING: + state = "LEAVING"; break; + default: + state = "UNKNOWN"; break; + } + + /* Domain: xxxxxxxxxx Key: 0xdfbac769 */ + out += snprintf(db->buf + out, db->len - out, + "Domain: %s Key: 0x%08x\n", dlm->name, dlm->key); + + /* Thread Pid: xxx Node: xxx State: xxxxx */ + out += snprintf(db->buf + out, db->len - out, + "Thread Pid: %d Node: %d State: %s\n", + dlm->dlm_thread_task->pid, dlm->node_num, state); + + /* Number of Joins: xxx Joining Node: xxx */ + out += snprintf(db->buf + out, db->len - out, + "Number of Joins: %d Joining Node: %d\n", + dlm->num_joins, dlm->joining_node); + + /* Domain Map: xx xx xx */ + out += snprintf(db->buf + out, db->len - out, "Domain Map: "); + out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES, + db->buf + out, db->len - out); + out += snprintf(db->buf + out, db->len - out, "\n"); + + /* Live Map: xx xx xx */ + out += snprintf(db->buf + out, db->len - out, "Live Map: "); + out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES, + db->buf + out, db->len - out); + out += snprintf(db->buf + out, db->len - out, "\n"); + + /* Mastered Resources Total: xxx Locally: xxx Remotely: ... */ + out += snprintf(db->buf + out, db->len - out, + "Mastered Resources Total: %d Locally: %d " + "Remotely: %d Unknown: %d\n", + tres, lres, rres, ures); + + /* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */ + out += snprintf(db->buf + out, db->len - out, + "Lists: Dirty=%s Purge=%s PendingASTs=%s " + "PendingBASTs=%s Master=%s\n", + (list_empty(&dlm->dirty_list) ? "Empty" : "InUse"), + (list_empty(&dlm->purge_list) ? 
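+	/* (the "Lists:" line reports occupancy only -- empty vs. in use;
+	 * per-entry detail lives in the other debugfs files here) */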
"Empty" : "InUse"), + (list_empty(&dlm->pending_asts) ? "Empty" : "InUse"), + (list_empty(&dlm->pending_basts) ? "Empty" : "InUse"), + (list_empty(&dlm->master_list) ? "Empty" : "InUse")); + + /* Purge Count: xxx Refs: xxx */ + out += snprintf(db->buf + out, db->len - out, + "Purge Count: %d Refs: %d\n", dlm->purge_count, + atomic_read(&dlm->dlm_refs.refcount)); + + /* Dead Node: xxx */ + out += snprintf(db->buf + out, db->len - out, + "Dead Node: %d\n", dlm->reco.dead_node); + + /* What about DLM_RECO_STATE_FINALIZE? */ + if (dlm->reco.state == DLM_RECO_STATE_ACTIVE) + state = "ACTIVE"; + else + state = "INACTIVE"; + + /* Recovery Pid: xxxx Master: xxx State: xxxx */ + out += snprintf(db->buf + out, db->len - out, + "Recovery Pid: %d Master: %d State: %s\n", + dlm->dlm_reco_thread_task->pid, + dlm->reco.new_master, state); + + /* Recovery Map: xx xx */ + out += snprintf(db->buf + out, db->len - out, "Recovery Map: "); + out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES, + db->buf + out, db->len - out); + out += snprintf(db->buf + out, db->len - out, "\n"); + + /* Recovery Node State: */ + out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n"); + list_for_each_entry(node, &dlm->reco.node_data, list) { + switch (node->state) { + case DLM_RECO_NODE_DATA_INIT: + state = "INIT"; + break; + case DLM_RECO_NODE_DATA_REQUESTING: + state = "REQUESTING"; + break; + case DLM_RECO_NODE_DATA_DEAD: + state = "DEAD"; + break; + case DLM_RECO_NODE_DATA_RECEIVING: + state = "RECEIVING"; + break; + case DLM_RECO_NODE_DATA_REQUESTED: + state = "REQUESTED"; + break; + case DLM_RECO_NODE_DATA_DONE: + state = "DONE"; + break; + case DLM_RECO_NODE_DATA_FINALIZE_SENT: + state = "FINALIZE-SENT"; + break; + default: + state = "BAD"; + break; + } + out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n", + node->node_num, state); + } + + spin_unlock(&dlm->spinlock); + + return out; +} + +static int debug_state_open(struct inode *inode, struct file *file) +{ + struct dlm_ctxt *dlm = inode->i_private; + struct debug_buffer *db = NULL; + + db = debug_buffer_allocate(); + if (!db) + goto bail; + + db->len = debug_state_print(dlm, db); + + file->private_data = db; + + return 0; +bail: + return -ENOMEM; +} + +static struct file_operations debug_state_fops = { + .open = debug_state_open, + .release = debug_buffer_release, + .read = debug_buffer_read, + .llseek = debug_buffer_llseek, +}; +/* end - debug state funcs */ + +/* files in subroot */ +int dlm_debug_init(struct dlm_ctxt *dlm) +{ + struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt; + + /* for dumping dlm_ctxt */ + dc->debug_state_dentry = debugfs_create_file(DLM_DEBUGFS_DLM_STATE, + S_IFREG|S_IRUSR, + dlm->dlm_debugfs_subroot, + dlm, &debug_state_fops); + if (!dc->debug_state_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + + /* for dumping lockres */ + dc->debug_lockres_dentry = + debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE, + S_IFREG|S_IRUSR, + dlm->dlm_debugfs_subroot, + dlm, &debug_lockres_fops); + if (!dc->debug_lockres_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + + /* for dumping mles */ + dc->debug_mle_dentry = debugfs_create_file(DLM_DEBUGFS_MLE_STATE, + S_IFREG|S_IRUSR, + dlm->dlm_debugfs_subroot, + dlm, &debug_mle_fops); + if (!dc->debug_mle_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + + /* for dumping lockres on the purge list */ + dc->debug_purgelist_dentry = + debugfs_create_file(DLM_DEBUGFS_PURGE_LIST, + S_IFREG|S_IRUSR, + dlm->dlm_debugfs_subroot, + dlm, &debug_purgelist_fops); + if 
(!dc->debug_purgelist_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + + dlm_debug_get(dc); + return 0; + +bail: + dlm_debug_shutdown(dlm); + return -ENOMEM; +} + +void dlm_debug_shutdown(struct dlm_ctxt *dlm) +{ + struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt; + + if (dc) { + if (dc->debug_purgelist_dentry) + debugfs_remove(dc->debug_purgelist_dentry); + if (dc->debug_mle_dentry) + debugfs_remove(dc->debug_mle_dentry); + if (dc->debug_lockres_dentry) + debugfs_remove(dc->debug_lockres_dentry); + if (dc->debug_state_dentry) + debugfs_remove(dc->debug_state_dentry); + dlm_debug_put(dc); + } +} + +/* subroot - domain dir */ +int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm) +{ + dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name, + dlm_debugfs_root); + if (!dlm->dlm_debugfs_subroot) { + mlog_errno(-ENOMEM); + goto bail; + } + + dlm->dlm_debug_ctxt = kzalloc(sizeof(struct dlm_debug_ctxt), + GFP_KERNEL); + if (!dlm->dlm_debug_ctxt) { + mlog_errno(-ENOMEM); + goto bail; + } + kref_init(&dlm->dlm_debug_ctxt->debug_refcnt); + + return 0; +bail: + dlm_destroy_debugfs_subroot(dlm); + return -ENOMEM; +} + +void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm) +{ + if (dlm->dlm_debugfs_subroot) + debugfs_remove(dlm->dlm_debugfs_subroot); +} + +/* debugfs root */ +int dlm_create_debugfs_root(void) +{ + dlm_debugfs_root = debugfs_create_dir(DLM_DEBUGFS_DIR, NULL); + if (!dlm_debugfs_root) { + mlog_errno(-ENOMEM); + return -ENOMEM; + } + return 0; +} + +void dlm_destroy_debugfs_root(void) +{ + if (dlm_debugfs_root) + debugfs_remove(dlm_debugfs_root); +} +#endif /* CONFIG_DEBUG_FS */ diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h new file mode 100644 index 0000000..8c686d2 --- /dev/null +++ b/fs/ocfs2/dlm/dlmdebug.h @@ -0,0 +1,86 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmdebug.h + * + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + +#ifndef DLMDEBUG_H +#define DLMDEBUG_H + +void dlm_print_one_mle(struct dlm_master_list_entry *mle); + +#ifdef CONFIG_DEBUG_FS + +struct dlm_debug_ctxt { + struct kref debug_refcnt; + struct dentry *debug_state_dentry; + struct dentry *debug_lockres_dentry; + struct dentry *debug_mle_dentry; + struct dentry *debug_purgelist_dentry; +}; + +struct debug_buffer { + int len; + char *buf; +}; + +struct debug_lockres { + int dl_len; + char *dl_buf; + struct dlm_ctxt *dl_ctxt; + struct dlm_lock_resource *dl_res; +}; + +int dlm_debug_init(struct dlm_ctxt *dlm); +void dlm_debug_shutdown(struct dlm_ctxt *dlm); + +int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm); +void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm); + +int dlm_create_debugfs_root(void); +void dlm_destroy_debugfs_root(void); + +#else + +static inline int dlm_debug_init(struct dlm_ctxt *dlm) +{ + return 0; +} +static inline void dlm_debug_shutdown(struct dlm_ctxt *dlm) +{ +} +static inline int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm) +{ + return 0; +} +static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm) +{ +} +static inline int dlm_create_debugfs_root(void) +{ + return 0; +} +static inline void dlm_destroy_debugfs_root(void) +{ +} + +#endif /* CONFIG_DEBUG_FS */ +#endif /* DLMDEBUG_H */ diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c new file mode 100644 index 0000000..63f8125 --- /dev/null +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -0,0 +1,1901 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmdomain.c + * + * defines domain join / leave apis + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/debugfs.h> + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" +#include "dlmdomain.h" +#include "dlmdebug.h" + +#include "dlmver.h" + +#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) +#include "cluster/masklog.h" + +/* + * ocfs2 node maps are array of long int, which limits to send them freely + * across the wire due to endianness issues. To workaround this, we convert + * long ints to byte arrays. 
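+ * The byte layout is fixed and host-independent: bit nr lands in
+ * map[nr >> 3] at position (nr & 7), so node 10, say, always sets
+ * bit 2 of byte 1 -- a guarantee an unsigned-long bitmap cannot make
+ * across word sizes and endiannesses.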
Following 3 routines are helper functions to + * set/test/copy bits within those array of bytes + */ +static inline void byte_set_bit(u8 nr, u8 map[]) +{ + map[nr >> 3] |= (1UL << (nr & 7)); +} + +static inline int byte_test_bit(u8 nr, u8 map[]) +{ + return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0; +} + +static inline void byte_copymap(u8 dmap[], unsigned long smap[], + unsigned int sz) +{ + unsigned int nn; + + if (!sz) + return; + + memset(dmap, 0, ((sz + 7) >> 3)); + for (nn = 0 ; nn < sz; nn++) + if (test_bit(nn, smap)) + byte_set_bit(nn, dmap); +} + +static void dlm_free_pagevec(void **vec, int pages) +{ + while (pages--) + free_page((unsigned long)vec[pages]); + kfree(vec); +} + +static void **dlm_alloc_pagevec(int pages) +{ + void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL); + int i; + + if (!vec) + return NULL; + + for (i = 0; i < pages; i++) + if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL))) + goto out_free; + + mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n", + pages, (unsigned long)DLM_HASH_PAGES, + (unsigned long)DLM_BUCKETS_PER_PAGE); + return vec; +out_free: + dlm_free_pagevec(vec, i); + return NULL; +} + +/* + * + * spinlock lock ordering: if multiple locks are needed, obey this ordering: + * dlm_domain_lock + * struct dlm_ctxt->spinlock + * struct dlm_lock_resource->spinlock + * struct dlm_ctxt->master_lock + * struct dlm_ctxt->ast_lock + * dlm_master_list_entry->spinlock + * dlm_lock->spinlock + * + */ + +DEFINE_SPINLOCK(dlm_domain_lock); +LIST_HEAD(dlm_domains); +static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); + +/* + * The supported protocol version for DLM communication. Running domains + * will have a negotiated version with the same major number and a minor + * number equal or smaller. The dlm_ctxt->dlm_locking_proto field should + * be used to determine what a running domain is actually using. 
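+ *
+ * Only the minor number is negotiable: majors must match exactly,
+ * and a joiner asking for a newer minor is talked down to the
+ * domain's (see dlm_query_join_proto_check() for the response side);
+ * anything else is refused with JOIN_PROTOCOL_MISMATCH.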
+ */ +static const struct dlm_protocol_version dlm_protocol = { + .pv_major = 1, + .pv_minor = 0, +}; + +#define DLM_DOMAIN_BACKOFF_MS 200 + +static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data); +static int dlm_protocol_compare(struct dlm_protocol_version *existing, + struct dlm_protocol_version *request); + +static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); + +void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) +{ + if (!hlist_unhashed(&lockres->hash_node)) { + hlist_del_init(&lockres->hash_node); + dlm_lockres_put(lockres); + } +} + +void __dlm_insert_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + struct hlist_head *bucket; + struct qstr *q; + + assert_spin_locked(&dlm->spinlock); + + q = &res->lockname; + bucket = dlm_lockres_hash(dlm, q->hash); + + /* get a reference for our hashtable */ + dlm_lockres_get(res); + + hlist_add_head(&res->hash_node, bucket); +} + +struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, + const char *name, + unsigned int len, + unsigned int hash) +{ + struct hlist_head *bucket; + struct hlist_node *list; + + mlog_entry("%.*s\n", len, name); + + assert_spin_locked(&dlm->spinlock); + + bucket = dlm_lockres_hash(dlm, hash); + + hlist_for_each(list, bucket) { + struct dlm_lock_resource *res = hlist_entry(list, + struct dlm_lock_resource, hash_node); + if (res->lockname.name[0] != name[0]) + continue; + if (unlikely(res->lockname.len != len)) + continue; + if (memcmp(res->lockname.name + 1, name + 1, len - 1)) + continue; + dlm_lockres_get(res); + return res; + } + return NULL; +} + +/* intended to be called by functions which do not care about lock + * resources which are being purged (most net _handler functions). + * this will return NULL for any lock resource which is found but + * currently in the process of dropping its mastery reference. + * use __dlm_lookup_lockres_full when you need the lock resource + * regardless (e.g. dlm_get_lock_resource) */ +struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, + const char *name, + unsigned int len, + unsigned int hash) +{ + struct dlm_lock_resource *res = NULL; + + mlog_entry("%.*s\n", len, name); + + assert_spin_locked(&dlm->spinlock); + + res = __dlm_lookup_lockres_full(dlm, name, len, hash); + if (res) { + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_DROPPING_REF) { + spin_unlock(&res->spinlock); + dlm_lockres_put(res); + return NULL; + } + spin_unlock(&res->spinlock); + } + + return res; +} + +struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, + const char *name, + unsigned int len) +{ + struct dlm_lock_resource *res; + unsigned int hash = dlm_lockid_hash(name, len); + + spin_lock(&dlm->spinlock); + res = __dlm_lookup_lockres(dlm, name, len, hash); + spin_unlock(&dlm->spinlock); + return res; +} + +static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) +{ + struct dlm_ctxt *tmp = NULL; + struct list_head *iter; + + assert_spin_locked(&dlm_domain_lock); + + /* tmp->name here is always NULL terminated, + * but domain may not be! 
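+ * Hence the strlen()-plus-memcmp() below rather than strcmp(): the
+ * candidate's length comes from its own terminated name, and the
+ * possibly-unterminated input is never read past len bytes.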
*/ + list_for_each(iter, &dlm_domains) { + tmp = list_entry (iter, struct dlm_ctxt, list); + if (strlen(tmp->name) == len && + memcmp(tmp->name, domain, len)==0) + break; + tmp = NULL; + } + + return tmp; +} + +/* For null terminated domain strings ONLY */ +static struct dlm_ctxt * __dlm_lookup_domain(const char *domain) +{ + assert_spin_locked(&dlm_domain_lock); + + return __dlm_lookup_domain_full(domain, strlen(domain)); +} + + +/* returns true on one of two conditions: + * 1) the domain does not exist + * 2) the domain exists and it's state is "joined" */ +static int dlm_wait_on_domain_helper(const char *domain) +{ + int ret = 0; + struct dlm_ctxt *tmp = NULL; + + spin_lock(&dlm_domain_lock); + + tmp = __dlm_lookup_domain(domain); + if (!tmp) + ret = 1; + else if (tmp->dlm_state == DLM_CTXT_JOINED) + ret = 1; + + spin_unlock(&dlm_domain_lock); + return ret; +} + +static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm) +{ + dlm_destroy_debugfs_subroot(dlm); + + if (dlm->lockres_hash) + dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); + + if (dlm->name) + kfree(dlm->name); + + kfree(dlm); +} + +/* A little strange - this function will be called while holding + * dlm_domain_lock and is expected to be holding it on the way out. We + * will however drop and reacquire it multiple times */ +static void dlm_ctxt_release(struct kref *kref) +{ + struct dlm_ctxt *dlm; + + dlm = container_of(kref, struct dlm_ctxt, dlm_refs); + + BUG_ON(dlm->num_joins); + BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED); + + /* we may still be in the list if we hit an error during join. */ + list_del_init(&dlm->list); + + spin_unlock(&dlm_domain_lock); + + mlog(0, "freeing memory from domain %s\n", dlm->name); + + wake_up(&dlm_domain_events); + + dlm_free_ctxt_mem(dlm); + + spin_lock(&dlm_domain_lock); +} + +void dlm_put(struct dlm_ctxt *dlm) +{ + spin_lock(&dlm_domain_lock); + kref_put(&dlm->dlm_refs, dlm_ctxt_release); + spin_unlock(&dlm_domain_lock); +} + +static void __dlm_get(struct dlm_ctxt *dlm) +{ + kref_get(&dlm->dlm_refs); +} + +/* given a questionable reference to a dlm object, gets a reference if + * it can find it in the list, otherwise returns NULL in which case + * you shouldn't trust your pointer. */ +struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm) +{ + struct list_head *iter; + struct dlm_ctxt *target = NULL; + + spin_lock(&dlm_domain_lock); + + list_for_each(iter, &dlm_domains) { + target = list_entry (iter, struct dlm_ctxt, list); + + if (target == dlm) { + __dlm_get(target); + break; + } + + target = NULL; + } + + spin_unlock(&dlm_domain_lock); + + return target; +} + +int dlm_domain_fully_joined(struct dlm_ctxt *dlm) +{ + int ret; + + spin_lock(&dlm_domain_lock); + ret = (dlm->dlm_state == DLM_CTXT_JOINED) || + (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN); + spin_unlock(&dlm_domain_lock); + + return ret; +} + +static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm) +{ + if (dlm->dlm_worker) { + flush_workqueue(dlm->dlm_worker); + destroy_workqueue(dlm->dlm_worker); + dlm->dlm_worker = NULL; + } +} + +static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm) +{ + dlm_unregister_domain_handlers(dlm); + dlm_debug_shutdown(dlm); + dlm_complete_thread(dlm); + dlm_complete_recovery_thread(dlm); + dlm_destroy_dlm_worker(dlm); + + /* We've left the domain. Now we can take ourselves out of the + * list and allow the kref stuff to help us free the + * memory. 
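+ * (The teardown above ran in reverse order of setup: network and
+ * heartbeat handlers first so no new work can arrive, then debugfs,
+ * the dlm and recovery threads, and the work queue last.)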
*/ + spin_lock(&dlm_domain_lock); + list_del_init(&dlm->list); + spin_unlock(&dlm_domain_lock); + + /* Wake up anyone waiting for us to remove this domain */ + wake_up(&dlm_domain_events); +} + +static int dlm_migrate_all_locks(struct dlm_ctxt *dlm) +{ + int i, num, n, ret = 0; + struct dlm_lock_resource *res; + struct hlist_node *iter; + struct hlist_head *bucket; + int dropped; + + mlog(0, "Migrating locks from domain %s\n", dlm->name); + + num = 0; + spin_lock(&dlm->spinlock); + for (i = 0; i < DLM_HASH_BUCKETS; i++) { +redo_bucket: + n = 0; + bucket = dlm_lockres_hash(dlm, i); + iter = bucket->first; + while (iter) { + n++; + res = hlist_entry(iter, struct dlm_lock_resource, + hash_node); + dlm_lockres_get(res); + /* migrate, if necessary. this will drop the dlm + * spinlock and retake it if it does migration. */ + dropped = dlm_empty_lockres(dlm, res); + + spin_lock(&res->spinlock); + __dlm_lockres_calc_usage(dlm, res); + iter = res->hash_node.next; + spin_unlock(&res->spinlock); + + dlm_lockres_put(res); + + if (dropped) + goto redo_bucket; + } + cond_resched_lock(&dlm->spinlock); + num += n; + mlog(0, "%s: touched %d lockreses in bucket %d " + "(tot=%d)\n", dlm->name, n, i, num); + } + spin_unlock(&dlm->spinlock); + wake_up(&dlm->dlm_thread_wq); + + /* let the dlm thread take care of purging, keep scanning until + * nothing remains in the hash */ + if (num) { + mlog(0, "%s: %d lock resources in hash last pass\n", + dlm->name, num); + ret = -EAGAIN; + } + mlog(0, "DONE Migrating locks from domain %s\n", dlm->name); + return ret; +} + +static int dlm_no_joining_node(struct dlm_ctxt *dlm) +{ + int ret; + + spin_lock(&dlm->spinlock); + ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN; + spin_unlock(&dlm->spinlock); + + return ret; +} + +static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm) +{ + /* Yikes, a double spinlock! I need domain_lock for the dlm + * state and the dlm spinlock for join state... Sorry! 
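+ * The retry loop below still honors the documented lock ordering
+ * (dlm_domain_lock before dlm->spinlock) and drops both locks before
+ * sleeping in wait_event(), re-taking them from scratch on each pass.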
*/ +again: + spin_lock(&dlm_domain_lock); + spin_lock(&dlm->spinlock); + + if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { + mlog(0, "Node %d is joining, we wait on it.\n", + dlm->joining_node); + spin_unlock(&dlm->spinlock); + spin_unlock(&dlm_domain_lock); + + wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm)); + goto again; + } + + dlm->dlm_state = DLM_CTXT_LEAVING; + spin_unlock(&dlm->spinlock); + spin_unlock(&dlm_domain_lock); +} + +static void __dlm_print_nodes(struct dlm_ctxt *dlm) +{ + int node = -1; + + assert_spin_locked(&dlm->spinlock); + + printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name); + + while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, + node + 1)) < O2NM_MAX_NODES) { + printk("%d ", node); + } + printk("\n"); +} + +static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + unsigned int node; + struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; + + mlog_entry("%p %u %p", msg, len, data); + + if (!dlm_grab(dlm)) + return 0; + + node = exit_msg->node_idx; + + printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name); + + spin_lock(&dlm->spinlock); + clear_bit(node, dlm->domain_map); + __dlm_print_nodes(dlm); + + /* notify anything attached to the heartbeat events */ + dlm_hb_event_notify_attached(dlm, node, 0); + + spin_unlock(&dlm->spinlock); + + dlm_put(dlm); + + return 0; +} + +static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, + unsigned int node) +{ + int status; + struct dlm_exit_domain leave_msg; + + mlog(0, "Asking node %u if we can leave the domain %s me = %u\n", + node, dlm->name, dlm->node_num); + + memset(&leave_msg, 0, sizeof(leave_msg)); + leave_msg.node_idx = dlm->node_num; + + status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key, + &leave_msg, sizeof(leave_msg), node, + NULL); + + mlog(0, "status return %d from o2net_send_message\n", status); + + return status; +} + + +static void dlm_leave_domain(struct dlm_ctxt *dlm) +{ + int node, clear_node, status; + + /* At this point we've migrated away all our locks and won't + * accept mastership of new ones. The dlm is responsible for + * almost nothing now. We make sure not to confuse any joining + * nodes and then commence shutdown procedure. */ + + spin_lock(&dlm->spinlock); + /* Clear ourselves from the domain map */ + clear_bit(dlm->node_num, dlm->domain_map); + while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, + 0)) < O2NM_MAX_NODES) { + /* Drop the dlm spinlock. This is safe wrt the domain_map. + * -nodes cannot be added now as the + * query_join_handlers knows to respond with OK_NO_MAP + * -we catch the right network errors if a node is + * removed from the map while we're sending him the + * exit message. */ + spin_unlock(&dlm->spinlock); + + clear_node = 1; + + status = dlm_send_one_domain_exit(dlm, node); + if (status < 0 && + status != -ENOPROTOOPT && + status != -ENOTCONN) { + mlog(ML_NOTICE, "Error %d sending domain exit message " + "to node %d\n", status, node); + + /* Not sure what to do here but lets sleep for + * a bit in case this was a transient + * error... */ + msleep(DLM_DOMAIN_BACKOFF_MS); + clear_node = 0; + } + + spin_lock(&dlm->spinlock); + /* If we're not clearing the node bit then we intend + * to loop back around to try again. 
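+ * (clear_node stayed 0 above only for unexpected send errors;
+ * -ENOPROTOOPT and -ENOTCONN mean the peer is already gone, so its
+ * bit is cleared and the scan moves on.)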
*/ + if (clear_node) + clear_bit(node, dlm->domain_map); + } + spin_unlock(&dlm->spinlock); +} + +int dlm_joined(struct dlm_ctxt *dlm) +{ + int ret = 0; + + spin_lock(&dlm_domain_lock); + + if (dlm->dlm_state == DLM_CTXT_JOINED) + ret = 1; + + spin_unlock(&dlm_domain_lock); + + return ret; +} + +int dlm_shutting_down(struct dlm_ctxt *dlm) +{ + int ret = 0; + + spin_lock(&dlm_domain_lock); + + if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) + ret = 1; + + spin_unlock(&dlm_domain_lock); + + return ret; +} + +void dlm_unregister_domain(struct dlm_ctxt *dlm) +{ + int leave = 0; + struct dlm_lock_resource *res; + + spin_lock(&dlm_domain_lock); + BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED); + BUG_ON(!dlm->num_joins); + + dlm->num_joins--; + if (!dlm->num_joins) { + /* We mark it "in shutdown" now so new register + * requests wait until we've completely left the + * domain. Don't use DLM_CTXT_LEAVING yet as we still + * want new domain joins to communicate with us at + * least until we've completed migration of our + * resources. */ + dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN; + leave = 1; + } + spin_unlock(&dlm_domain_lock); + + if (leave) { + mlog(0, "shutting down domain %s\n", dlm->name); + + /* We changed dlm state, notify the thread */ + dlm_kick_thread(dlm, NULL); + + while (dlm_migrate_all_locks(dlm)) { + /* Give dlm_thread time to purge the lockres' */ + msleep(500); + mlog(0, "%s: more migration to do\n", dlm->name); + } + + /* This list should be empty. If not, print remaining lockres */ + if (!list_empty(&dlm->tracking_list)) { + mlog(ML_ERROR, "Following lockres' are still on the " + "tracking list:\n"); + list_for_each_entry(res, &dlm->tracking_list, tracking) + dlm_print_one_lock_resource(res); + } + + dlm_mark_domain_leaving(dlm); + dlm_leave_domain(dlm); + dlm_complete_dlm_shutdown(dlm); + } + dlm_put(dlm); +} +EXPORT_SYMBOL_GPL(dlm_unregister_domain); + +static int dlm_query_join_proto_check(char *proto_type, int node, + struct dlm_protocol_version *ours, + struct dlm_protocol_version *request) +{ + int rc; + struct dlm_protocol_version proto = *request; + + if (!dlm_protocol_compare(ours, &proto)) { + mlog(0, + "node %u wanted to join with %s locking protocol " + "%u.%u, we respond with %u.%u\n", + node, proto_type, + request->pv_major, + request->pv_minor, + proto.pv_major, proto.pv_minor); + request->pv_minor = proto.pv_minor; + rc = 0; + } else { + mlog(ML_NOTICE, + "Node %u wanted to join with %s locking " + "protocol %u.%u, but we have %u.%u, disallowing\n", + node, proto_type, + request->pv_major, + request->pv_minor, + ours->pv_major, + ours->pv_minor); + rc = 1; + } + + return rc; +} + +/* + * struct dlm_query_join_packet is made up of four one-byte fields. They + * are effectively in big-endian order already. However, little-endian + * machines swap them before putting the packet on the wire (because + * query_join's response is a status, and that status is treated as a u32 + * on the wire). Thus, a big-endian and little-endian machines will treat + * this structure differently. + * + * The solution is to have little-endian machines swap the structure when + * converting from the structure to the u32 representation. This will + * result in the structure having the correct format on the wire no matter + * the host endian format. 
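+ *
+ * Worked example for the helpers below: a packet of
+ * { .code = JOIN_OK, .dlm_minor = 1, .fs_minor = 0 } is overlaid
+ * with a u32 through union dlm_query_join_response and run through
+ * cpu_to_be32(); a little-endian host swaps once, a big-endian host
+ * stores it as-is, and both put the same four bytes on the wire.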
+ */ +static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet, + u32 *wire) +{ + union dlm_query_join_response response; + + response.packet = *packet; + *wire = cpu_to_be32(response.intval); +} + +static void dlm_query_join_wire_to_packet(u32 wire, + struct dlm_query_join_packet *packet) +{ + union dlm_query_join_response response; + + response.intval = cpu_to_be32(wire); + *packet = response.packet; +} + +static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_query_join_request *query; + struct dlm_query_join_packet packet = { + .code = JOIN_DISALLOW, + }; + struct dlm_ctxt *dlm = NULL; + u32 response; + u8 nodenum; + + query = (struct dlm_query_join_request *) msg->buf; + + mlog(0, "node %u wants to join domain %s\n", query->node_idx, + query->domain); + + /* + * If heartbeat doesn't consider the node live, tell it + * to back off and try again. This gives heartbeat a chance + * to catch up. + */ + if (!o2hb_check_node_heartbeating(query->node_idx)) { + mlog(0, "node %u is not in our live map yet\n", + query->node_idx); + + packet.code = JOIN_DISALLOW; + goto respond; + } + + packet.code = JOIN_OK_NO_MAP; + + spin_lock(&dlm_domain_lock); + dlm = __dlm_lookup_domain_full(query->domain, query->name_len); + if (!dlm) + goto unlock_respond; + + /* + * There is a small window where the joining node may not see the + * node(s) that just left but still part of the cluster. DISALLOW + * join request if joining node has different node map. + */ + nodenum=0; + while (nodenum < O2NM_MAX_NODES) { + if (test_bit(nodenum, dlm->domain_map)) { + if (!byte_test_bit(nodenum, query->node_map)) { + mlog(0, "disallow join as node %u does not " + "have node %u in its nodemap\n", + query->node_idx, nodenum); + packet.code = JOIN_DISALLOW; + goto unlock_respond; + } + } + nodenum++; + } + + /* Once the dlm ctxt is marked as leaving then we don't want + * to be put in someone's domain map. + * Also, explicitly disallow joining at certain troublesome + * times (ie. during recovery). */ + if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { + int bit = query->node_idx; + spin_lock(&dlm->spinlock); + + if (dlm->dlm_state == DLM_CTXT_NEW && + dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) { + /*If this is a brand new context and we + * haven't started our join process yet, then + * the other node won the race. */ + packet.code = JOIN_OK_NO_MAP; + } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { + /* Disallow parallel joins. */ + packet.code = JOIN_DISALLOW; + } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) { + mlog(0, "node %u trying to join, but recovery " + "is ongoing.\n", bit); + packet.code = JOIN_DISALLOW; + } else if (test_bit(bit, dlm->recovery_map)) { + mlog(0, "node %u trying to join, but it " + "still needs recovery.\n", bit); + packet.code = JOIN_DISALLOW; + } else if (test_bit(bit, dlm->domain_map)) { + mlog(0, "node %u trying to join, but it " + "is still in the domain! needs recovery?\n", + bit); + packet.code = JOIN_DISALLOW; + } else { + /* Alright we're fully a part of this domain + * so we keep some state as to who's joining + * and indicate to him that needs to be fixed + * up. */ + + /* Make sure we speak compatible locking protocols. 
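+ * Two independent checks follow, one for the DLM wire protocol and
+ * one for the fs locking protocol; on JOIN_OK the (possibly lowered)
+ * minors are echoed back in packet.dlm_minor and packet.fs_minor.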
*/ + if (dlm_query_join_proto_check("DLM", bit, + &dlm->dlm_locking_proto, + &query->dlm_proto)) { + packet.code = JOIN_PROTOCOL_MISMATCH; + } else if (dlm_query_join_proto_check("fs", bit, + &dlm->fs_locking_proto, + &query->fs_proto)) { + packet.code = JOIN_PROTOCOL_MISMATCH; + } else { + packet.dlm_minor = query->dlm_proto.pv_minor; + packet.fs_minor = query->fs_proto.pv_minor; + packet.code = JOIN_OK; + __dlm_set_joining_node(dlm, query->node_idx); + } + } + + spin_unlock(&dlm->spinlock); + } +unlock_respond: + spin_unlock(&dlm_domain_lock); + +respond: + mlog(0, "We respond with %u\n", packet.code); + + dlm_query_join_packet_to_wire(&packet, &response); + return response; +} + +static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_assert_joined *assert; + struct dlm_ctxt *dlm = NULL; + + assert = (struct dlm_assert_joined *) msg->buf; + + mlog(0, "node %u asserts join on domain %s\n", assert->node_idx, + assert->domain); + + spin_lock(&dlm_domain_lock); + dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len); + /* XXX should we consider no dlm ctxt an error? */ + if (dlm) { + spin_lock(&dlm->spinlock); + + /* Alright, this node has officially joined our + * domain. Set him in the map and clean up our + * leftover join state. */ + BUG_ON(dlm->joining_node != assert->node_idx); + set_bit(assert->node_idx, dlm->domain_map); + __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); + + printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n", + assert->node_idx, dlm->name); + __dlm_print_nodes(dlm); + + /* notify anything attached to the heartbeat events */ + dlm_hb_event_notify_attached(dlm, assert->node_idx, 1); + + spin_unlock(&dlm->spinlock); + } + spin_unlock(&dlm_domain_lock); + + return 0; +} + +static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_cancel_join *cancel; + struct dlm_ctxt *dlm = NULL; + + cancel = (struct dlm_cancel_join *) msg->buf; + + mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx, + cancel->domain); + + spin_lock(&dlm_domain_lock); + dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len); + + if (dlm) { + spin_lock(&dlm->spinlock); + + /* Yikes, this guy wants to cancel his join. No + * problem, we simply cleanup our join state. */ + BUG_ON(dlm->joining_node != cancel->node_idx); + __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); + + spin_unlock(&dlm->spinlock); + } + spin_unlock(&dlm_domain_lock); + + return 0; +} + +static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm, + unsigned int node) +{ + int status; + struct dlm_cancel_join cancel_msg; + + memset(&cancel_msg, 0, sizeof(cancel_msg)); + cancel_msg.node_idx = dlm->node_num; + cancel_msg.name_len = strlen(dlm->name); + memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len); + + status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, + &cancel_msg, sizeof(cancel_msg), node, + NULL); + if (status < 0) { + mlog_errno(status); + goto bail; + } + +bail: + return status; +} + +/* map_size should be in bytes. 
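+ * Callers pass sizeof(their nodemap); the function insists on exactly
+ * BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long) bytes, then
+ * sends one cancel per set bit, remembering only the first error so
+ * that every node is still attempted.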
*/ +static int dlm_send_join_cancels(struct dlm_ctxt *dlm, + unsigned long *node_map, + unsigned int map_size) +{ + int status, tmpstat; + unsigned int node; + + if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) * + sizeof(unsigned long))) { + mlog(ML_ERROR, + "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n", + map_size, (unsigned)BITS_TO_LONGS(O2NM_MAX_NODES)); + return -EINVAL; + } + + status = 0; + node = -1; + while ((node = find_next_bit(node_map, O2NM_MAX_NODES, + node + 1)) < O2NM_MAX_NODES) { + if (node == dlm->node_num) + continue; + + tmpstat = dlm_send_one_join_cancel(dlm, node); + if (tmpstat) { + mlog(ML_ERROR, "Error return %d cancelling join on " + "node %d\n", tmpstat, node); + if (!status) + status = tmpstat; + } + } + + if (status) + mlog_errno(status); + return status; +} + +static int dlm_request_join(struct dlm_ctxt *dlm, + int node, + enum dlm_query_join_response_code *response) +{ + int status; + struct dlm_query_join_request join_msg; + struct dlm_query_join_packet packet; + u32 join_resp; + + mlog(0, "querying node %d\n", node); + + memset(&join_msg, 0, sizeof(join_msg)); + join_msg.node_idx = dlm->node_num; + join_msg.name_len = strlen(dlm->name); + memcpy(join_msg.domain, dlm->name, join_msg.name_len); + join_msg.dlm_proto = dlm->dlm_locking_proto; + join_msg.fs_proto = dlm->fs_locking_proto; + + /* copy live node map to join message */ + byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES); + + status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, + sizeof(join_msg), node, + &join_resp); + if (status < 0 && status != -ENOPROTOOPT) { + mlog_errno(status); + goto bail; + } + dlm_query_join_wire_to_packet(join_resp, &packet); + + /* -ENOPROTOOPT from the net code means the other side isn't + listening for our message type -- that's fine, it means + his dlm isn't up, so we can consider him a 'yes' but not + joined into the domain. */ + if (status == -ENOPROTOOPT) { + status = 0; + *response = JOIN_OK_NO_MAP; + } else if (packet.code == JOIN_DISALLOW || + packet.code == JOIN_OK_NO_MAP) { + *response = packet.code; + } else if (packet.code == JOIN_PROTOCOL_MISMATCH) { + mlog(ML_NOTICE, + "This node requested DLM locking protocol %u.%u and " + "filesystem locking protocol %u.%u. 
At least one of " + "the protocol versions on node %d is not compatible, " + "disconnecting\n", + dlm->dlm_locking_proto.pv_major, + dlm->dlm_locking_proto.pv_minor, + dlm->fs_locking_proto.pv_major, + dlm->fs_locking_proto.pv_minor, + node); + status = -EPROTO; + *response = packet.code; + } else if (packet.code == JOIN_OK) { + *response = packet.code; + /* Use the same locking protocol as the remote node */ + dlm->dlm_locking_proto.pv_minor = packet.dlm_minor; + dlm->fs_locking_proto.pv_minor = packet.fs_minor; + mlog(0, + "Node %d responds JOIN_OK with DLM locking protocol " + "%u.%u and fs locking protocol %u.%u\n", + node, + dlm->dlm_locking_proto.pv_major, + dlm->dlm_locking_proto.pv_minor, + dlm->fs_locking_proto.pv_major, + dlm->fs_locking_proto.pv_minor); + } else { + status = -EINVAL; + mlog(ML_ERROR, "invalid response %d from node %u\n", + packet.code, node); + } + + mlog(0, "status %d, node %d response is %d\n", status, node, + *response); + +bail: + return status; +} + +static int dlm_send_one_join_assert(struct dlm_ctxt *dlm, + unsigned int node) +{ + int status; + struct dlm_assert_joined assert_msg; + + mlog(0, "Sending join assert to node %u\n", node); + + memset(&assert_msg, 0, sizeof(assert_msg)); + assert_msg.node_idx = dlm->node_num; + assert_msg.name_len = strlen(dlm->name); + memcpy(assert_msg.domain, dlm->name, assert_msg.name_len); + + status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, + &assert_msg, sizeof(assert_msg), node, + NULL); + if (status < 0) + mlog_errno(status); + + return status; +} + +static void dlm_send_join_asserts(struct dlm_ctxt *dlm, + unsigned long *node_map) +{ + int status, node, live; + + status = 0; + node = -1; + while ((node = find_next_bit(node_map, O2NM_MAX_NODES, + node + 1)) < O2NM_MAX_NODES) { + if (node == dlm->node_num) + continue; + + do { + /* It is very important that this message be + * received so we spin until either the node + * has died or it gets the message. */ + status = dlm_send_one_join_assert(dlm, node); + + spin_lock(&dlm->spinlock); + live = test_bit(node, dlm->live_nodes_map); + spin_unlock(&dlm->spinlock); + + if (status) { + mlog(ML_ERROR, "Error return %d asserting " + "join on node %d\n", status, node); + + /* give us some time between errors... 
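+ * The loop spins until the target node either acks the assert or
+ * drops out of the live map, sleeping DLM_DOMAIN_BACKOFF_MS between
+ * failed sends to a node that is still alive.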
*/
+ if (live)
+ msleep(DLM_DOMAIN_BACKOFF_MS);
+ }
+ } while (status && live);
+ }
+}
+
+struct domain_join_ctxt {
+ unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+static int dlm_should_restart_join(struct dlm_ctxt *dlm,
+ struct domain_join_ctxt *ctxt,
+ enum dlm_query_join_response_code response)
+{
+ int ret;
+
+ if (response == JOIN_DISALLOW) {
+ mlog(0, "Latest response of disallow -- should restart\n");
+ return 1;
+ }
+
+ spin_lock(&dlm->spinlock);
+ /* For now, we restart the process if the node maps have
+ * changed at all */
+ ret = memcmp(ctxt->live_map, dlm->live_nodes_map,
+ sizeof(dlm->live_nodes_map));
+ spin_unlock(&dlm->spinlock);
+
+ if (ret)
+ mlog(0, "Node maps changed -- should restart\n");
+
+ return ret;
+}
+
+static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
+{
+ int status = 0, tmpstat, node;
+ struct domain_join_ctxt *ctxt;
+ enum dlm_query_join_response_code response = JOIN_DISALLOW;
+
+ mlog_entry("%p", dlm);
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* group sem locking should work for us here -- we're already
+ * registered for heartbeat events so filling this should be
+ * atomic wrt getting those handlers called. */
+ o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));
+
+ spin_lock(&dlm->spinlock);
+ memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));
+
+ __dlm_set_joining_node(dlm, dlm->node_num);
+
+ spin_unlock(&dlm->spinlock);
+
+ node = -1;
+ while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ if (node == dlm->node_num)
+ continue;
+
+ status = dlm_request_join(dlm, node, &response);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Ok, either we got a response or the node doesn't have a
+ * dlm up. */
+ if (response == JOIN_OK)
+ set_bit(node, ctxt->yes_resp_map);
+
+ if (dlm_should_restart_join(dlm, ctxt, response)) {
+ status = -EAGAIN;
+ goto bail;
+ }
+ }
+
+ mlog(0, "Yay, done querying nodes!\n");
+
+ /* Yay, everyone agrees we can join the domain. My domain is
+ * composed of all nodes who were put in the
+ * yes_resp_map. Copy that into our domain map and send a join
+ * assert message to clean up everyone else's state. */
+ spin_lock(&dlm->spinlock);
+ memcpy(dlm->domain_map, ctxt->yes_resp_map,
+ sizeof(ctxt->yes_resp_map));
+ set_bit(dlm->node_num, dlm->domain_map);
+ spin_unlock(&dlm->spinlock);
+
+ dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
+
+ /* Joined state *must* be set before the joining node
+ * information, otherwise the query_join handler may read no
+ * current joiner but a state of NEW and tell joining nodes
+ * we're not in the domain. */
+ spin_lock(&dlm_domain_lock);
+ dlm->dlm_state = DLM_CTXT_JOINED;
+ dlm->num_joins++;
+ spin_unlock(&dlm_domain_lock);
+
+bail:
+ spin_lock(&dlm->spinlock);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+ if (!status)
+ __dlm_print_nodes(dlm);
+ spin_unlock(&dlm->spinlock);
+
+ if (ctxt) {
+ /* Do we need to send a cancel message to any nodes?
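+ * Only if we errored out above: any node already in
+ * yes_resp_map counts us as a joiner and has to be told to
+ * forget about us.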
*/ + if (status < 0) { + tmpstat = dlm_send_join_cancels(dlm, + ctxt->yes_resp_map, + sizeof(ctxt->yes_resp_map)); + if (tmpstat < 0) + mlog_errno(tmpstat); + } + kfree(ctxt); + } + + mlog(0, "returning %d\n", status); + return status; +} + +static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm) +{ + o2hb_unregister_callback(NULL, &dlm->dlm_hb_up); + o2hb_unregister_callback(NULL, &dlm->dlm_hb_down); + o2net_unregister_handler_list(&dlm->dlm_domain_handlers); +} + +static int dlm_register_domain_handlers(struct dlm_ctxt *dlm) +{ + int status; + + mlog(0, "registering handlers.\n"); + + o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB, + dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI); + status = o2hb_register_callback(NULL, &dlm->dlm_hb_down); + if (status) + goto bail; + + o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB, + dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI); + status = o2hb_register_callback(NULL, &dlm->dlm_hb_up); + if (status) + goto bail; + + status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key, + sizeof(struct dlm_master_request), + dlm_master_request_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key, + sizeof(struct dlm_assert_master), + dlm_assert_master_handler, + dlm, dlm_assert_master_post_handler, + &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key, + sizeof(struct dlm_create_lock), + dlm_create_lock_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key, + DLM_CONVERT_LOCK_MAX_LEN, + dlm_convert_lock_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key, + DLM_UNLOCK_LOCK_MAX_LEN, + dlm_unlock_lock_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key, + DLM_PROXY_AST_MAX_LEN, + dlm_proxy_ast_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key, + sizeof(struct dlm_exit_domain), + dlm_exit_domain_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key, + sizeof(struct dlm_deref_lockres), + dlm_deref_lockres_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key, + sizeof(struct dlm_migrate_request), + dlm_migrate_request_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key, + DLM_MIG_LOCKRES_MAX_LEN, + dlm_mig_lockres_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key, + sizeof(struct dlm_master_requery), + dlm_master_requery_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key, + sizeof(struct dlm_lock_request), + dlm_request_all_locks_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key, + sizeof(struct dlm_reco_data_done), + dlm_reco_data_done_handler, + dlm, NULL, 
&dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key, + sizeof(struct dlm_begin_reco), + dlm_begin_reco_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + + status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key, + sizeof(struct dlm_finalize_reco), + dlm_finalize_reco_handler, + dlm, NULL, &dlm->dlm_domain_handlers); + if (status) + goto bail; + +bail: + if (status) + dlm_unregister_domain_handlers(dlm); + + return status; +} + +static int dlm_join_domain(struct dlm_ctxt *dlm) +{ + int status; + unsigned int backoff; + unsigned int total_backoff = 0; + + BUG_ON(!dlm); + + mlog(0, "Join domain %s\n", dlm->name); + + status = dlm_register_domain_handlers(dlm); + if (status) { + mlog_errno(status); + goto bail; + } + + status = dlm_debug_init(dlm); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = dlm_launch_thread(dlm); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = dlm_launch_recovery_thread(dlm); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + dlm->dlm_worker = create_singlethread_workqueue("dlm_wq"); + if (!dlm->dlm_worker) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + do { + status = dlm_try_to_join_domain(dlm); + + /* If we're racing another node to the join, then we + * need to back off temporarily and let them + * complete. */ +#define DLM_JOIN_TIMEOUT_MSECS 90000 + if (status == -EAGAIN) { + if (signal_pending(current)) { + status = -ERESTARTSYS; + goto bail; + } + + if (total_backoff > + msecs_to_jiffies(DLM_JOIN_TIMEOUT_MSECS)) { + status = -ERESTARTSYS; + mlog(ML_NOTICE, "Timed out joining dlm domain " + "%s after %u msecs\n", dlm->name, + jiffies_to_msecs(total_backoff)); + goto bail; + } + + /* + * <chip> After you! + * <dale> No, after you! + * <chip> I insist! + * <dale> But you first! + * ... 
+ */ + backoff = (unsigned int)(jiffies & 0x3); + backoff *= DLM_DOMAIN_BACKOFF_MS; + total_backoff += backoff; + mlog(0, "backoff %d\n", backoff); + msleep(backoff); + } + } while (status == -EAGAIN); + + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = 0; +bail: + wake_up(&dlm_domain_events); + + if (status) { + dlm_unregister_domain_handlers(dlm); + dlm_debug_shutdown(dlm); + dlm_complete_thread(dlm); + dlm_complete_recovery_thread(dlm); + dlm_destroy_dlm_worker(dlm); + } + + return status; +} + +static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, + u32 key) +{ + int i; + int ret; + struct dlm_ctxt *dlm = NULL; + + dlm = kzalloc(sizeof(*dlm), GFP_KERNEL); + if (!dlm) { + mlog_errno(-ENOMEM); + goto leave; + } + + dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL); + if (dlm->name == NULL) { + mlog_errno(-ENOMEM); + kfree(dlm); + dlm = NULL; + goto leave; + } + + dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES); + if (!dlm->lockres_hash) { + mlog_errno(-ENOMEM); + kfree(dlm->name); + kfree(dlm); + dlm = NULL; + goto leave; + } + + for (i = 0; i < DLM_HASH_BUCKETS; i++) + INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i)); + + strcpy(dlm->name, domain); + dlm->key = key; + dlm->node_num = o2nm_this_node(); + + ret = dlm_create_debugfs_subroot(dlm); + if (ret < 0) { + dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES); + kfree(dlm->name); + kfree(dlm); + dlm = NULL; + goto leave; + } + + spin_lock_init(&dlm->spinlock); + spin_lock_init(&dlm->master_lock); + spin_lock_init(&dlm->ast_lock); + INIT_LIST_HEAD(&dlm->list); + INIT_LIST_HEAD(&dlm->dirty_list); + INIT_LIST_HEAD(&dlm->reco.resources); + INIT_LIST_HEAD(&dlm->reco.received); + INIT_LIST_HEAD(&dlm->reco.node_data); + INIT_LIST_HEAD(&dlm->purge_list); + INIT_LIST_HEAD(&dlm->dlm_domain_handlers); + INIT_LIST_HEAD(&dlm->tracking_list); + dlm->reco.state = 0; + + INIT_LIST_HEAD(&dlm->pending_asts); + INIT_LIST_HEAD(&dlm->pending_basts); + + mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n", + dlm->recovery_map, &(dlm->recovery_map[0])); + + memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map)); + memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map)); + memset(dlm->domain_map, 0, sizeof(dlm->domain_map)); + + dlm->dlm_thread_task = NULL; + dlm->dlm_reco_thread_task = NULL; + dlm->dlm_worker = NULL; + init_waitqueue_head(&dlm->dlm_thread_wq); + init_waitqueue_head(&dlm->dlm_reco_thread_wq); + init_waitqueue_head(&dlm->reco.event); + init_waitqueue_head(&dlm->ast_wq); + init_waitqueue_head(&dlm->migration_wq); + INIT_LIST_HEAD(&dlm->master_list); + INIT_LIST_HEAD(&dlm->mle_hb_events); + + dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN; + init_waitqueue_head(&dlm->dlm_join_events); + + dlm->reco.new_master = O2NM_INVALID_NODE_NUM; + dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; + atomic_set(&dlm->local_resources, 0); + atomic_set(&dlm->remote_resources, 0); + atomic_set(&dlm->unknown_resources, 0); + + spin_lock_init(&dlm->work_lock); + INIT_LIST_HEAD(&dlm->work_list); + INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work); + + kref_init(&dlm->dlm_refs); + dlm->dlm_state = DLM_CTXT_NEW; + + INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks); + + mlog(0, "context init: refcount %u\n", + atomic_read(&dlm->dlm_refs.refcount)); + +leave: + return dlm; +} + +/* + * Compare a requested locking protocol version against the current one. + * + * If the major numbers are different, they are incompatible. + * If the current minor is greater than the request, they are incompatible. 
+ * If the current minor is less than or equal to the request, they are + * compatible, and the requester should run at the current minor version. + */ +static int dlm_protocol_compare(struct dlm_protocol_version *existing, + struct dlm_protocol_version *request) +{ + if (existing->pv_major != request->pv_major) + return 1; + + if (existing->pv_minor > request->pv_minor) + return 1; + + if (existing->pv_minor < request->pv_minor) + request->pv_minor = existing->pv_minor; + + return 0; +} + +/* + * dlm_register_domain: one-time setup per "domain". + * + * The filesystem passes in the requested locking version via proto. + * If registration was successful, proto will contain the negotiated + * locking protocol. + */ +struct dlm_ctxt * dlm_register_domain(const char *domain, + u32 key, + struct dlm_protocol_version *fs_proto) +{ + int ret; + struct dlm_ctxt *dlm = NULL; + struct dlm_ctxt *new_ctxt = NULL; + + if (strlen(domain) > O2NM_MAX_NAME_LEN) { + ret = -ENAMETOOLONG; + mlog(ML_ERROR, "domain name length too long\n"); + goto leave; + } + + if (!o2hb_check_local_node_heartbeating()) { + mlog(ML_ERROR, "the local node has not been configured, or is " + "not heartbeating\n"); + ret = -EPROTO; + goto leave; + } + + mlog(0, "register called for domain \"%s\"\n", domain); + +retry: + dlm = NULL; + if (signal_pending(current)) { + ret = -ERESTARTSYS; + mlog_errno(ret); + goto leave; + } + + spin_lock(&dlm_domain_lock); + + dlm = __dlm_lookup_domain(domain); + if (dlm) { + if (dlm->dlm_state != DLM_CTXT_JOINED) { + spin_unlock(&dlm_domain_lock); + + mlog(0, "This ctxt is not joined yet!\n"); + wait_event_interruptible(dlm_domain_events, + dlm_wait_on_domain_helper( + domain)); + goto retry; + } + + if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) { + mlog(ML_ERROR, + "Requested locking protocol version is not " + "compatible with already registered domain " + "\"%s\"\n", domain); + ret = -EPROTO; + goto leave; + } + + __dlm_get(dlm); + dlm->num_joins++; + + spin_unlock(&dlm_domain_lock); + + ret = 0; + goto leave; + } + + /* doesn't exist */ + if (!new_ctxt) { + spin_unlock(&dlm_domain_lock); + + new_ctxt = dlm_alloc_ctxt(domain, key); + if (new_ctxt) + goto retry; + + ret = -ENOMEM; + mlog_errno(ret); + goto leave; + } + + /* a little variable switch-a-roo here... */ + dlm = new_ctxt; + new_ctxt = NULL; + + /* add the new domain */ + list_add_tail(&dlm->list, &dlm_domains); + spin_unlock(&dlm_domain_lock); + + /* + * Pass the locking protocol version into the join. If the join + * succeeds, it will have the negotiated protocol set. 
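+ *
+ * Illustrative example (version numbers hypothetical, not taken
+ * from this code): joining a domain that runs protocol 1.1 with
+ * a request for 1.3 succeeds, and fs_proto comes back as 1.1;
+ * a request for 1.0, or for any 2.x, fails the
+ * dlm_protocol_compare() check and the join is refused.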
+ */
+ dlm->dlm_locking_proto = dlm_protocol;
+ dlm->fs_locking_proto = *fs_proto;
+
+ ret = dlm_join_domain(dlm);
+ if (ret) {
+ mlog_errno(ret);
+ dlm_put(dlm);
+ goto leave;
+ }
+
+ /* Tell the caller what locking protocol we negotiated */
+ *fs_proto = dlm->fs_locking_proto;
+
+ ret = 0;
+leave:
+ if (new_ctxt)
+ dlm_free_ctxt_mem(new_ctxt);
+
+ if (ret < 0)
+ dlm = ERR_PTR(ret);
+
+ return dlm;
+}
+EXPORT_SYMBOL_GPL(dlm_register_domain);
+
+static LIST_HEAD(dlm_join_handlers);
+
+static void dlm_unregister_net_handlers(void)
+{
+ o2net_unregister_handler_list(&dlm_join_handlers);
+}
+
+static int dlm_register_net_handlers(void)
+{
+ int status = 0;
+
+ status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_query_join_request),
+ dlm_query_join_handler,
+ NULL, NULL, &dlm_join_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_assert_joined),
+ dlm_assert_joined_handler,
+ NULL, NULL, &dlm_join_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_cancel_join),
+ dlm_cancel_join_handler,
+ NULL, NULL, &dlm_join_handlers);
+
+bail:
+ if (status < 0)
+ dlm_unregister_net_handlers();
+
+ return status;
+}
+
+/* Domain eviction callback handling.
+ *
+ * The file system requires notification of node death *before* the
+ * dlm completes its recovery work, otherwise it may be able to
+ * acquire locks on resources requiring recovery. Since the dlm can
+ * evict a node from its domain *before* heartbeat fires, a similar
+ * mechanism is required. */
+
+/* Eviction is not expected to happen often, so a per-domain lock is
+ * not necessary. Eviction callbacks are allowed to sleep for short
+ * periods of time.
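+ * A single global rwsem (dlm_callback_sem, below) covers the
+ * callback lists of every domain: firing callbacks takes it for
+ * read, register/unregister take it for write.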
*/
+static DECLARE_RWSEM(dlm_callback_sem);
+
+void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
+ int node_num)
+{
+ struct list_head *iter;
+ struct dlm_eviction_cb *cb;
+
+ down_read(&dlm_callback_sem);
+ list_for_each(iter, &dlm->dlm_eviction_callbacks) {
+ cb = list_entry(iter, struct dlm_eviction_cb, ec_item);
+
+ cb->ec_func(node_num, cb->ec_data);
+ }
+ up_read(&dlm_callback_sem);
+}
+
+void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
+ dlm_eviction_func *f,
+ void *data)
+{
+ INIT_LIST_HEAD(&cb->ec_item);
+ cb->ec_func = f;
+ cb->ec_data = data;
+}
+EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);
+
+void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
+ struct dlm_eviction_cb *cb)
+{
+ down_write(&dlm_callback_sem);
+ list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
+ up_write(&dlm_callback_sem);
+}
+EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);
+
+void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
+{
+ down_write(&dlm_callback_sem);
+ list_del_init(&cb->ec_item);
+ up_write(&dlm_callback_sem);
+}
+EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);
+
+static int __init dlm_init(void)
+{
+ int status;
+
+ dlm_print_version();
+
+ status = dlm_init_mle_cache();
+ if (status) {
+ mlog(ML_ERROR, "Could not create o2dlm_mle slabcache\n");
+ goto error;
+ }
+
+ status = dlm_init_master_caches();
+ if (status) {
+ mlog(ML_ERROR, "Could not create o2dlm_lockres and "
+ "o2dlm_lockname slabcaches\n");
+ goto error;
+ }
+
+ status = dlm_init_lock_cache();
+ if (status) {
+ mlog(ML_ERROR, "Could not create o2dlm_lock slabcache\n");
+ goto error;
+ }
+
+ status = dlm_register_net_handlers();
+ if (status) {
+ mlog(ML_ERROR, "Unable to register network handlers\n");
+ goto error;
+ }
+
+ status = dlm_create_debugfs_root();
+ if (status)
+ goto error;
+
+ return 0;
+error:
+ dlm_unregister_net_handlers();
+ dlm_destroy_lock_cache();
+ dlm_destroy_master_caches();
+ dlm_destroy_mle_cache();
+ return -1;
+}
+
+static void __exit dlm_exit (void)
+{
+ dlm_destroy_debugfs_root();
+ dlm_unregister_net_handlers();
+ dlm_destroy_lock_cache();
+ dlm_destroy_master_caches();
+ dlm_destroy_mle_cache();
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+
+module_init(dlm_init);
+module_exit(dlm_exit);
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
new file mode 100644
index 0000000..2f7f60b
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -0,0 +1,36 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmdomain.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ * + */ + +#ifndef DLMDOMAIN_H +#define DLMDOMAIN_H + +extern spinlock_t dlm_domain_lock; +extern struct list_head dlm_domains; + +int dlm_joined(struct dlm_ctxt *dlm); +int dlm_shutting_down(struct dlm_ctxt *dlm); +void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, + int node_num); + +#endif diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c new file mode 100644 index 0000000..ba962d7 --- /dev/null +++ b/fs/ocfs2/dlm/dlmfs.c @@ -0,0 +1,653 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmfs.c + * + * Code which implements the kernel side of a minimal userspace + * interface to our DLM. This file handles the virtual file system + * used for communication with userspace. Credit should go to ramfs, + * which was a template for the fs side of this module. + * + * Copyright (C) 2003, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +/* Simple VFS hooks based on: */ +/* + * Resizable simple ram filesystem for Linux. + * + * Copyright (C) 2000 Linus Torvalds. + * 2000 Transmeta Corp. + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/pagemap.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/backing-dev.h> + +#include <asm/uaccess.h> + + +#include "cluster/nodemanager.h" +#include "cluster/heartbeat.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" + +#include "userdlm.h" + +#include "dlmfsver.h" + +#define MLOG_MASK_PREFIX ML_DLMFS +#include "cluster/masklog.h" + +#include "ocfs2_lockingver.h" + +static const struct super_operations dlmfs_ops; +static const struct file_operations dlmfs_file_operations; +static const struct inode_operations dlmfs_dir_inode_operations; +static const struct inode_operations dlmfs_root_inode_operations; +static const struct inode_operations dlmfs_file_inode_operations; +static struct kmem_cache *dlmfs_inode_cache; + +struct workqueue_struct *user_dlm_worker; + +/* + * This is the userdlmfs locking protocol version. + * + * See fs/ocfs2/dlmglue.c for more details on locking versions. + */ +static const struct dlm_protocol_version user_locking_protocol = { + .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, + .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, +}; + +/* + * decodes a set of open flags into a valid lock level and a set of flags. 
+ * returns < 0 if we have invalid flags
+ * flags which mean something to us:
+ * O_RDONLY -> PRMODE level
+ * O_WRONLY -> EXMODE level
+ *
+ * O_NONBLOCK -> LKM_NOQUEUE
+ */
+static int dlmfs_decode_open_flags(int open_flags,
+ int *level,
+ int *flags)
+{
+ if (open_flags & (O_WRONLY|O_RDWR))
+ *level = LKM_EXMODE;
+ else
+ *level = LKM_PRMODE;
+
+ *flags = 0;
+ if (open_flags & O_NONBLOCK)
+ *flags |= LKM_NOQUEUE;
+
+ return 0;
+}
+
+static int dlmfs_file_open(struct inode *inode,
+ struct file *file)
+{
+ int status, level, flags;
+ struct dlmfs_filp_private *fp = NULL;
+ struct dlmfs_inode_private *ip;
+
+ if (S_ISDIR(inode->i_mode))
+ BUG();
+
+ mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
+ file->f_flags);
+
+ status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
+ if (status < 0)
+ goto bail;
+
+ /* We don't want to honor O_APPEND at read/write time as it
+ * doesn't make sense for LVB writes. */
+ file->f_flags &= ~O_APPEND;
+
+ fp = kmalloc(sizeof(*fp), GFP_NOFS);
+ if (!fp) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ fp->fp_lock_level = level;
+
+ ip = DLMFS_I(inode);
+
+ status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
+ if (status < 0) {
+ /* this is a strange error to return here but I want
+ * userspace to be able to distinguish a valid lock
+ * request from one that simply couldn't be
+ * granted. */
+ if (flags & LKM_NOQUEUE && status == -EAGAIN)
+ status = -ETXTBSY;
+ kfree(fp);
+ goto bail;
+ }
+
+ file->private_data = fp;
+bail:
+ return status;
+}
+
+static int dlmfs_file_release(struct inode *inode,
+ struct file *file)
+{
+ int level, status;
+ struct dlmfs_inode_private *ip = DLMFS_I(inode);
+ struct dlmfs_filp_private *fp =
+ (struct dlmfs_filp_private *) file->private_data;
+
+ if (S_ISDIR(inode->i_mode))
+ BUG();
+
+ mlog(0, "close called on inode %lu\n", inode->i_ino);
+
+ status = 0;
+ if (fp) {
+ level = fp->fp_lock_level;
+ if (level != LKM_IVMODE)
+ user_dlm_cluster_unlock(&ip->ip_lockres, level);
+
+ kfree(fp);
+ file->private_data = NULL;
+ }
+
+ return 0;
+}
+
+static ssize_t dlmfs_file_read(struct file *filp,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int bytes_left;
+ ssize_t readlen;
+ char *lvb_buf;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+
+ mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
+ inode->i_ino, count, *ppos);
+
+ if (*ppos >= i_size_read(inode))
+ return 0;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ /* don't read past the lvb */
+ if ((count + *ppos) > i_size_read(inode))
+ readlen = i_size_read(inode) - *ppos;
+ else
+ readlen = count;
+
+ lvb_buf = kmalloc(readlen, GFP_NOFS);
+ if (!lvb_buf)
+ return -ENOMEM;
+
+ user_dlm_read_lvb(inode, lvb_buf, readlen);
+ bytes_left = __copy_to_user(buf, lvb_buf, readlen);
+ readlen -= bytes_left;
+
+ kfree(lvb_buf);
+
+ *ppos = *ppos + readlen;
+
+ mlog(0, "read %zd bytes\n", readlen);
+ return readlen;
+}
+
+static ssize_t dlmfs_file_write(struct file *filp,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int bytes_left;
+ ssize_t writelen;
+ char *lvb_buf;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+
+ mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
+ inode->i_ino, count, *ppos);
+
+ if (*ppos >= i_size_read(inode))
+ return -ENOSPC;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ /* don't write past the lvb */
+ if ((count + *ppos) > i_size_read(inode))
+ writelen = i_size_read(inode) - *ppos;
+ else
+ writelen = count;
+
+ lvb_buf = kmalloc(writelen, GFP_NOFS);
+ if (!lvb_buf)
+ return -ENOMEM;
+
+ bytes_left = copy_from_user(lvb_buf, buf, writelen);
+ writelen -= bytes_left;
+ if (writelen)
+ user_dlm_write_lvb(inode, lvb_buf, writelen);
+
+ kfree(lvb_buf);
+
+ *ppos = *ppos + writelen;
+ mlog(0, "wrote %zd bytes\n", writelen);
+ return writelen;
+}
+
+static void dlmfs_init_once(void *foo)
+{
+ struct dlmfs_inode_private *ip =
+ (struct dlmfs_inode_private *) foo;
+
+ ip->ip_dlm = NULL;
+ ip->ip_parent = NULL;
+
+ inode_init_once(&ip->ip_vfs_inode);
+}
+
+static struct inode *dlmfs_alloc_inode(struct super_block *sb)
+{
+ struct dlmfs_inode_private *ip;
+
+ ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
+ if (!ip)
+ return NULL;
+
+ return &ip->ip_vfs_inode;
+}
+
+static void dlmfs_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
+}
+
+static void dlmfs_clear_inode(struct inode *inode)
+{
+ int status;
+ struct dlmfs_inode_private *ip;
+
+ if (!inode)
+ return;
+
+ mlog(0, "inode %lu\n", inode->i_ino);
+
+ ip = DLMFS_I(inode);
+
+ if (S_ISREG(inode->i_mode)) {
+ status = user_dlm_destroy_lock(&ip->ip_lockres);
+ if (status < 0)
+ mlog_errno(status);
+ iput(ip->ip_parent);
+ goto clear_fields;
+ }
+
+ mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm);
+ /* we must be a directory. If required, let's unregister the
+ * dlm context now. */
+ if (ip->ip_dlm)
+ user_dlm_unregister_context(ip->ip_dlm);
+clear_fields:
+ ip->ip_parent = NULL;
+ ip->ip_dlm = NULL;
+}
+
+static struct backing_dev_info dlmfs_backing_dev_info = {
+ .ra_pages = 0, /* No readahead */
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
+};
+
+static struct inode *dlmfs_get_root_inode(struct super_block *sb)
+{
+ struct inode *inode = new_inode(sb);
+ int mode = S_IFDIR | 0755;
+ struct dlmfs_inode_private *ip;
+
+ if (inode) {
+ ip = DLMFS_I(inode);
+
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blocks = 0;
+ inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inc_nlink(inode);
+
+ inode->i_fop = &simple_dir_operations;
+ inode->i_op = &dlmfs_root_inode_operations;
+ }
+
+ return inode;
+}
+
+static struct inode *dlmfs_get_inode(struct inode *parent,
+ struct dentry *dentry,
+ int mode)
+{
+ struct super_block *sb = parent->i_sb;
+ struct inode * inode = new_inode(sb);
+ struct dlmfs_inode_private *ip;
+
+ if (!inode)
+ return NULL;
+
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blocks = 0;
+ inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+
+ ip = DLMFS_I(inode);
+ ip->ip_dlm = DLMFS_I(parent)->ip_dlm;
+
+ switch (mode & S_IFMT) {
+ default:
+ /* for now we don't support anything other than
+ * directories and regular files. */
+ BUG();
+ break;
+ case S_IFREG:
+ inode->i_op = &dlmfs_file_inode_operations;
+ inode->i_fop = &dlmfs_file_operations;
+
+ i_size_write(inode, DLM_LVB_LEN);
+
+ user_dlm_lock_res_init(&ip->ip_lockres, dentry);
+
+ /* released at clear_inode time, this ensures that we
+ * get to drop the dlm reference on each lock *before*
+ * we call the unregister code for releasing parent
+ * directories.
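+ * (the igrab() below is paired with the iput() in
+ * dlmfs_clear_inode())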
*/ + ip->ip_parent = igrab(parent); + BUG_ON(!ip->ip_parent); + break; + case S_IFDIR: + inode->i_op = &dlmfs_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + + /* directory inodes start off with i_nlink == + * 2 (for "." entry) */ + inc_nlink(inode); + break; + } + + if (parent->i_mode & S_ISGID) { + inode->i_gid = parent->i_gid; + if (S_ISDIR(mode)) + inode->i_mode |= S_ISGID; + } + + return inode; +} + +/* + * File creation. Allocate an inode, and we're done.. + */ +/* SMP-safe */ +static int dlmfs_mkdir(struct inode * dir, + struct dentry * dentry, + int mode) +{ + int status; + struct inode *inode = NULL; + struct qstr *domain = &dentry->d_name; + struct dlmfs_inode_private *ip; + struct dlm_ctxt *dlm; + struct dlm_protocol_version proto = user_locking_protocol; + + mlog(0, "mkdir %.*s\n", domain->len, domain->name); + + /* verify that we have a proper domain */ + if (domain->len >= O2NM_MAX_NAME_LEN) { + status = -EINVAL; + mlog(ML_ERROR, "invalid domain name for directory.\n"); + goto bail; + } + + inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR); + if (!inode) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + ip = DLMFS_I(inode); + + dlm = user_dlm_register_context(domain, &proto); + if (IS_ERR(dlm)) { + status = PTR_ERR(dlm); + mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", + status, domain->len, domain->name); + goto bail; + } + ip->ip_dlm = dlm; + + inc_nlink(dir); + d_instantiate(dentry, inode); + dget(dentry); /* Extra count - pin the dentry in core */ + + status = 0; +bail: + if (status < 0) + iput(inode); + return status; +} + +static int dlmfs_create(struct inode *dir, + struct dentry *dentry, + int mode, + struct nameidata *nd) +{ + int status = 0; + struct inode *inode; + struct qstr *name = &dentry->d_name; + + mlog(0, "create %.*s\n", name->len, name->name); + + /* verify name is valid and doesn't contain any dlm reserved + * characters */ + if (name->len >= USER_DLM_LOCK_ID_MAX_LEN || + name->name[0] == '$') { + status = -EINVAL; + mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len, + name->name); + goto bail; + } + + inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG); + if (!inode) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + d_instantiate(dentry, inode); + dget(dentry); /* Extra count - pin the dentry in core */ +bail: + return status; +} + +static int dlmfs_unlink(struct inode *dir, + struct dentry *dentry) +{ + int status; + struct inode *inode = dentry->d_inode; + + mlog(0, "unlink inode %lu\n", inode->i_ino); + + /* if there are no current holders, or none that are waiting + * to acquire a lock, this basically destroys our lockres. 
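+ * If the lock is still in use, user_dlm_destroy_lock() fails
+ * below and we bail out before calling simple_unlink().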
*/ + status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres); + if (status < 0) { + mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n", + dentry->d_name.len, dentry->d_name.name, status); + goto bail; + } + status = simple_unlink(dir, dentry); +bail: + return status; +} + +static int dlmfs_fill_super(struct super_block * sb, + void * data, + int silent) +{ + struct inode * inode; + struct dentry * root; + + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_magic = DLMFS_MAGIC; + sb->s_op = &dlmfs_ops; + inode = dlmfs_get_root_inode(sb); + if (!inode) + return -ENOMEM; + + root = d_alloc_root(inode); + if (!root) { + iput(inode); + return -ENOMEM; + } + sb->s_root = root; + return 0; +} + +static const struct file_operations dlmfs_file_operations = { + .open = dlmfs_file_open, + .release = dlmfs_file_release, + .read = dlmfs_file_read, + .write = dlmfs_file_write, +}; + +static const struct inode_operations dlmfs_dir_inode_operations = { + .create = dlmfs_create, + .lookup = simple_lookup, + .unlink = dlmfs_unlink, +}; + +/* this way we can restrict mkdir to only the toplevel of the fs. */ +static const struct inode_operations dlmfs_root_inode_operations = { + .lookup = simple_lookup, + .mkdir = dlmfs_mkdir, + .rmdir = simple_rmdir, +}; + +static const struct super_operations dlmfs_ops = { + .statfs = simple_statfs, + .alloc_inode = dlmfs_alloc_inode, + .destroy_inode = dlmfs_destroy_inode, + .clear_inode = dlmfs_clear_inode, + .drop_inode = generic_delete_inode, +}; + +static const struct inode_operations dlmfs_file_inode_operations = { + .getattr = simple_getattr, +}; + +static int dlmfs_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, struct vfsmount *mnt) +{ + return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super, mnt); +} + +static struct file_system_type dlmfs_fs_type = { + .owner = THIS_MODULE, + .name = "ocfs2_dlmfs", + .get_sb = dlmfs_get_sb, + .kill_sb = kill_litter_super, +}; + +static int __init init_dlmfs_fs(void) +{ + int status; + int cleanup_inode = 0, cleanup_worker = 0; + + dlmfs_print_version(); + + status = bdi_init(&dlmfs_backing_dev_info); + if (status) + return status; + + dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache", + sizeof(struct dlmfs_inode_private), + 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + dlmfs_init_once); + if (!dlmfs_inode_cache) { + status = -ENOMEM; + goto bail; + } + cleanup_inode = 1; + + user_dlm_worker = create_singlethread_workqueue("user_dlm"); + if (!user_dlm_worker) { + status = -ENOMEM; + goto bail; + } + cleanup_worker = 1; + + status = register_filesystem(&dlmfs_fs_type); +bail: + if (status) { + if (cleanup_inode) + kmem_cache_destroy(dlmfs_inode_cache); + if (cleanup_worker) + destroy_workqueue(user_dlm_worker); + bdi_destroy(&dlmfs_backing_dev_info); + } else + printk("OCFS2 User DLM kernel interface loaded\n"); + return status; +} + +static void __exit exit_dlmfs_fs(void) +{ + unregister_filesystem(&dlmfs_fs_type); + + flush_workqueue(user_dlm_worker); + destroy_workqueue(user_dlm_worker); + + kmem_cache_destroy(dlmfs_inode_cache); + + bdi_destroy(&dlmfs_backing_dev_info); +} + +MODULE_AUTHOR("Oracle"); +MODULE_LICENSE("GPL"); + +module_init(init_dlmfs_fs) +module_exit(exit_dlmfs_fs) diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlm/dlmfsver.c new file mode 100644 index 0000000..a733b33 --- /dev/null +++ b/fs/ocfs2/dlm/dlmfsver.c @@ -0,0 +1,42 @@ +/* -*- mode: c; c-basic-offset: 
8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmfsver.c + * + * version string + * + * Copyright (C) 2002, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/module.h> +#include <linux/kernel.h> + +#include "dlmfsver.h" + +#define DLM_BUILD_VERSION "1.5.0" + +#define VERSION_STR "OCFS2 DLMFS " DLM_BUILD_VERSION + +void dlmfs_print_version(void) +{ + printk(KERN_INFO "%s\n", VERSION_STR); +} + +MODULE_DESCRIPTION(VERSION_STR); + +MODULE_VERSION(DLM_BUILD_VERSION); diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlm/dlmfsver.h new file mode 100644 index 0000000..f35eadb --- /dev/null +++ b/fs/ocfs2/dlm/dlmfsver.h @@ -0,0 +1,31 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmver.h + * + * Function prototypes + * + * Copyright (C) 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef DLMFS_VER_H +#define DLMFS_VER_H + +void dlmfs_print_version(void); + +#endif /* DLMFS_VER_H */ diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c new file mode 100644 index 0000000..83a9f29 --- /dev/null +++ b/fs/ocfs2/dlm/dlmlock.c @@ -0,0 +1,767 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmlock.c + * + * underlying calls for lock creation + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/random.h> +#include <linux/blkdev.h> +#include <linux/socket.h> +#include <linux/inet.h> +#include <linux/spinlock.h> +#include <linux/delay.h> + + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" + +#include "dlmconvert.h" + +#define MLOG_MASK_PREFIX ML_DLM +#include "cluster/masklog.h" + +static struct kmem_cache *dlm_lock_cache = NULL; + +static DEFINE_SPINLOCK(dlm_cookie_lock); +static u64 dlm_next_cookie = 1; + +static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags); +static void dlm_init_lock(struct dlm_lock *newlock, int type, + u8 node, u64 cookie); +static void dlm_lock_release(struct kref *kref); +static void dlm_lock_detach_lockres(struct dlm_lock *lock); + +int dlm_init_lock_cache(void) +{ + dlm_lock_cache = kmem_cache_create("o2dlm_lock", + sizeof(struct dlm_lock), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (dlm_lock_cache == NULL) + return -ENOMEM; + return 0; +} + +void dlm_destroy_lock_cache(void) +{ + if (dlm_lock_cache) + kmem_cache_destroy(dlm_lock_cache); +} + +/* Tell us whether we can grant a new lock request. + * locking: + * caller needs: res->spinlock + * taken: none + * held on exit: none + * returns: 1 if the lock can be granted, 0 otherwise. + */ +static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + struct list_head *iter; + struct dlm_lock *tmplock; + + list_for_each(iter, &res->granted) { + tmplock = list_entry(iter, struct dlm_lock, list); + + if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) + return 0; + } + + list_for_each(iter, &res->converting) { + tmplock = list_entry(iter, struct dlm_lock, list); + + if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) + return 0; + } + + return 1; +} + +/* performs lock creation at the lockres master site + * locking: + * caller needs: none + * taken: takes and drops res->spinlock + * held on exit: none + * returns: DLM_NORMAL, DLM_NOTQUEUED + */ +static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags) +{ + int call_ast = 0, kick_thread = 0; + enum dlm_status status = DLM_NORMAL; + + mlog_entry("type=%d\n", lock->ml.type); + + spin_lock(&res->spinlock); + /* if called from dlm_create_lock_handler, need to + * ensure it will not sleep in dlm_wait_on_lockres */ + status = __dlm_lockres_state_to_status(res); + if (status != DLM_NORMAL && + lock->ml.node != dlm->node_num) { + /* erf. state changed after lock was dropped. */ + spin_unlock(&res->spinlock); + dlm_error(status); + return status; + } + __dlm_wait_on_lockres(res); + __dlm_lockres_reserve_ast(res); + + if (dlm_can_grant_new_lock(res, lock)) { + mlog(0, "I can grant this lock right away\n"); + /* got it right away */ + lock->lksb->status = DLM_NORMAL; + status = DLM_NORMAL; + dlm_lock_get(lock); + list_add_tail(&lock->list, &res->granted); + + /* for the recovery lock, we can't allow the ast + * to be queued since the dlmthread is already + * frozen. 
but the recovery lock is always locked + * with LKM_NOQUEUE so we do not need the ast in + * this special case */ + if (!dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + kick_thread = 1; + call_ast = 1; + } else { + mlog(0, "%s: returning DLM_NORMAL to " + "node %u for reco lock\n", dlm->name, + lock->ml.node); + } + } else { + /* for NOQUEUE request, unless we get the + * lock right away, return DLM_NOTQUEUED */ + if (flags & LKM_NOQUEUE) { + status = DLM_NOTQUEUED; + if (dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + mlog(0, "%s: returning NOTQUEUED to " + "node %u for reco lock\n", dlm->name, + lock->ml.node); + } + } else { + dlm_lock_get(lock); + list_add_tail(&lock->list, &res->blocked); + kick_thread = 1; + } + } + /* reduce the inflight count, this may result in the lockres + * being purged below during calc_usage */ + if (lock->ml.node == dlm->node_num) + dlm_lockres_drop_inflight_ref(dlm, res); + + spin_unlock(&res->spinlock); + wake_up(&res->wq); + + /* either queue the ast or release it */ + if (call_ast) + dlm_queue_ast(dlm, lock); + else + dlm_lockres_release_ast(dlm, res); + + dlm_lockres_calc_usage(dlm, res); + if (kick_thread) + dlm_kick_thread(dlm, res); + + return status; +} + +void dlm_revert_pending_lock(struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + /* remove from local queue if it failed */ + list_del_init(&lock->list); + lock->lksb->flags &= ~DLM_LKSB_GET_LVB; +} + + +/* + * locking: + * caller needs: none + * taken: takes and drops res->spinlock + * held on exit: none + * returns: DLM_DENIED, DLM_RECOVERING, or net status + */ +static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags) +{ + enum dlm_status status = DLM_DENIED; + int lockres_changed = 1; + + mlog_entry("type=%d\n", lock->ml.type); + mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, + res->lockname.name, flags); + + spin_lock(&res->spinlock); + + /* will exit this call with spinlock held */ + __dlm_wait_on_lockres(res); + res->state |= DLM_LOCK_RES_IN_PROGRESS; + + /* add lock to local (secondary) queue */ + dlm_lock_get(lock); + list_add_tail(&lock->list, &res->blocked); + lock->lock_pending = 1; + spin_unlock(&res->spinlock); + + /* spec seems to say that you will get DLM_NORMAL when the lock + * has been queued, meaning we need to wait for a reply here. */ + status = dlm_send_remote_lock_request(dlm, res, lock, flags); + + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; + lock->lock_pending = 0; + if (status != DLM_NORMAL) { + if (status == DLM_RECOVERING && + dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + /* recovery lock was mastered by dead node. + * we need to have calc_usage shoot down this + * lockres and completely remaster it. */ + mlog(0, "%s: recovery lock was owned by " + "dead node %u, remaster it now.\n", + dlm->name, res->owner); + } else if (status != DLM_NOTQUEUED) { + /* + * DO NOT call calc_usage, as this would unhash + * the remote lockres before we ever get to use + * it. treat as if we never made any change to + * the lockres. + */ + lockres_changed = 0; + dlm_error(status); + } + dlm_revert_pending_lock(res, lock); + dlm_lock_put(lock); + } else if (dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + /* special case for the $RECOVERY lock. + * there will never be an AST delivered to put + * this lock on the proper secondary queue + * (granted), so do it manually. 
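+ * The lock already sits on res->blocked (queued earlier in
+ * dlmlock_remote()), so granting it is just the
+ * list_move_tail() below.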
*/ + mlog(0, "%s: $RECOVERY lock for this node (%u) is " + "mastered by %u; got lock, manually granting (no ast)\n", + dlm->name, dlm->node_num, res->owner); + list_move_tail(&lock->list, &res->granted); + } + spin_unlock(&res->spinlock); + + if (lockres_changed) + dlm_lockres_calc_usage(dlm, res); + + wake_up(&res->wq); + return status; +} + + +/* for remote lock creation. + * locking: + * caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS + * taken: none + * held on exit: none + * returns: DLM_NOLOCKMGR, or net status + */ +static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, int flags) +{ + struct dlm_create_lock create; + int tmpret, status = 0; + enum dlm_status ret; + + mlog_entry_void(); + + memset(&create, 0, sizeof(create)); + create.node_idx = dlm->node_num; + create.requested_type = lock->ml.type; + create.cookie = lock->ml.cookie; + create.namelen = res->lockname.len; + create.flags = cpu_to_be32(flags); + memcpy(create.name, res->lockname.name, create.namelen); + + tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, + sizeof(create), res->owner, &status); + if (tmpret >= 0) { + // successfully sent and received + ret = status; // this is already a dlm_status + if (ret == DLM_REJECTED) { + mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " + "no longer owned by %u. that node is coming back " + "up currently.\n", dlm->name, create.namelen, + create.name, res->owner); + dlm_print_one_lock_resource(res); + BUG(); + } + } else { + mlog_errno(tmpret); + if (dlm_is_host_down(tmpret)) { + ret = DLM_RECOVERING; + mlog(0, "node %u died so returning DLM_RECOVERING " + "from lock message!\n", res->owner); + } else { + ret = dlm_err_to_dlm_status(tmpret); + } + } + + return ret; +} + +void dlm_lock_get(struct dlm_lock *lock) +{ + kref_get(&lock->lock_refs); +} + +void dlm_lock_put(struct dlm_lock *lock) +{ + kref_put(&lock->lock_refs, dlm_lock_release); +} + +static void dlm_lock_release(struct kref *kref) +{ + struct dlm_lock *lock; + + lock = container_of(kref, struct dlm_lock, lock_refs); + + BUG_ON(!list_empty(&lock->list)); + BUG_ON(!list_empty(&lock->ast_list)); + BUG_ON(!list_empty(&lock->bast_list)); + BUG_ON(lock->ast_pending); + BUG_ON(lock->bast_pending); + + dlm_lock_detach_lockres(lock); + + if (lock->lksb_kernel_allocated) { + mlog(0, "freeing kernel-allocated lksb\n"); + kfree(lock->lksb); + } + kmem_cache_free(dlm_lock_cache, lock); +} + +/* associate a lock with it's lockres, getting a ref on the lockres */ +void dlm_lock_attach_lockres(struct dlm_lock *lock, + struct dlm_lock_resource *res) +{ + dlm_lockres_get(res); + lock->lockres = res; +} + +/* drop ref on lockres, if there is still one associated with lock */ +static void dlm_lock_detach_lockres(struct dlm_lock *lock) +{ + struct dlm_lock_resource *res; + + res = lock->lockres; + if (res) { + lock->lockres = NULL; + mlog(0, "removing lock's lockres reference\n"); + dlm_lockres_put(res); + } +} + +static void dlm_init_lock(struct dlm_lock *newlock, int type, + u8 node, u64 cookie) +{ + INIT_LIST_HEAD(&newlock->list); + INIT_LIST_HEAD(&newlock->ast_list); + INIT_LIST_HEAD(&newlock->bast_list); + spin_lock_init(&newlock->spinlock); + newlock->ml.type = type; + newlock->ml.convert_type = LKM_IVMODE; + newlock->ml.highest_blocked = LKM_IVMODE; + newlock->ml.node = node; + newlock->ml.pad1 = 0; + newlock->ml.list = 0; + newlock->ml.flags = 0; + newlock->ast = NULL; + newlock->bast = NULL; + newlock->astdata = 
NULL; + newlock->ml.cookie = cpu_to_be64(cookie); + newlock->ast_pending = 0; + newlock->bast_pending = 0; + newlock->convert_pending = 0; + newlock->lock_pending = 0; + newlock->unlock_pending = 0; + newlock->cancel_pending = 0; + newlock->lksb_kernel_allocated = 0; + + kref_init(&newlock->lock_refs); +} + +struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, + struct dlm_lockstatus *lksb) +{ + struct dlm_lock *lock; + int kernel_allocated = 0; + + lock = (struct dlm_lock *) kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS); + if (!lock) + return NULL; + + if (!lksb) { + /* zero memory only if kernel-allocated */ + lksb = kzalloc(sizeof(*lksb), GFP_NOFS); + if (!lksb) { + kfree(lock); + return NULL; + } + kernel_allocated = 1; + } + + dlm_init_lock(lock, type, node, cookie); + if (kernel_allocated) + lock->lksb_kernel_allocated = 1; + lock->lksb = lksb; + lksb->lockid = lock; + return lock; +} + +/* handler for lock creation net message + * locking: + * caller needs: none + * taken: takes and drops res->spinlock + * held on exit: none + * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED + */ +int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf; + struct dlm_lock_resource *res = NULL; + struct dlm_lock *newlock = NULL; + struct dlm_lockstatus *lksb = NULL; + enum dlm_status status = DLM_NORMAL; + char *name; + unsigned int namelen; + + BUG_ON(!dlm); + + mlog_entry_void(); + + if (!dlm_grab(dlm)) + return DLM_REJECTED; + + name = create->name; + namelen = create->namelen; + status = DLM_REJECTED; + if (!dlm_domain_fully_joined(dlm)) { + mlog(ML_ERROR, "Domain %s not fully joined, but node %u is " + "sending a create_lock message for lock %.*s!\n", + dlm->name, create->node_idx, namelen, name); + dlm_error(status); + goto leave; + } + + status = DLM_IVBUFLEN; + if (namelen > DLM_LOCKID_NAME_MAX) { + dlm_error(status); + goto leave; + } + + status = DLM_SYSERR; + newlock = dlm_new_lock(create->requested_type, + create->node_idx, + be64_to_cpu(create->cookie), NULL); + if (!newlock) { + dlm_error(status); + goto leave; + } + + lksb = newlock->lksb; + + if (be32_to_cpu(create->flags) & LKM_GET_LVB) { + lksb->flags |= DLM_LKSB_GET_LVB; + mlog(0, "set DLM_LKSB_GET_LVB flag\n"); + } + + status = DLM_IVLOCKID; + res = dlm_lookup_lockres(dlm, name, namelen); + if (!res) { + dlm_error(status); + goto leave; + } + + spin_lock(&res->spinlock); + status = __dlm_lockres_state_to_status(res); + spin_unlock(&res->spinlock); + + if (status != DLM_NORMAL) { + mlog(0, "lockres recovering/migrating/in-progress\n"); + goto leave; + } + + dlm_lock_attach_lockres(newlock, res); + + status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags)); +leave: + if (status != DLM_NORMAL) + if (newlock) + dlm_lock_put(newlock); + + if (res) + dlm_lockres_put(res); + + dlm_put(dlm); + + return status; +} + + +/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */ +static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie) +{ + u64 tmpnode = node_num; + + /* shift single byte of node num into top 8 bits */ + tmpnode <<= 56; + + spin_lock(&dlm_cookie_lock); + *cookie = (dlm_next_cookie | tmpnode); + if (++dlm_next_cookie & 0xff00000000000000ull) { + mlog(0, "This node's cookie will now wrap!\n"); + dlm_next_cookie = 1; + } + spin_unlock(&dlm_cookie_lock); +} + +enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode, + struct dlm_lockstatus *lksb, 
int flags, + const char *name, int namelen, dlm_astlockfunc_t *ast, + void *data, dlm_bastlockfunc_t *bast) +{ + enum dlm_status status; + struct dlm_lock_resource *res = NULL; + struct dlm_lock *lock = NULL; + int convert = 0, recovery = 0; + + /* yes this function is a mess. + * TODO: clean this up. lots of common code in the + * lock and convert paths, especially in the retry blocks */ + if (!lksb) { + dlm_error(DLM_BADARGS); + return DLM_BADARGS; + } + + status = DLM_BADPARAM; + if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) { + dlm_error(status); + goto error; + } + + if (flags & ~LKM_VALID_FLAGS) { + dlm_error(status); + goto error; + } + + convert = (flags & LKM_CONVERT); + recovery = (flags & LKM_RECOVERY); + + if (recovery && + (!dlm_is_recovery_lock(name, namelen) || convert) ) { + dlm_error(status); + goto error; + } + if (convert && (flags & LKM_LOCAL)) { + mlog(ML_ERROR, "strange LOCAL convert request!\n"); + goto error; + } + + if (convert) { + /* CONVERT request */ + + /* if converting, must pass in a valid dlm_lock */ + lock = lksb->lockid; + if (!lock) { + mlog(ML_ERROR, "NULL lock pointer in convert " + "request\n"); + goto error; + } + + res = lock->lockres; + if (!res) { + mlog(ML_ERROR, "NULL lockres pointer in convert " + "request\n"); + goto error; + } + dlm_lockres_get(res); + + /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are + * static after the original lock call. convert requests will + * ensure that everything is the same, or return DLM_BADARGS. + * this means that DLM_DENIED_NOASTS will never be returned. + */ + if (lock->lksb != lksb || lock->ast != ast || + lock->bast != bast || lock->astdata != data) { + status = DLM_BADARGS; + mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, " + "astdata=%p\n", lksb, ast, bast, data); + mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, " + "astdata=%p\n", lock->lksb, lock->ast, + lock->bast, lock->astdata); + goto error; + } +retry_convert: + dlm_wait_for_recovery(dlm); + + if (res->owner == dlm->node_num) + status = dlmconvert_master(dlm, res, lock, flags, mode); + else + status = dlmconvert_remote(dlm, res, lock, flags, mode); + if (status == DLM_RECOVERING || status == DLM_MIGRATING || + status == DLM_FORWARD) { + /* for now, see how this works without sleeping + * and just retry right away. 
I suspect the reco + * or migration will complete fast enough that + * no waiting will be necessary */ + mlog(0, "retrying convert with migration/recovery/" + "in-progress\n"); + msleep(100); + goto retry_convert; + } + } else { + u64 tmpcookie; + + /* LOCK request */ + status = DLM_BADARGS; + if (!name) { + dlm_error(status); + goto error; + } + + status = DLM_IVBUFLEN; + if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) { + dlm_error(status); + goto error; + } + + dlm_get_next_cookie(dlm->node_num, &tmpcookie); + lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb); + if (!lock) { + dlm_error(status); + goto error; + } + + if (!recovery) + dlm_wait_for_recovery(dlm); + + /* find or create the lock resource */ + res = dlm_get_lock_resource(dlm, name, namelen, flags); + if (!res) { + status = DLM_IVLOCKID; + dlm_error(status); + goto error; + } + + mlog(0, "type=%d, flags = 0x%x\n", mode, flags); + mlog(0, "creating lock: lock=%p res=%p\n", lock, res); + + dlm_lock_attach_lockres(lock, res); + lock->ast = ast; + lock->bast = bast; + lock->astdata = data; + +retry_lock: + if (flags & LKM_VALBLK) { + mlog(0, "LKM_VALBLK passed by caller\n"); + + /* LVB requests for non PR, PW or EX locks are + * ignored. */ + if (mode < LKM_PRMODE) + flags &= ~LKM_VALBLK; + else { + flags |= LKM_GET_LVB; + lock->lksb->flags |= DLM_LKSB_GET_LVB; + } + } + + if (res->owner == dlm->node_num) + status = dlmlock_master(dlm, res, lock, flags); + else + status = dlmlock_remote(dlm, res, lock, flags); + + if (status == DLM_RECOVERING || status == DLM_MIGRATING || + status == DLM_FORWARD) { + mlog(0, "retrying lock with migration/" + "recovery/in progress\n"); + msleep(100); + /* no waiting for dlm_reco_thread */ + if (recovery) { + if (status != DLM_RECOVERING) + goto retry_lock; + + mlog(0, "%s: got RECOVERING " + "for $RECOVERY lock, master " + "was %u\n", dlm->name, + res->owner); + /* wait to see the node go down, then + * drop down and allow the lockres to + * get cleaned up. need to remaster. */ + dlm_wait_for_node_death(dlm, res->owner, + DLM_NODE_DEATH_WAIT_MAX); + } else { + dlm_wait_for_recovery(dlm); + goto retry_lock; + } + } + + if (status != DLM_NORMAL) { + lock->lksb->flags &= ~DLM_LKSB_GET_LVB; + if (status != DLM_NOTQUEUED) + dlm_error(status); + goto error; + } + } + +error: + if (status != DLM_NORMAL) { + if (lock && !convert) + dlm_lock_put(lock); + // this is kind of unnecessary + lksb->status = status; + } + + /* put lockres ref from the convert path + * or from dlm_get_lock_resource */ + if (res) + dlm_lockres_put(res); + + return status; +} +EXPORT_SYMBOL_GPL(dlmlock); diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c new file mode 100644 index 0000000..44f87ca --- /dev/null +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -0,0 +1,3398 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmmod.c + * + * standalone DLM module + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/random.h> +#include <linux/blkdev.h> +#include <linux/socket.h> +#include <linux/inet.h> +#include <linux/spinlock.h> +#include <linux/delay.h> + + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" +#include "dlmdomain.h" +#include "dlmdebug.h" + +#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) +#include "cluster/masklog.h" + +static void dlm_mle_node_down(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle, + struct o2nm_node *node, + int idx); +static void dlm_mle_node_up(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle, + struct o2nm_node *node, + int idx); + +static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); +static int dlm_do_assert_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + void *nodemap, u32 flags); +static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data); + +static inline int dlm_mle_equal(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle, + const char *name, + unsigned int namelen) +{ + struct dlm_lock_resource *res; + + if (dlm != mle->dlm) + return 0; + + if (mle->type == DLM_MLE_BLOCK || + mle->type == DLM_MLE_MIGRATION) { + if (namelen != mle->u.name.len || + memcmp(name, mle->u.name.name, namelen)!=0) + return 0; + } else { + res = mle->u.res; + if (namelen != res->lockname.len || + memcmp(res->lockname.name, name, namelen) != 0) + return 0; + } + return 1; +} + +static struct kmem_cache *dlm_lockres_cache = NULL; +static struct kmem_cache *dlm_lockname_cache = NULL; +static struct kmem_cache *dlm_mle_cache = NULL; + +static void dlm_mle_release(struct kref *kref); +static void dlm_init_mle(struct dlm_master_list_entry *mle, + enum dlm_mle_type type, + struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + const char *name, + unsigned int namelen); +static void dlm_put_mle(struct dlm_master_list_entry *mle); +static void __dlm_put_mle(struct dlm_master_list_entry *mle); +static int dlm_find_mle(struct dlm_ctxt *dlm, + struct dlm_master_list_entry **mle, + char *name, unsigned int namelen); + +static int dlm_do_master_request(struct dlm_lock_resource *res, + struct dlm_master_list_entry *mle, int to); + + +static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_master_list_entry *mle, + int *blocked); +static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_master_list_entry *mle, + int blocked); +static int dlm_add_migration_mle(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_master_list_entry *mle, + struct dlm_master_list_entry **oldmle, + const char *name, unsigned int namelen, + u8 new_master, u8 master); + +static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 target); +static int 
dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); + + +int dlm_is_host_down(int errno) +{ + switch (errno) { + case -EBADF: + case -ECONNREFUSED: + case -ENOTCONN: + case -ECONNRESET: + case -EPIPE: + case -EHOSTDOWN: + case -EHOSTUNREACH: + case -ETIMEDOUT: + case -ECONNABORTED: + case -ENETDOWN: + case -ENETUNREACH: + case -ENETRESET: + case -ESHUTDOWN: + case -ENOPROTOOPT: + case -EINVAL: /* if returned from our tcp code, + this means there is no socket */ + return 1; + } + return 0; +} + + +/* + * MASTER LIST FUNCTIONS + */ + + +/* + * regarding master list entries and heartbeat callbacks: + * + * in order to avoid sleeping and allocation that occurs in + * heartbeat, master list entries are simply attached to the + * dlm's established heartbeat callbacks. the mle is attached + * when it is created, and since the dlm->spinlock is held at + * that time, any heartbeat event will be properly discovered + * by the mle. the mle needs to be detached from the + * dlm->mle_hb_events list as soon as heartbeat events are no + * longer useful to the mle, and before the mle is freed. + * + * as a general rule, heartbeat events are no longer needed by + * the mle once an "answer" regarding the lock master has been + * received. + */ +static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle) +{ + assert_spin_locked(&dlm->spinlock); + + list_add_tail(&mle->hb_events, &dlm->mle_hb_events); +} + + +static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle) +{ + if (!list_empty(&mle->hb_events)) + list_del_init(&mle->hb_events); +} + + +static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle) +{ + spin_lock(&dlm->spinlock); + __dlm_mle_detach_hb_events(dlm, mle); + spin_unlock(&dlm->spinlock); +} + +static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle) +{ + struct dlm_ctxt *dlm; + dlm = mle->dlm; + + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&dlm->master_lock); + mle->inuse++; + kref_get(&mle->mle_refs); +} + +static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle) +{ + struct dlm_ctxt *dlm; + dlm = mle->dlm; + + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + mle->inuse--; + __dlm_put_mle(mle); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + +} + +/* remove from list and free */ +static void __dlm_put_mle(struct dlm_master_list_entry *mle) +{ + struct dlm_ctxt *dlm; + dlm = mle->dlm; + + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&dlm->master_lock); + if (!atomic_read(&mle->mle_refs.refcount)) { + /* this may or may not crash, but who cares. + * it's a BUG. 
*/ + mlog(ML_ERROR, "bad mle: %p\n", mle); + dlm_print_one_mle(mle); + BUG(); + } else + kref_put(&mle->mle_refs, dlm_mle_release); +} + + +/* must not have any spinlocks coming in */ +static void dlm_put_mle(struct dlm_master_list_entry *mle) +{ + struct dlm_ctxt *dlm; + dlm = mle->dlm; + + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + __dlm_put_mle(mle); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); +} + +static inline void dlm_get_mle(struct dlm_master_list_entry *mle) +{ + kref_get(&mle->mle_refs); +} + +static void dlm_init_mle(struct dlm_master_list_entry *mle, + enum dlm_mle_type type, + struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + const char *name, + unsigned int namelen) +{ + assert_spin_locked(&dlm->spinlock); + + mle->dlm = dlm; + mle->type = type; + INIT_LIST_HEAD(&mle->list); + INIT_LIST_HEAD(&mle->hb_events); + memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); + spin_lock_init(&mle->spinlock); + init_waitqueue_head(&mle->wq); + atomic_set(&mle->woken, 0); + kref_init(&mle->mle_refs); + memset(mle->response_map, 0, sizeof(mle->response_map)); + mle->master = O2NM_MAX_NODES; + mle->new_master = O2NM_MAX_NODES; + mle->inuse = 0; + + if (mle->type == DLM_MLE_MASTER) { + BUG_ON(!res); + mle->u.res = res; + } else if (mle->type == DLM_MLE_BLOCK) { + BUG_ON(!name); + memcpy(mle->u.name.name, name, namelen); + mle->u.name.len = namelen; + } else /* DLM_MLE_MIGRATION */ { + BUG_ON(!name); + memcpy(mle->u.name.name, name, namelen); + mle->u.name.len = namelen; + } + + /* copy off the node_map and register hb callbacks on our copy */ + memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); + memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); + clear_bit(dlm->node_num, mle->vote_map); + clear_bit(dlm->node_num, mle->node_map); + + /* attach the mle to the domain node up/down events */ + __dlm_mle_attach_hb_events(dlm, mle); +} + + +/* returns 1 if found, 0 if not */ +static int dlm_find_mle(struct dlm_ctxt *dlm, + struct dlm_master_list_entry **mle, + char *name, unsigned int namelen) +{ + struct dlm_master_list_entry *tmpmle; + + assert_spin_locked(&dlm->master_lock); + + list_for_each_entry(tmpmle, &dlm->master_list, list) { + if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) + continue; + dlm_get_mle(tmpmle); + *mle = tmpmle; + return 1; + } + return 0; +} + +void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) +{ + struct dlm_master_list_entry *mle; + + assert_spin_locked(&dlm->spinlock); + + list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { + if (node_up) + dlm_mle_node_up(dlm, mle, NULL, idx); + else + dlm_mle_node_down(dlm, mle, NULL, idx); + } +} + +static void dlm_mle_node_down(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle, + struct o2nm_node *node, int idx) +{ + spin_lock(&mle->spinlock); + + if (!test_bit(idx, mle->node_map)) + mlog(0, "node %u already removed from nodemap!\n", idx); + else + clear_bit(idx, mle->node_map); + + spin_unlock(&mle->spinlock); +} + +static void dlm_mle_node_up(struct dlm_ctxt *dlm, + struct dlm_master_list_entry *mle, + struct o2nm_node *node, int idx) +{ + spin_lock(&mle->spinlock); + + if (test_bit(idx, mle->node_map)) + mlog(0, "node %u already in node map!\n", idx); + else + set_bit(idx, mle->node_map); + + spin_unlock(&mle->spinlock); +} + + +int dlm_init_mle_cache(void) +{ + dlm_mle_cache = kmem_cache_create("o2dlm_mle", + sizeof(struct dlm_master_list_entry), + 0, SLAB_HWCACHE_ALIGN, + NULL); + if (dlm_mle_cache == NULL) + return 
-ENOMEM; + return 0; +} + +void dlm_destroy_mle_cache(void) +{ + if (dlm_mle_cache) + kmem_cache_destroy(dlm_mle_cache); +} + +static void dlm_mle_release(struct kref *kref) +{ + struct dlm_master_list_entry *mle; + struct dlm_ctxt *dlm; + + mlog_entry_void(); + + mle = container_of(kref, struct dlm_master_list_entry, mle_refs); + dlm = mle->dlm; + + if (mle->type != DLM_MLE_MASTER) { + mlog(0, "calling mle_release for %.*s, type %d\n", + mle->u.name.len, mle->u.name.name, mle->type); + } else { + mlog(0, "calling mle_release for %.*s, type %d\n", + mle->u.res->lockname.len, + mle->u.res->lockname.name, mle->type); + } + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&dlm->master_lock); + + /* remove from list if not already */ + if (!list_empty(&mle->list)) + list_del_init(&mle->list); + + /* detach the mle from the domain node up/down events */ + __dlm_mle_detach_hb_events(dlm, mle); + + /* NOTE: kfree under spinlock here. + * if this is bad, we can move this to a freelist. */ + kmem_cache_free(dlm_mle_cache, mle); +} + + +/* + * LOCK RESOURCE FUNCTIONS + */ + +int dlm_init_master_caches(void) +{ + dlm_lockres_cache = kmem_cache_create("o2dlm_lockres", + sizeof(struct dlm_lock_resource), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!dlm_lockres_cache) + goto bail; + + dlm_lockname_cache = kmem_cache_create("o2dlm_lockname", + DLM_LOCKID_NAME_MAX, 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!dlm_lockname_cache) + goto bail; + + return 0; +bail: + dlm_destroy_master_caches(); + return -ENOMEM; +} + +void dlm_destroy_master_caches(void) +{ + if (dlm_lockname_cache) + kmem_cache_destroy(dlm_lockname_cache); + + if (dlm_lockres_cache) + kmem_cache_destroy(dlm_lockres_cache); +} + +static void dlm_set_lockres_owner(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 owner) +{ + assert_spin_locked(&res->spinlock); + + mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner); + + if (owner == dlm->node_num) + atomic_inc(&dlm->local_resources); + else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN) + atomic_inc(&dlm->unknown_resources); + else + atomic_inc(&dlm->remote_resources); + + res->owner = owner; +} + +void dlm_change_lockres_owner(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, u8 owner) +{ + assert_spin_locked(&res->spinlock); + + if (owner == res->owner) + return; + + if (res->owner == dlm->node_num) + atomic_dec(&dlm->local_resources); + else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) + atomic_dec(&dlm->unknown_resources); + else + atomic_dec(&dlm->remote_resources); + + dlm_set_lockres_owner(dlm, res, owner); +} + + +static void dlm_lockres_release(struct kref *kref) +{ + struct dlm_lock_resource *res; + + res = container_of(kref, struct dlm_lock_resource, refs); + + /* This should not happen -- all lockres' have a name + * associated with them at init time. */ + BUG_ON(!res->lockname.name); + + mlog(0, "destroying lockres %.*s\n", res->lockname.len, + res->lockname.name); + + if (!list_empty(&res->tracking)) + list_del_init(&res->tracking); + else { + mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n", + res->lockname.len, res->lockname.name); + dlm_print_one_lock_resource(res); + } + + if (!hlist_unhashed(&res->hash_node) || + !list_empty(&res->granted) || + !list_empty(&res->converting) || + !list_empty(&res->blocked) || + !list_empty(&res->dirty) || + !list_empty(&res->recovering) || + !list_empty(&res->purge)) { + mlog(ML_ERROR, + "Going to BUG for resource %.*s." + " We're on a list! 
[%c%c%c%c%c%c%c]\n", + res->lockname.len, res->lockname.name, + !hlist_unhashed(&res->hash_node) ? 'H' : ' ', + !list_empty(&res->granted) ? 'G' : ' ', + !list_empty(&res->converting) ? 'C' : ' ', + !list_empty(&res->blocked) ? 'B' : ' ', + !list_empty(&res->dirty) ? 'D' : ' ', + !list_empty(&res->recovering) ? 'R' : ' ', + !list_empty(&res->purge) ? 'P' : ' '); + + dlm_print_one_lock_resource(res); + } + + /* By the time we're ready to blow this guy away, we shouldn't + * be on any lists. */ + BUG_ON(!hlist_unhashed(&res->hash_node)); + BUG_ON(!list_empty(&res->granted)); + BUG_ON(!list_empty(&res->converting)); + BUG_ON(!list_empty(&res->blocked)); + BUG_ON(!list_empty(&res->dirty)); + BUG_ON(!list_empty(&res->recovering)); + BUG_ON(!list_empty(&res->purge)); + + kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); + + kmem_cache_free(dlm_lockres_cache, res); +} + +void dlm_lockres_put(struct dlm_lock_resource *res) +{ + kref_put(&res->refs, dlm_lockres_release); +} + +static void dlm_init_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + const char *name, unsigned int namelen) +{ + char *qname; + + /* If we memset here, we lose our reference to the kmalloc'd + * res->lockname.name, so be sure to init every field + * correctly! */ + + qname = (char *) res->lockname.name; + memcpy(qname, name, namelen); + + res->lockname.len = namelen; + res->lockname.hash = dlm_lockid_hash(name, namelen); + + init_waitqueue_head(&res->wq); + spin_lock_init(&res->spinlock); + INIT_HLIST_NODE(&res->hash_node); + INIT_LIST_HEAD(&res->granted); + INIT_LIST_HEAD(&res->converting); + INIT_LIST_HEAD(&res->blocked); + INIT_LIST_HEAD(&res->dirty); + INIT_LIST_HEAD(&res->recovering); + INIT_LIST_HEAD(&res->purge); + INIT_LIST_HEAD(&res->tracking); + atomic_set(&res->asts_reserved, 0); + res->migration_pending = 0; + res->inflight_locks = 0; + + kref_init(&res->refs); + + /* just for consistency */ + spin_lock(&res->spinlock); + dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); + spin_unlock(&res->spinlock); + + res->state = DLM_LOCK_RES_IN_PROGRESS; + + res->last_used = 0; + + spin_lock(&dlm->spinlock); + list_add_tail(&res->tracking, &dlm->tracking_list); + spin_unlock(&dlm->spinlock); + + memset(res->lvb, 0, DLM_LVB_LEN); + memset(res->refmap, 0, sizeof(res->refmap)); +} + +struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, + const char *name, + unsigned int namelen) +{ + struct dlm_lock_resource *res = NULL; + + res = (struct dlm_lock_resource *) + kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS); + if (!res) + goto error; + + res->lockname.name = (char *) + kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS); + if (!res->lockname.name) + goto error; + + dlm_init_lockres(dlm, res, name, namelen); + return res; + +error: + if (res && res->lockname.name) + kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name); + + if (res) + kmem_cache_free(dlm_lockres_cache, res); + return NULL; +} + +void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + int new_lockres, + const char *file, + int line) +{ + if (!new_lockres) + assert_spin_locked(&res->spinlock); + + if (!test_bit(dlm->node_num, res->refmap)) { + BUG_ON(res->inflight_locks != 0); + dlm_lockres_set_refmap_bit(dlm->node_num, res); + } + res->inflight_locks++; + mlog(0, "%s:%.*s: inflight++: now %u\n", + dlm->name, res->lockname.len, res->lockname.name, + res->inflight_locks); +} + +void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, 
+ const char *file, + int line) +{ + assert_spin_locked(&res->spinlock); + + BUG_ON(res->inflight_locks == 0); + res->inflight_locks--; + mlog(0, "%s:%.*s: inflight--: now %u\n", + dlm->name, res->lockname.len, res->lockname.name, + res->inflight_locks); + if (res->inflight_locks == 0) + dlm_lockres_clear_refmap_bit(dlm->node_num, res); + wake_up(&res->wq); +} + +/* + * lookup a lock resource by name. + * may already exist in the hashtable. + * lockid is null terminated + * + * if not, allocate enough for the lockres and for + * the temporary structure used in doing the mastering. + * + * also, do a lookup in the dlm->master_list to see + * if another node has begun mastering the same lock. + * if so, there should be a block entry in there + * for this name, and we should *not* attempt to master + * the lock here. need to wait around for that node + * to assert_master (or die). + * + */ +struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, + const char *lockid, + int namelen, + int flags) +{ + struct dlm_lock_resource *tmpres=NULL, *res=NULL; + struct dlm_master_list_entry *mle = NULL; + struct dlm_master_list_entry *alloc_mle = NULL; + int blocked = 0; + int ret, nodenum; + struct dlm_node_iter iter; + unsigned int hash; + int tries = 0; + int bit, wait_on_recovery = 0; + int drop_inflight_if_nonlocal = 0; + + BUG_ON(!lockid); + + hash = dlm_lockid_hash(lockid, namelen); + + mlog(0, "get lockres %s (len %d)\n", lockid, namelen); + +lookup: + spin_lock(&dlm->spinlock); + tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); + if (tmpres) { + int dropping_ref = 0; + + spin_lock(&tmpres->spinlock); + if (tmpres->owner == dlm->node_num) { + BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); + dlm_lockres_grab_inflight_ref(dlm, tmpres); + } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) + dropping_ref = 1; + spin_unlock(&tmpres->spinlock); + spin_unlock(&dlm->spinlock); + + /* wait until done messaging the master, drop our ref to allow + * the lockres to be purged, start over. */ + if (dropping_ref) { + spin_lock(&tmpres->spinlock); + __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); + spin_unlock(&tmpres->spinlock); + dlm_lockres_put(tmpres); + tmpres = NULL; + goto lookup; + } + + mlog(0, "found in hash!\n"); + if (res) + dlm_lockres_put(res); + res = tmpres; + goto leave; + } + + if (!res) { + spin_unlock(&dlm->spinlock); + mlog(0, "allocating a new resource\n"); + /* nothing found and we need to allocate one. */ + alloc_mle = (struct dlm_master_list_entry *) + kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); + if (!alloc_mle) + goto leave; + res = dlm_new_lockres(dlm, lockid, namelen); + if (!res) + goto leave; + goto lookup; + } + + mlog(0, "no lockres found, allocated our own: %p\n", res); + + if (flags & LKM_LOCAL) { + /* caller knows it's safe to assume it's not mastered elsewhere + * DONE! 
return right away */
+		spin_lock(&res->spinlock);
+		dlm_change_lockres_owner(dlm, res, dlm->node_num);
+		__dlm_insert_lockres(dlm, res);
+		dlm_lockres_grab_inflight_ref(dlm, res);
+		spin_unlock(&res->spinlock);
+		spin_unlock(&dlm->spinlock);
+		/* lockres still marked IN_PROGRESS */
+		goto wake_waiters;
+	}
+
+	/* check master list to see if another node has started mastering it */
+	spin_lock(&dlm->master_lock);
+
+	/* if we found a block, wait for lock to be mastered by another node */
+	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
+	if (blocked) {
+		int mig;
+		if (mle->type == DLM_MLE_MASTER) {
+			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
+			BUG();
+		}
+		mig = (mle->type == DLM_MLE_MIGRATION);
+		/* if there is a migration in progress, let the migration
+		 * finish before continuing. we can wait for the absence
+		 * of the MIGRATION mle: either the migrate finished or
+		 * one of the nodes died and the mle was cleaned up.
+		 * if there is a BLOCK here, but it already has a master
+		 * set, we are too late. the master does not have a ref
+		 * for us in the refmap. detach the mle and drop it.
+		 * either way, go back to the top and start over. */
+		if (mig || mle->master != O2NM_MAX_NODES) {
+			BUG_ON(mig && mle->master == dlm->node_num);
+			/* we arrived too late. the master does not
+			 * have a ref for us. retry. */
+			mlog(0, "%s:%.*s: late on %s\n",
+			     dlm->name, namelen, lockid,
+			     mig ? "MIGRATION" : "BLOCK");
+			spin_unlock(&dlm->master_lock);
+			spin_unlock(&dlm->spinlock);
+
+			/* master is known, detach */
+			if (!mig)
+				dlm_mle_detach_hb_events(dlm, mle);
+			dlm_put_mle(mle);
+			mle = NULL;
+			/* this is lame, but we can't wait on either
+			 * the mle or lockres waitqueue here */
+			if (mig)
+				msleep(100);
+			goto lookup;
+		}
+	} else {
+		/* go ahead and try to master lock on this node */
+		mle = alloc_mle;
+		/* make sure this does not get freed below */
+		alloc_mle = NULL;
+		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
+		set_bit(dlm->node_num, mle->maybe_map);
+		list_add(&mle->list, &dlm->master_list);
+
+		/* still holding the dlm spinlock, check the recovery map
+		 * to see if there are any nodes that still need to be
+		 * considered. these will not appear in the mle nodemap
+		 * but they might own this lockres. wait on them. */
+		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+		if (bit < O2NM_MAX_NODES) {
+			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
+			     "recover before lock mastery can begin\n",
+			     dlm->name, namelen, (char *)lockid, bit);
+			wait_on_recovery = 1;
+		}
+	}
+
+	/* at this point there is either a DLM_MLE_BLOCK or a
+	 * DLM_MLE_MASTER on the master list, so it's safe to add the
+	 * lockres to the hashtable. anyone who finds the lock will
+	 * still have to wait on the IN_PROGRESS. */
+
+	/* finally add the lockres to its hash bucket */
+	__dlm_insert_lockres(dlm, res);
+	/* since this lockres is new it does not require the spinlock */
+	dlm_lockres_grab_inflight_ref_new(dlm, res);
+
+	/* if this node does not become the master make sure to drop
+	 * this inflight reference below */
+	drop_inflight_if_nonlocal = 1;
+
+	/* get an extra ref on the mle in case this is a BLOCK
+	 * if so, the creator of the BLOCK may try to put the last
+	 * ref at this time in the assert master handler, so we
+	 * need an extra one to keep from a bad ptr deref. */
+	dlm_get_mle_inuse(mle);
+	spin_unlock(&dlm->master_lock);
+	spin_unlock(&dlm->spinlock);
+
+redo_request:
+	while (wait_on_recovery) {
+		/* any cluster changes that occurred after dropping the
+		 * dlm spinlock would be detectable by a change on the mle,
+		 * so we only need to clear out the recovery map once. */
+		if (dlm_is_recovery_lock(lockid, namelen)) {
+			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
+			     "must master $RECOVERY lock now\n", dlm->name);
+			if (!dlm_pre_master_reco_lockres(dlm, res))
+				wait_on_recovery = 0;
+			else {
+				mlog(0, "%s: waiting 500ms for heartbeat state "
+				     "change\n", dlm->name);
+				msleep(500);
+			}
+			continue;
+		}
+
+		dlm_kick_recovery_thread(dlm);
+		msleep(1000);
+		dlm_wait_for_recovery(dlm);
+
+		spin_lock(&dlm->spinlock);
+		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+		if (bit < O2NM_MAX_NODES) {
+			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
+			     "recover before lock mastery can begin\n",
+			     dlm->name, namelen, (char *)lockid, bit);
+			wait_on_recovery = 1;
+		} else
+			wait_on_recovery = 0;
+		spin_unlock(&dlm->spinlock);
+
+		if (wait_on_recovery)
+			dlm_wait_for_node_recovery(dlm, bit, 10000);
+	}
+
+	/* must wait for lock to be mastered elsewhere */
+	if (blocked)
+		goto wait;
+
+	ret = -EINVAL;
+	dlm_node_iter_init(mle->vote_map, &iter);
+	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+		ret = dlm_do_master_request(res, mle, nodenum);
+		if (ret < 0)
+			mlog_errno(ret);
+		if (mle->master != O2NM_MAX_NODES) {
+			/* found a master! */
+			if (mle->master <= nodenum)
+				break;
+			/* if our master request has not reached the master
+			 * yet, keep going until it does. this is how the
+			 * master will know that asserts are needed back to
+			 * the lower nodes. */
+			mlog(0, "%s:%.*s: requests only up to %u but master "
+			     "is %u, keep going\n", dlm->name, namelen,
+			     lockid, nodenum, mle->master);
+		}
+	}
+
+wait:
+	/* keep going until the response map includes all nodes */
+	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
+	if (ret < 0) {
+		wait_on_recovery = 1;
+		mlog(0, "%s:%.*s: node map changed, redo the "
+		     "master request now, blocked=%d\n",
+		     dlm->name, res->lockname.len,
+		     res->lockname.name, blocked);
+		if (++tries > 20) {
+			mlog(ML_ERROR, "%s:%.*s: spinning on "
+			     "dlm_wait_for_lock_mastery, blocked=%d\n",
+			     dlm->name, res->lockname.len,
+			     res->lockname.name, blocked);
+			dlm_print_one_lock_resource(res);
+			dlm_print_one_mle(mle);
+			tries = 0;
+		}
+		goto redo_request;
+	}
+
+	mlog(0, "lockres mastered by %u\n", res->owner);
+	/* make sure we never continue without this */
+	BUG_ON(res->owner == O2NM_MAX_NODES);
+
+	/* master is known, detach if not already detached */
+	dlm_mle_detach_hb_events(dlm, mle);
+	dlm_put_mle(mle);
+	/* put the extra ref */
+	dlm_put_mle_inuse(mle);
+
+wake_waiters:
+	spin_lock(&res->spinlock);
+	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
+		dlm_lockres_drop_inflight_ref(dlm, res);
+	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+	spin_unlock(&res->spinlock);
+	wake_up(&res->wq);
+
+leave:
+	/* need to free the unused mle */
+	if (alloc_mle)
+		kmem_cache_free(dlm_mle_cache, alloc_mle);
+
+	return res;
+}
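dlm_wait_for_lock_mastery(), defined next, sleeps on mle->wq using a common kernel idiom: the waker sets an atomic flag and calls wake_up(), while the waiter blocks in wait_event_timeout() on that flag. A minimal sketch of that idiom with invented names (not part of the original file):

	static DECLARE_WAIT_QUEUE_HEAD(example_wq);
	static atomic_t example_woken = ATOMIC_INIT(0);

	static void example_wait(void)
	{
		long timeo = msecs_to_jiffies(5000);

		atomic_set(&example_woken, 0);
		/* sleeps until example_wake() runs or the timeout expires */
		(void)wait_event_timeout(example_wq,
					 atomic_read(&example_woken) == 1,
					 timeo);
	}

	static void example_wake(void)
	{
		atomic_set(&example_woken, 1);
		wake_up(&example_wq);
	}

+
+#define DLM_MASTERY_TIMEOUT_MS   5000
+
+static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
+				     struct dlm_lock_resource *res,
+				     struct dlm_master_list_entry *mle,
+				     int *blocked)
+{
+	u8 m;
+	int ret, bit;
+	int map_changed, voting_done;
+	int assert, sleep;
+
+recheck:
+	ret = 0;
+	assert = 0;
+
+	/* check if another node has already become the owner */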
spin_lock(&res->spinlock); + if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { + mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name, + res->lockname.len, res->lockname.name, res->owner); + spin_unlock(&res->spinlock); + /* this will cause the master to re-assert across + * the whole cluster, freeing up mles */ + if (res->owner != dlm->node_num) { + ret = dlm_do_master_request(res, mle, res->owner); + if (ret < 0) { + /* give recovery a chance to run */ + mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret); + msleep(500); + goto recheck; + } + } + ret = 0; + goto leave; + } + spin_unlock(&res->spinlock); + + spin_lock(&mle->spinlock); + m = mle->master; + map_changed = (memcmp(mle->vote_map, mle->node_map, + sizeof(mle->vote_map)) != 0); + voting_done = (memcmp(mle->vote_map, mle->response_map, + sizeof(mle->vote_map)) == 0); + + /* restart if we hit any errors */ + if (map_changed) { + int b; + mlog(0, "%s: %.*s: node map changed, restarting\n", + dlm->name, res->lockname.len, res->lockname.name); + ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); + b = (mle->type == DLM_MLE_BLOCK); + if ((*blocked && !b) || (!*blocked && b)) { + mlog(0, "%s:%.*s: status change: old=%d new=%d\n", + dlm->name, res->lockname.len, res->lockname.name, + *blocked, b); + *blocked = b; + } + spin_unlock(&mle->spinlock); + if (ret < 0) { + mlog_errno(ret); + goto leave; + } + mlog(0, "%s:%.*s: restart lock mastery succeeded, " + "rechecking now\n", dlm->name, res->lockname.len, + res->lockname.name); + goto recheck; + } else { + if (!voting_done) { + mlog(0, "map not changed and voting not done " + "for %s:%.*s\n", dlm->name, res->lockname.len, + res->lockname.name); + } + } + + if (m != O2NM_MAX_NODES) { + /* another node has done an assert! + * all done! */ + sleep = 0; + } else { + sleep = 1; + /* have all nodes responded? */ + if (voting_done && !*blocked) { + bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); + if (dlm->node_num <= bit) { + /* my node number is lowest. + * now tell other nodes that I am + * mastering this. */ + mle->master = dlm->node_num; + /* ref was grabbed in get_lock_resource + * will be dropped in dlmlock_master */ + assert = 1; + sleep = 0; + } + /* if voting is done, but we have not received + * an assert master yet, we must sleep */ + } + } + + spin_unlock(&mle->spinlock); + + /* sleep if we haven't finished voting yet */ + if (sleep) { + unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); + + /* + if (atomic_read(&mle->mle_refs.refcount) < 2) + mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, + atomic_read(&mle->mle_refs.refcount), + res->lockname.len, res->lockname.name); + */ + atomic_set(&mle->woken, 0); + (void)wait_event_timeout(mle->wq, + (atomic_read(&mle->woken) == 1), + timeo); + if (res->owner == O2NM_MAX_NODES) { + mlog(0, "%s:%.*s: waiting again\n", dlm->name, + res->lockname.len, res->lockname.name); + goto recheck; + } + mlog(0, "done waiting, master is %u\n", res->owner); + ret = 0; + goto leave; + } + + ret = 0; /* done */ + if (assert) { + m = dlm->node_num; + mlog(0, "about to master %.*s here, this=%u\n", + res->lockname.len, res->lockname.name, m); + ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0); + if (ret) { + /* This is a failure in the network path, + * not in the response to the assert_master + * (any nonzero response is a BUG on this node). + * Most likely a socket just got disconnected + * due to node death. */ + mlog_errno(ret); + } + /* no longer need to restart lock mastery. 
+		 * all living nodes have been contacted. */
+		ret = 0;
+	}
+
+	/* set the lockres owner */
+	spin_lock(&res->spinlock);
+	/* mastery reference obtained either during
+	 * assert_master_handler or in get_lock_resource */
+	dlm_change_lockres_owner(dlm, res, m);
+	spin_unlock(&res->spinlock);
+
+leave:
+	return ret;
+}
+
+struct dlm_bitmap_diff_iter
+{
+	int curnode;
+	unsigned long *orig_bm;
+	unsigned long *cur_bm;
+	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+enum dlm_node_state_change
+{
+	NODE_DOWN = -1,
+	NODE_NO_CHANGE = 0,
+	NODE_UP
+};
+
+static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
+				      unsigned long *orig_bm,
+				      unsigned long *cur_bm)
+{
+	unsigned long p1, p2;
+	int i;
+
+	iter->curnode = -1;
+	iter->orig_bm = orig_bm;
+	iter->cur_bm = cur_bm;
+
+	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
+		p1 = *(iter->orig_bm + i);
+		p2 = *(iter->cur_bm + i);
+		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
+	}
+}
+
+static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
+				     enum dlm_node_state_change *state)
+{
+	int bit;
+
+	if (iter->curnode >= O2NM_MAX_NODES)
+		return -ENOENT;
+
+	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
+			    iter->curnode+1);
+	if (bit >= O2NM_MAX_NODES) {
+		iter->curnode = O2NM_MAX_NODES;
+		return -ENOENT;
+	}
+
+	/* if it was there in the original then this node died */
+	if (test_bit(bit, iter->orig_bm))
+		*state = NODE_DOWN;
+	else
+		*state = NODE_UP;
+
+	iter->curnode = bit;
+	return bit;
+}
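One observation about dlm_bitmap_diff_iter_init() above: the per-word expression (p1 & ~p2) | (p2 & ~p1) is the symmetric difference of the two maps, which is simply p1 ^ p2. A tiny hypothetical self-check (invented name, not part of the file) illustrating the equivalence:

	static void example_bitmap_diff_is_xor(void)
	{
		unsigned long p1 = 0xf0f0UL;	/* old node map (example bits) */
		unsigned long p2 = 0xff00UL;	/* new node map (example bits) */
		unsigned long diff = (p1 & ~p2) | (p2 & ~p1);

		BUG_ON(diff != (p1 ^ p2));	/* 0x0ff0: the bits that changed */
		/* a changed bit set in p1 means NODE_DOWN, set only in p2
		 * means NODE_UP, matching dlm_bitmap_diff_iter_next() */
	}

+
+static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
+				    struct dlm_lock_resource *res,
+				    struct dlm_master_list_entry *mle,
+				    int blocked)
+{
+	struct dlm_bitmap_diff_iter bdi;
+	enum dlm_node_state_change sc;
+	int node;
+	int ret = 0;
+
+	mlog(0, "something happened such that the "
+	     "master process may need to be restarted!\n");
+
+	assert_spin_locked(&mle->spinlock);
+
+	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
+	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
+	while (node >= 0) {
+		if (sc == NODE_UP) {
+			/* a node came up. clear any old vote from
+			 * the response map and set it in the vote map
+			 * then restart the mastery. */
+			mlog(ML_NOTICE, "node %d up while restarting\n", node);
+
+			/* redo the master request, but only for the new node */
+			mlog(0, "sending request to new node\n");
+			clear_bit(node, mle->response_map);
+			set_bit(node, mle->vote_map);
+		} else {
+			mlog(ML_ERROR, "node down! %d\n", node);
+			if (blocked) {
+				int lowest = find_next_bit(mle->maybe_map,
+						       O2NM_MAX_NODES, 0);
+
+				/* act like it was never there */
+				clear_bit(node, mle->maybe_map);
+
+				if (node == lowest) {
+					mlog(0, "expected master %u died"
+					    " while this node was blocked "
+					    "waiting on it!\n", node);
+					lowest = find_next_bit(mle->maybe_map,
+							O2NM_MAX_NODES,
+							lowest+1);
+					if (lowest < O2NM_MAX_NODES) {
+						mlog(0, "%s:%.*s:still "
+						     "blocked. waiting on %u "
+						     "now\n", dlm->name,
+						     res->lockname.len,
+						     res->lockname.name,
+						     lowest);
+					} else {
+						/* mle is an MLE_BLOCK, but
+						 * there is now nothing left to
+						 * block on. we need to return
+						 * all the way back out and try
+						 * again with an MLE_MASTER.
+						 * dlm_do_local_recovery_cleanup
+						 * has already run, so the mle
+						 * refcount is ok */
+						mlog(0, "%s:%.*s: no "
+						     "longer blocking.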
try to " + "master this here\n", + dlm->name, + res->lockname.len, + res->lockname.name); + mle->type = DLM_MLE_MASTER; + mle->u.res = res; + } + } + } + + /* now blank out everything, as if we had never + * contacted anyone */ + memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); + memset(mle->response_map, 0, sizeof(mle->response_map)); + /* reset the vote_map to the current node_map */ + memcpy(mle->vote_map, mle->node_map, + sizeof(mle->node_map)); + /* put myself into the maybe map */ + if (mle->type != DLM_MLE_BLOCK) + set_bit(dlm->node_num, mle->maybe_map); + } + ret = -EAGAIN; + node = dlm_bitmap_diff_iter_next(&bdi, &sc); + } + return ret; +} + + +/* + * DLM_MASTER_REQUEST_MSG + * + * returns: 0 on success, + * -errno on a network error + * + * on error, the caller should assume the target node is "dead" + * + */ + +static int dlm_do_master_request(struct dlm_lock_resource *res, + struct dlm_master_list_entry *mle, int to) +{ + struct dlm_ctxt *dlm = mle->dlm; + struct dlm_master_request request; + int ret, response=0, resend; + + memset(&request, 0, sizeof(request)); + request.node_idx = dlm->node_num; + + BUG_ON(mle->type == DLM_MLE_MIGRATION); + + if (mle->type != DLM_MLE_MASTER) { + request.namelen = mle->u.name.len; + memcpy(request.name, mle->u.name.name, request.namelen); + } else { + request.namelen = mle->u.res->lockname.len; + memcpy(request.name, mle->u.res->lockname.name, + request.namelen); + } + +again: + ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, + sizeof(request), to, &response); + if (ret < 0) { + if (ret == -ESRCH) { + /* should never happen */ + mlog(ML_ERROR, "TCP stack not ready!\n"); + BUG(); + } else if (ret == -EINVAL) { + mlog(ML_ERROR, "bad args passed to o2net!\n"); + BUG(); + } else if (ret == -ENOMEM) { + mlog(ML_ERROR, "out of memory while trying to send " + "network message! retrying\n"); + /* this is totally crude */ + msleep(50); + goto again; + } else if (!dlm_is_host_down(ret)) { + /* not a network error. bad. */ + mlog_errno(ret); + mlog(ML_ERROR, "unhandled error!"); + BUG(); + } + /* all other errors should be network errors, + * and likely indicate node death */ + mlog(ML_ERROR, "link to %d went down!\n", to); + goto out; + } + + ret = 0; + resend = 0; + spin_lock(&mle->spinlock); + switch (response) { + case DLM_MASTER_RESP_YES: + set_bit(to, mle->response_map); + mlog(0, "node %u is the master, response=YES\n", to); + mlog(0, "%s:%.*s: master node %u now knows I have a " + "reference\n", dlm->name, res->lockname.len, + res->lockname.name, to); + mle->master = to; + break; + case DLM_MASTER_RESP_NO: + mlog(0, "node %u not master, response=NO\n", to); + set_bit(to, mle->response_map); + break; + case DLM_MASTER_RESP_MAYBE: + mlog(0, "node %u not master, response=MAYBE\n", to); + set_bit(to, mle->response_map); + set_bit(to, mle->maybe_map); + break; + case DLM_MASTER_RESP_ERROR: + mlog(0, "node %u hit an error, resending\n", to); + resend = 1; + response = 0; + break; + default: + mlog(ML_ERROR, "bad response! %u\n", response); + BUG(); + } + spin_unlock(&mle->spinlock); + if (resend) { + /* this is also totally crude */ + msleep(50); + goto again; + } + +out: + return ret; +} + +/* + * locks that can be taken here: + * dlm->spinlock + * res->spinlock + * mle->spinlock + * dlm->master_list + * + * if possible, TRIM THIS DOWN!!! 
+ */ +int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + u8 response = DLM_MASTER_RESP_MAYBE; + struct dlm_ctxt *dlm = data; + struct dlm_lock_resource *res = NULL; + struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; + struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; + char *name; + unsigned int namelen, hash; + int found, ret; + int set_maybe; + int dispatch_assert = 0; + + if (!dlm_grab(dlm)) + return DLM_MASTER_RESP_NO; + + if (!dlm_domain_fully_joined(dlm)) { + response = DLM_MASTER_RESP_NO; + goto send_response; + } + + name = request->name; + namelen = request->namelen; + hash = dlm_lockid_hash(name, namelen); + + if (namelen > DLM_LOCKID_NAME_MAX) { + response = DLM_IVBUFLEN; + goto send_response; + } + +way_up_top: + spin_lock(&dlm->spinlock); + res = __dlm_lookup_lockres(dlm, name, namelen, hash); + if (res) { + spin_unlock(&dlm->spinlock); + + /* take care of the easy cases up front */ + spin_lock(&res->spinlock); + if (res->state & (DLM_LOCK_RES_RECOVERING| + DLM_LOCK_RES_MIGRATING)) { + spin_unlock(&res->spinlock); + mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " + "being recovered/migrated\n"); + response = DLM_MASTER_RESP_ERROR; + if (mle) + kmem_cache_free(dlm_mle_cache, mle); + goto send_response; + } + + if (res->owner == dlm->node_num) { + mlog(0, "%s:%.*s: setting bit %u in refmap\n", + dlm->name, namelen, name, request->node_idx); + dlm_lockres_set_refmap_bit(request->node_idx, res); + spin_unlock(&res->spinlock); + response = DLM_MASTER_RESP_YES; + if (mle) + kmem_cache_free(dlm_mle_cache, mle); + + /* this node is the owner. + * there is some extra work that needs to + * happen now. the requesting node has + * caused all nodes up to this one to + * create mles. this node now needs to + * go back and clean those up. */ + dispatch_assert = 1; + goto send_response; + } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { + spin_unlock(&res->spinlock); + // mlog(0, "node %u is the master\n", res->owner); + response = DLM_MASTER_RESP_NO; + if (mle) + kmem_cache_free(dlm_mle_cache, mle); + goto send_response; + } + + /* ok, there is no owner. either this node is + * being blocked, or it is actively trying to + * master this lock. */ + if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { + mlog(ML_ERROR, "lock with no owner should be " + "in-progress!\n"); + BUG(); + } + + // mlog(0, "lockres is in progress...\n"); + spin_lock(&dlm->master_lock); + found = dlm_find_mle(dlm, &tmpmle, name, namelen); + if (!found) { + mlog(ML_ERROR, "no mle found for this lock!\n"); + BUG(); + } + set_maybe = 1; + spin_lock(&tmpmle->spinlock); + if (tmpmle->type == DLM_MLE_BLOCK) { + // mlog(0, "this node is waiting for " + // "lockres to be mastered\n"); + response = DLM_MASTER_RESP_NO; + } else if (tmpmle->type == DLM_MLE_MIGRATION) { + mlog(0, "node %u is master, but trying to migrate to " + "node %u.\n", tmpmle->master, tmpmle->new_master); + if (tmpmle->master == dlm->node_num) { + mlog(ML_ERROR, "no owner on lockres, but this " + "node is trying to migrate it to %u?!\n", + tmpmle->new_master); + BUG(); + } else { + /* the real master can respond on its own */ + response = DLM_MASTER_RESP_NO; + } + } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { + set_maybe = 0; + if (tmpmle->master == dlm->node_num) { + response = DLM_MASTER_RESP_YES; + /* this node will be the owner. 
+ * go back and clean the mles on any + * other nodes */ + dispatch_assert = 1; + dlm_lockres_set_refmap_bit(request->node_idx, res); + mlog(0, "%s:%.*s: setting bit %u in refmap\n", + dlm->name, namelen, name, + request->node_idx); + } else + response = DLM_MASTER_RESP_NO; + } else { + // mlog(0, "this node is attempting to " + // "master lockres\n"); + response = DLM_MASTER_RESP_MAYBE; + } + if (set_maybe) + set_bit(request->node_idx, tmpmle->maybe_map); + spin_unlock(&tmpmle->spinlock); + + spin_unlock(&dlm->master_lock); + spin_unlock(&res->spinlock); + + /* keep the mle attached to heartbeat events */ + dlm_put_mle(tmpmle); + if (mle) + kmem_cache_free(dlm_mle_cache, mle); + goto send_response; + } + + /* + * lockres doesn't exist on this node + * if there is an MLE_BLOCK, return NO + * if there is an MLE_MASTER, return MAYBE + * otherwise, add an MLE_BLOCK, return NO + */ + spin_lock(&dlm->master_lock); + found = dlm_find_mle(dlm, &tmpmle, name, namelen); + if (!found) { + /* this lockid has never been seen on this node yet */ + // mlog(0, "no mle found\n"); + if (!mle) { + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + + mle = (struct dlm_master_list_entry *) + kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); + if (!mle) { + response = DLM_MASTER_RESP_ERROR; + mlog_errno(-ENOMEM); + goto send_response; + } + goto way_up_top; + } + + // mlog(0, "this is second time thru, already allocated, " + // "add the block.\n"); + dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); + set_bit(request->node_idx, mle->maybe_map); + list_add(&mle->list, &dlm->master_list); + response = DLM_MASTER_RESP_NO; + } else { + // mlog(0, "mle was found\n"); + set_maybe = 1; + spin_lock(&tmpmle->spinlock); + if (tmpmle->master == dlm->node_num) { + mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); + BUG(); + } + if (tmpmle->type == DLM_MLE_BLOCK) + response = DLM_MASTER_RESP_NO; + else if (tmpmle->type == DLM_MLE_MIGRATION) { + mlog(0, "migration mle was found (%u->%u)\n", + tmpmle->master, tmpmle->new_master); + /* real master can respond on its own */ + response = DLM_MASTER_RESP_NO; + } else + response = DLM_MASTER_RESP_MAYBE; + if (set_maybe) + set_bit(request->node_idx, tmpmle->maybe_map); + spin_unlock(&tmpmle->spinlock); + } + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + + if (found) { + /* keep the mle attached to heartbeat events */ + dlm_put_mle(tmpmle); + } +send_response: + /* + * __dlm_lookup_lockres() grabbed a reference to this lockres. + * The reference is released by dlm_assert_master_worker() under + * the call to dlm_dispatch_assert_master(). If + * dlm_assert_master_worker() isn't called, we drop it here. + */ + if (dispatch_assert) { + if (response != DLM_MASTER_RESP_YES) + mlog(ML_ERROR, "invalid response %d\n", response); + if (!res) { + mlog(ML_ERROR, "bad lockres while trying to assert!\n"); + BUG(); + } + mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", + dlm->node_num, res->lockname.len, res->lockname.name); + ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, + DLM_ASSERT_MASTER_MLE_CLEANUP); + if (ret < 0) { + mlog(ML_ERROR, "failed to dispatch assert master work\n"); + response = DLM_MASTER_RESP_ERROR; + dlm_lockres_put(res); + } + } else { + if (res) + dlm_lockres_put(res); + } + + dlm_put(dlm); + return response; +} + +/* + * DLM_ASSERT_MASTER_MSG + */ + + +/* + * NOTE: this can be used for debugging + * can periodically run all locks owned by this node + * and re-assert across the cluster... 
+ */ +static int dlm_do_assert_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + void *nodemap, u32 flags) +{ + struct dlm_assert_master assert; + int to, tmpret; + struct dlm_node_iter iter; + int ret = 0; + int reassert; + const char *lockname = res->lockname.name; + unsigned int namelen = res->lockname.len; + + BUG_ON(namelen > O2NM_MAX_NAME_LEN); + + spin_lock(&res->spinlock); + res->state |= DLM_LOCK_RES_SETREF_INPROG; + spin_unlock(&res->spinlock); + +again: + reassert = 0; + + /* note that if this nodemap is empty, it returns 0 */ + dlm_node_iter_init(nodemap, &iter); + while ((to = dlm_node_iter_next(&iter)) >= 0) { + int r = 0; + struct dlm_master_list_entry *mle = NULL; + + mlog(0, "sending assert master to %d (%.*s)\n", to, + namelen, lockname); + memset(&assert, 0, sizeof(assert)); + assert.node_idx = dlm->node_num; + assert.namelen = namelen; + memcpy(assert.name, lockname, namelen); + assert.flags = cpu_to_be32(flags); + + tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, + &assert, sizeof(assert), to, &r); + if (tmpret < 0) { + mlog(0, "assert_master returned %d!\n", tmpret); + if (!dlm_is_host_down(tmpret)) { + mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); + BUG(); + } + /* a node died. finish out the rest of the nodes. */ + mlog(0, "link to %d went down!\n", to); + /* any nonzero status return will do */ + ret = tmpret; + r = 0; + } else if (r < 0) { + /* ok, something horribly messed. kill thyself. */ + mlog(ML_ERROR,"during assert master of %.*s to %u, " + "got %d.\n", namelen, lockname, to, r); + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + if (dlm_find_mle(dlm, &mle, (char *)lockname, + namelen)) { + dlm_print_one_mle(mle); + __dlm_put_mle(mle); + } + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + BUG(); + } + + if (r & DLM_ASSERT_RESPONSE_REASSERT && + !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { + mlog(ML_ERROR, "%.*s: very strange, " + "master MLE but no lockres on %u\n", + namelen, lockname, to); + } + + if (r & DLM_ASSERT_RESPONSE_REASSERT) { + mlog(0, "%.*s: node %u create mles on other " + "nodes and requests a re-assert\n", + namelen, lockname, to); + reassert = 1; + } + if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { + mlog(0, "%.*s: node %u has a reference to this " + "lockres, set the bit in the refmap\n", + namelen, lockname, to); + spin_lock(&res->spinlock); + dlm_lockres_set_refmap_bit(to, res); + spin_unlock(&res->spinlock); + } + } + + if (reassert) + goto again; + + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_SETREF_INPROG; + spin_unlock(&res->spinlock); + wake_up(&res->wq); + + return ret; +} + +/* + * locks that can be taken here: + * dlm->spinlock + * res->spinlock + * mle->spinlock + * dlm->master_list + * + * if possible, TRIM THIS DOWN!!! 
+ */ +int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_master_list_entry *mle = NULL; + struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; + struct dlm_lock_resource *res = NULL; + char *name; + unsigned int namelen, hash; + u32 flags; + int master_request = 0, have_lockres_ref = 0; + int ret = 0; + + if (!dlm_grab(dlm)) + return 0; + + name = assert->name; + namelen = assert->namelen; + hash = dlm_lockid_hash(name, namelen); + flags = be32_to_cpu(assert->flags); + + if (namelen > DLM_LOCKID_NAME_MAX) { + mlog(ML_ERROR, "Invalid name length!"); + goto done; + } + + spin_lock(&dlm->spinlock); + + if (flags) + mlog(0, "assert_master with flags: %u\n", flags); + + /* find the MLE */ + spin_lock(&dlm->master_lock); + if (!dlm_find_mle(dlm, &mle, name, namelen)) { + /* not an error, could be master just re-asserting */ + mlog(0, "just got an assert_master from %u, but no " + "MLE for it! (%.*s)\n", assert->node_idx, + namelen, name); + } else { + int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); + if (bit >= O2NM_MAX_NODES) { + /* not necessarily an error, though less likely. + * could be master just re-asserting. */ + mlog(0, "no bits set in the maybe_map, but %u " + "is asserting! (%.*s)\n", assert->node_idx, + namelen, name); + } else if (bit != assert->node_idx) { + if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { + mlog(0, "master %u was found, %u should " + "back off\n", assert->node_idx, bit); + } else { + /* with the fix for bug 569, a higher node + * number winning the mastery will respond + * YES to mastery requests, but this node + * had no way of knowing. let it pass. */ + mlog(0, "%u is the lowest node, " + "%u is asserting. (%.*s) %u must " + "have begun after %u won.\n", bit, + assert->node_idx, namelen, name, bit, + assert->node_idx); + } + } + if (mle->type == DLM_MLE_MIGRATION) { + if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { + mlog(0, "%s:%.*s: got cleanup assert" + " from %u for migration\n", + dlm->name, namelen, name, + assert->node_idx); + } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { + mlog(0, "%s:%.*s: got unrelated assert" + " from %u for migration, ignoring\n", + dlm->name, namelen, name, + assert->node_idx); + __dlm_put_mle(mle); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + goto done; + } + } + } + spin_unlock(&dlm->master_lock); + + /* ok everything checks out with the MLE + * now check to see if there is a lockres */ + res = __dlm_lookup_lockres(dlm, name, namelen, hash); + if (res) { + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_RECOVERING) { + mlog(ML_ERROR, "%u asserting but %.*s is " + "RECOVERING!\n", assert->node_idx, namelen, name); + goto kill; + } + if (!mle) { + if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && + res->owner != assert->node_idx) { + mlog(ML_ERROR, "assert_master from " + "%u, but current owner is " + "%u! (%.*s)\n", + assert->node_idx, res->owner, + namelen, name); + goto kill; + } + } else if (mle->type != DLM_MLE_MIGRATION) { + if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { + /* owner is just re-asserting */ + if (res->owner == assert->node_idx) { + mlog(0, "owner %u re-asserting on " + "lock %.*s\n", assert->node_idx, + namelen, name); + goto ok; + } + mlog(ML_ERROR, "got assert_master from " + "node %u, but %u is the owner! 
" + "(%.*s)\n", assert->node_idx, + res->owner, namelen, name); + goto kill; + } + if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { + mlog(ML_ERROR, "got assert from %u, but lock " + "with no owner should be " + "in-progress! (%.*s)\n", + assert->node_idx, + namelen, name); + goto kill; + } + } else /* mle->type == DLM_MLE_MIGRATION */ { + /* should only be getting an assert from new master */ + if (assert->node_idx != mle->new_master) { + mlog(ML_ERROR, "got assert from %u, but " + "new master is %u, and old master " + "was %u (%.*s)\n", + assert->node_idx, mle->new_master, + mle->master, namelen, name); + goto kill; + } + + } +ok: + spin_unlock(&res->spinlock); + } + spin_unlock(&dlm->spinlock); + + // mlog(0, "woo! got an assert_master from node %u!\n", + // assert->node_idx); + if (mle) { + int extra_ref = 0; + int nn = -1; + int rr, err = 0; + + spin_lock(&mle->spinlock); + if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) + extra_ref = 1; + else { + /* MASTER mle: if any bits set in the response map + * then the calling node needs to re-assert to clear + * up nodes that this node contacted */ + while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, + nn+1)) < O2NM_MAX_NODES) { + if (nn != dlm->node_num && nn != assert->node_idx) + master_request = 1; + } + } + mle->master = assert->node_idx; + atomic_set(&mle->woken, 1); + wake_up(&mle->wq); + spin_unlock(&mle->spinlock); + + if (res) { + int wake = 0; + spin_lock(&res->spinlock); + if (mle->type == DLM_MLE_MIGRATION) { + mlog(0, "finishing off migration of lockres %.*s, " + "from %u to %u\n", + res->lockname.len, res->lockname.name, + dlm->node_num, mle->new_master); + res->state &= ~DLM_LOCK_RES_MIGRATING; + wake = 1; + dlm_change_lockres_owner(dlm, res, mle->new_master); + BUG_ON(res->state & DLM_LOCK_RES_DIRTY); + } else { + dlm_change_lockres_owner(dlm, res, mle->master); + } + spin_unlock(&res->spinlock); + have_lockres_ref = 1; + if (wake) + wake_up(&res->wq); + } + + /* master is known, detach if not already detached. + * ensures that only one assert_master call will happen + * on this mle. */ + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + + rr = atomic_read(&mle->mle_refs.refcount); + if (mle->inuse > 0) { + if (extra_ref && rr < 3) + err = 1; + else if (!extra_ref && rr < 2) + err = 1; + } else { + if (extra_ref && rr < 2) + err = 1; + else if (!extra_ref && rr < 1) + err = 1; + } + if (err) { + mlog(ML_ERROR, "%s:%.*s: got assert master from %u " + "that will mess up this node, refs=%d, extra=%d, " + "inuse=%d\n", dlm->name, namelen, name, + assert->node_idx, rr, extra_ref, mle->inuse); + dlm_print_one_mle(mle); + } + list_del_init(&mle->list); + __dlm_mle_detach_hb_events(dlm, mle); + __dlm_put_mle(mle); + if (extra_ref) { + /* the assert master message now balances the extra + * ref given by the master / migration request message. + * if this is the last put, it will be removed + * from the list. */ + __dlm_put_mle(mle); + } + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + } else if (res) { + if (res->owner != assert->node_idx) { + mlog(0, "assert_master from %u, but current " + "owner is %u (%.*s), no mle\n", assert->node_idx, + res->owner, namelen, name); + } + } + +done: + ret = 0; + if (res) { + spin_lock(&res->spinlock); + res->state |= DLM_LOCK_RES_SETREF_INPROG; + spin_unlock(&res->spinlock); + *ret_data = (void *)res; + } + dlm_put(dlm); + if (master_request) { + mlog(0, "need to tell master to reassert\n"); + /* positive. 
negative would shoot down the node. */
+		ret |= DLM_ASSERT_RESPONSE_REASSERT;
+		if (!have_lockres_ref) {
+			mlog(ML_ERROR, "strange, got assert from %u, MASTER "
+			     "mle present here for %s:%.*s, but no lockres!\n",
+			     assert->node_idx, dlm->name, namelen, name);
+		}
+	}
+	if (have_lockres_ref) {
+		/* let the master know we have a reference to the lockres */
+		ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
+		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
+		     dlm->name, namelen, name, assert->node_idx);
+	}
+	return ret;
+
+kill:
+	/* kill the caller! */
+	mlog(ML_ERROR, "Bad message received from another node. Dumping state "
+	     "and killing the other node now! This node is OK and can continue.\n");
+	__dlm_print_one_lock_resource(res);
+	spin_unlock(&res->spinlock);
+	spin_unlock(&dlm->spinlock);
+	*ret_data = (void *)res;
+	dlm_put(dlm);
+	return -EINVAL;
+}
+
+void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
+{
+	struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
+
+	if (ret_data) {
+		spin_lock(&res->spinlock);
+		res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
+		spin_unlock(&res->spinlock);
+		wake_up(&res->wq);
+		dlm_lockres_put(res);
+	}
+	return;
+}
+
+int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+			       struct dlm_lock_resource *res,
+			       int ignore_higher, u8 request_from, u32 flags)
+{
+	struct dlm_work_item *item;
+	item = kzalloc(sizeof(*item), GFP_NOFS);
+	if (!item)
+		return -ENOMEM;
+
+
+	/* queue up work for dlm_assert_master_worker */
+	dlm_grab(dlm);  /* get an extra ref for the work item */
+	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
+	item->u.am.lockres = res; /* already have a ref */
+	/* can optionally ignore node numbers higher than this node */
+	item->u.am.ignore_higher = ignore_higher;
+	item->u.am.request_from = request_from;
+	item->u.am.flags = flags;
+
+	if (ignore_higher)
+		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
+		     res->lockname.name);
+
+	spin_lock(&dlm->work_lock);
+	list_add_tail(&item->list, &dlm->work_list);
+	spin_unlock(&dlm->work_lock);
+
+	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
+	return 0;
+}
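The dispatch above defers the actual asserts to dlm_assert_master_worker(), which follows, via dlm->dispatched_work. As a generic illustration of that defer-to-a-workqueue pattern (names invented; the real code threads its items through dlm->work_list rather than one work_struct per item):

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct example_item {
		struct work_struct work;
		int arg;
	};

	static void example_worker(struct work_struct *work)
	{
		struct example_item *item =
			container_of(work, struct example_item, work);

		/* runs later in process context and may block */
		kfree(item);
	}

	static int example_dispatch(struct workqueue_struct *wq, int arg)
	{
		struct example_item *item = kzalloc(sizeof(*item), GFP_NOFS);

		if (!item)
			return -ENOMEM;
		item->arg = arg;
		INIT_WORK(&item->work, example_worker);
		queue_work(wq, &item->work);
		return 0;
	}

+
+static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
+{
+	struct dlm_ctxt *dlm = data;
+	int ret = 0;
+	struct dlm_lock_resource *res;
+	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+	int ignore_higher;
+	int bit;
+	u8 request_from;
+	u32 flags;
+
+	dlm = item->dlm;
+	res = item->u.am.lockres;
+	ignore_higher = item->u.am.ignore_higher;
+	request_from = item->u.am.request_from;
+	flags = item->u.am.flags;
+
+	spin_lock(&dlm->spinlock);
+	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
+	spin_unlock(&dlm->spinlock);
+
+	clear_bit(dlm->node_num, nodemap);
+	if (ignore_higher) {
+		/* if this is just to clear up mles for nodes below
+		 * this node, do not send the message to the original
+		 * caller or any node number higher than this */
+		clear_bit(request_from, nodemap);
+		bit = dlm->node_num;
+		while (1) {
+			bit = find_next_bit(nodemap, O2NM_MAX_NODES,
+					    bit+1);
+			if (bit >= O2NM_MAX_NODES)
+				break;
+			clear_bit(bit, nodemap);
+		}
+	}
+
+	/*
+	 * If we're migrating this lock to someone else, we are no
+	 * longer allowed to assert our own mastery. OTOH, we need to
+	 * prevent migration from starting while we're still asserting
+	 * our dominance. The reserved ast delays migration.
+	 */
+	spin_lock(&res->spinlock);
+	if (res->state & DLM_LOCK_RES_MIGRATING) {
+		mlog(0, "Someone asked us to assert mastery, but we're "
+		     "in the middle of migration.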
Skipping assert, " + "the new master will handle that.\n"); + spin_unlock(&res->spinlock); + goto put; + } else + __dlm_lockres_reserve_ast(res); + spin_unlock(&res->spinlock); + + /* this call now finishes out the nodemap + * even if one or more nodes die */ + mlog(0, "worker about to master %.*s here, this=%u\n", + res->lockname.len, res->lockname.name, dlm->node_num); + ret = dlm_do_assert_master(dlm, res, nodemap, flags); + if (ret < 0) { + /* no need to restart, we are done */ + if (!dlm_is_host_down(ret)) + mlog_errno(ret); + } + + /* Ok, we've asserted ourselves. Let's let migration start. */ + dlm_lockres_release_ast(dlm, res); + +put: + dlm_lockres_put(res); + + mlog(0, "finished with dlm_assert_master_worker\n"); +} + +/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. + * We cannot wait for node recovery to complete to begin mastering this + * lockres because this lockres is used to kick off recovery! ;-) + * So, do a pre-check on all living nodes to see if any of those nodes + * think that $RECOVERY is currently mastered by a dead node. If so, + * we wait a short time to allow that node to get notified by its own + * heartbeat stack, then check again. All $RECOVERY lock resources + * mastered by dead nodes are purged when the hearbeat callback is + * fired, so we can know for sure that it is safe to continue once + * the node returns a live node or no node. */ +static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + struct dlm_node_iter iter; + int nodenum; + int ret = 0; + u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; + + spin_lock(&dlm->spinlock); + dlm_node_iter_init(dlm->domain_map, &iter); + spin_unlock(&dlm->spinlock); + + while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { + /* do not send to self */ + if (nodenum == dlm->node_num) + continue; + ret = dlm_do_master_requery(dlm, res, nodenum, &master); + if (ret < 0) { + mlog_errno(ret); + if (!dlm_is_host_down(ret)) + BUG(); + /* host is down, so answer for that node would be + * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ + ret = 0; + } + + if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { + /* check to see if this master is in the recovery map */ + spin_lock(&dlm->spinlock); + if (test_bit(master, dlm->recovery_map)) { + mlog(ML_NOTICE, "%s: node %u has not seen " + "node %u go down yet, and thinks the " + "dead node is mastering the recovery " + "lock. must wait.\n", dlm->name, + nodenum, master); + ret = -EAGAIN; + } + spin_unlock(&dlm->spinlock); + mlog(0, "%s: reco lock master is %u\n", dlm->name, + master); + break; + } + } + return ret; +} + +/* + * DLM_DEREF_LOCKRES_MSG + */ + +int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) +{ + struct dlm_deref_lockres deref; + int ret = 0, r; + const char *lockname; + unsigned int namelen; + + lockname = res->lockname.name; + namelen = res->lockname.len; + BUG_ON(namelen > O2NM_MAX_NAME_LEN); + + mlog(0, "%s:%.*s: sending deref to %d\n", + dlm->name, namelen, lockname, res->owner); + memset(&deref, 0, sizeof(deref)); + deref.node_idx = dlm->node_num; + deref.namelen = namelen; + memcpy(deref.name, lockname, namelen); + + ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, + &deref, sizeof(deref), res->owner, &r); + if (ret < 0) + mlog_errno(ret); + else if (r < 0) { + /* BAD. other node says I did not have a ref. 
*/ + mlog(ML_ERROR,"while dropping ref on %s:%.*s " + "(master=%u) got %d.\n", dlm->name, namelen, + lockname, res->owner, r); + dlm_print_one_lock_resource(res); + BUG(); + } + return ret; +} + +int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; + struct dlm_lock_resource *res = NULL; + char *name; + unsigned int namelen; + int ret = -EINVAL; + u8 node; + unsigned int hash; + struct dlm_work_item *item; + int cleared = 0; + int dispatch = 0; + + if (!dlm_grab(dlm)) + return 0; + + name = deref->name; + namelen = deref->namelen; + node = deref->node_idx; + + if (namelen > DLM_LOCKID_NAME_MAX) { + mlog(ML_ERROR, "Invalid name length!"); + goto done; + } + if (deref->node_idx >= O2NM_MAX_NODES) { + mlog(ML_ERROR, "Invalid node number: %u\n", node); + goto done; + } + + hash = dlm_lockid_hash(name, namelen); + + spin_lock(&dlm->spinlock); + res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); + if (!res) { + spin_unlock(&dlm->spinlock); + mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", + dlm->name, namelen, name); + goto done; + } + spin_unlock(&dlm->spinlock); + + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_SETREF_INPROG) + dispatch = 1; + else { + BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); + if (test_bit(node, res->refmap)) { + dlm_lockres_clear_refmap_bit(node, res); + cleared = 1; + } + } + spin_unlock(&res->spinlock); + + if (!dispatch) { + if (cleared) + dlm_lockres_calc_usage(dlm, res); + else { + mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " + "but it is already dropped!\n", dlm->name, + res->lockname.len, res->lockname.name, node); + dlm_print_one_lock_resource(res); + } + ret = 0; + goto done; + } + + item = kzalloc(sizeof(*item), GFP_NOFS); + if (!item) { + ret = -ENOMEM; + mlog_errno(ret); + goto done; + } + + dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); + item->u.dl.deref_res = res; + item->u.dl.deref_node = node; + + spin_lock(&dlm->work_lock); + list_add_tail(&item->list, &dlm->work_list); + spin_unlock(&dlm->work_lock); + + queue_work(dlm->dlm_worker, &dlm->dispatched_work); + return 0; + +done: + if (res) + dlm_lockres_put(res); + dlm_put(dlm); + + return ret; +} + +static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) +{ + struct dlm_ctxt *dlm; + struct dlm_lock_resource *res; + u8 node; + u8 cleared = 0; + + dlm = item->dlm; + res = item->u.dl.deref_res; + node = item->u.dl.deref_node; + + spin_lock(&res->spinlock); + BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); + if (test_bit(node, res->refmap)) { + __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); + dlm_lockres_clear_refmap_bit(node, res); + cleared = 1; + } + spin_unlock(&res->spinlock); + + if (cleared) { + mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", + dlm->name, res->lockname.len, res->lockname.name, node); + dlm_lockres_calc_usage(dlm, res); + } else { + mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " + "but it is already dropped!\n", dlm->name, + res->lockname.len, res->lockname.name, node); + dlm_print_one_lock_resource(res); + } + + dlm_lockres_put(res); +} + +/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0 + * if not. If 0, numlocks is set to the number of locks in the lockres. 
+ */ +static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + int *numlocks) +{ + int ret; + int i; + int count = 0; + struct list_head *queue; + struct dlm_lock *lock; + + assert_spin_locked(&res->spinlock); + + ret = -EINVAL; + if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { + mlog(0, "cannot migrate lockres with unknown owner!\n"); + goto leave; + } + + if (res->owner != dlm->node_num) { + mlog(0, "cannot migrate lockres this node doesn't own!\n"); + goto leave; + } + + ret = 0; + queue = &res->granted; + for (i = 0; i < 3; i++) { + list_for_each_entry(lock, queue, list) { + ++count; + if (lock->ml.node == dlm->node_num) { + mlog(0, "found a lock owned by this node still " + "on the %s queue! will not migrate this " + "lockres\n", (i == 0 ? "granted" : + (i == 1 ? "converting" : + "blocked"))); + ret = -ENOTEMPTY; + goto leave; + } + } + queue++; + } + + *numlocks = count; + mlog(0, "migrateable lockres having %d locks\n", *numlocks); + +leave: + return ret; +} + +/* + * DLM_MIGRATE_LOCKRES + */ + + +static int dlm_migrate_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 target) +{ + struct dlm_master_list_entry *mle = NULL; + struct dlm_master_list_entry *oldmle = NULL; + struct dlm_migratable_lockres *mres = NULL; + int ret = 0; + const char *name; + unsigned int namelen; + int mle_added = 0; + int numlocks; + int wake = 0; + + if (!dlm_grab(dlm)) + return -EINVAL; + + name = res->lockname.name; + namelen = res->lockname.len; + + mlog(0, "migrating %.*s to %u\n", namelen, name, target); + + /* + * ensure this lockres is a proper candidate for migration + */ + spin_lock(&res->spinlock); + ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); + if (ret < 0) { + spin_unlock(&res->spinlock); + goto leave; + } + spin_unlock(&res->spinlock); + + /* no work to do */ + if (numlocks == 0) { + mlog(0, "no locks were found on this lockres! 
done!\n"); + goto leave; + } + + /* + * preallocate up front + * if this fails, abort + */ + + ret = -ENOMEM; + mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); + if (!mres) { + mlog_errno(ret); + goto leave; + } + + mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, + GFP_NOFS); + if (!mle) { + mlog_errno(ret); + goto leave; + } + ret = 0; + + /* + * find a node to migrate the lockres to + */ + + mlog(0, "picking a migration node\n"); + spin_lock(&dlm->spinlock); + /* pick a new node */ + if (!test_bit(target, dlm->domain_map) || + target >= O2NM_MAX_NODES) { + target = dlm_pick_migration_target(dlm, res); + } + mlog(0, "node %u chosen for migration\n", target); + + if (target >= O2NM_MAX_NODES || + !test_bit(target, dlm->domain_map)) { + /* target chosen is not alive */ + ret = -EINVAL; + } + + if (ret) { + spin_unlock(&dlm->spinlock); + goto fail; + } + + mlog(0, "continuing with target = %u\n", target); + + /* + * clear any existing master requests and + * add the migration mle to the list + */ + spin_lock(&dlm->master_lock); + ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, + namelen, target, dlm->node_num); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + + if (ret == -EEXIST) { + mlog(0, "another process is already migrating it\n"); + goto fail; + } + mle_added = 1; + + /* + * set the MIGRATING flag and flush asts + * if we fail after this we need to re-dirty the lockres + */ + if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { + mlog(ML_ERROR, "tried to migrate %.*s to %u, but " + "the target went down.\n", res->lockname.len, + res->lockname.name, target); + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_MIGRATING; + wake = 1; + spin_unlock(&res->spinlock); + ret = -EINVAL; + } + +fail: + if (oldmle) { + /* master is known, detach if not already detached */ + dlm_mle_detach_hb_events(dlm, oldmle); + dlm_put_mle(oldmle); + } + + if (ret < 0) { + if (mle_added) { + dlm_mle_detach_hb_events(dlm, mle); + dlm_put_mle(mle); + } else if (mle) { + kmem_cache_free(dlm_mle_cache, mle); + } + goto leave; + } + + /* + * at this point, we have a migration target, an mle + * in the master list, and the MIGRATING flag set on + * the lockres + */ + + /* now that remote nodes are spinning on the MIGRATING flag, + * ensure that all assert_master work is flushed. */ + flush_workqueue(dlm->dlm_worker); + + /* get an extra reference on the mle. + * otherwise the assert_master from the new + * master will destroy this. + * also, make sure that all callers of dlm_get_mle + * take both dlm->spinlock and dlm->master_lock */ + spin_lock(&dlm->spinlock); + spin_lock(&dlm->master_lock); + dlm_get_mle_inuse(mle); + spin_unlock(&dlm->master_lock); + spin_unlock(&dlm->spinlock); + + /* notify new node and send all lock state */ + /* call send_one_lockres with migration flag. + * this serves as notice to the target node that a + * migration is starting. */ + ret = dlm_send_one_lockres(dlm, res, mres, target, + DLM_MRES_MIGRATION); + + if (ret < 0) { + mlog(0, "migration to node %u failed with %d\n", + target, ret); + /* migration failed, detach and clean up mle */ + dlm_mle_detach_hb_events(dlm, mle); + dlm_put_mle(mle); + dlm_put_mle_inuse(mle); + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_MIGRATING; + wake = 1; + spin_unlock(&res->spinlock); + goto leave; + } + + /* at this point, the target sends a message to all nodes, + * (using dlm_do_migrate_request). 
this node is skipped since
+ * we had to put an mle in the list to begin the process. this
+ * node now waits for target to do an assert master. this node
+ * will be the last one notified, ensuring that the migration
+ * is complete everywhere. if the target dies while this is
+ * going on, some nodes could potentially see the target as the
+ * master, so it is important that my recovery finds the migration
+ * mle and sets the master to UNKNOWN. */
+
+
+ /* wait for new node to assert master */
+ while (1) {
+ ret = wait_event_interruptible_timeout(mle->wq,
+ (atomic_read(&mle->woken) == 1),
+ msecs_to_jiffies(5000));
+
+ if (ret >= 0) {
+ if (atomic_read(&mle->woken) == 1 ||
+ res->owner == target)
+ break;
+
+ mlog(0, "%s:%.*s: timed out during migration\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ /* avoid hang during shutdown when migrating lockres
+ * to a node which also goes down */
+ if (dlm_is_node_dead(dlm, target)) {
+ mlog(0, "%s:%.*s: expected migration "
+ "target %u is no longer up, restarting\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, target);
+ ret = -EINVAL;
+ /* migration failed, detach and clean up mle */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ wake = 1;
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+ } else
+ mlog(0, "%s:%.*s: caught signal during migration\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ }
+
+ /* all done, set the owner, clear the flag */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, target);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ dlm_remove_nonlocal_locks(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle_inuse(mle);
+ ret = 0;
+
+ dlm_lockres_calc_usage(dlm, res);
+
+leave:
+ /* re-dirty the lockres if we failed */
+ if (ret < 0)
+ dlm_kick_thread(dlm, res);
+
+ /* wake up waiters if the MIGRATING flag got set
+ * but migration failed */
+ if (wake)
+ wake_up(&res->wq);
+
+ /* TODO: cleanup */
+ if (mres)
+ free_page((unsigned long)mres);
+
+ dlm_put(dlm);
+
+ mlog(0, "returning %d\n", ret);
+ return ret;
+}
+
+#define DLM_MIGRATION_RETRY_MS 100
+
+/* Should be called only after beginning the domain leave process.
+ * There should not be any remaining locks on nonlocal lock resources,
+ * and there should be no local locks left on locally mastered resources.
+ *
+ * Called with the dlm spinlock held, may drop it to do migration, but
+ * will re-acquire before exit.
+ *
+ * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
+int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ int ret;
+ int lock_dropped = 0;
+ int numlocks;
+
+ spin_lock(&res->spinlock);
+ if (res->owner != dlm->node_num) {
+ if (!__dlm_lockres_unused(res)) {
+ mlog(ML_ERROR, "%s:%.*s: this node is not master, "
+ "trying to free this but locks remain\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ }
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+
+ /* No need to migrate a lockres having no locks */
+ ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
+ if (ret >= 0 && numlocks == 0) {
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+ spin_unlock(&res->spinlock);
+
+ /* Wheee! Migrate lockres here! Will sleep so drop spinlock. 
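dlm_migrate_lockres() does network i/o and the retry loop below calls msleep(), neither of which is allowed under a spinlock. 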
*/ + spin_unlock(&dlm->spinlock); + lock_dropped = 1; + while (1) { + ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES); + if (ret >= 0) + break; + if (ret == -ENOTEMPTY) { + mlog(ML_ERROR, "lockres %.*s still has local locks!\n", + res->lockname.len, res->lockname.name); + BUG(); + } + + mlog(0, "lockres %.*s: migrate failed, " + "retrying\n", res->lockname.len, + res->lockname.name); + msleep(DLM_MIGRATION_RETRY_MS); + } + spin_lock(&dlm->spinlock); +leave: + return lock_dropped; +} + +int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) +{ + int ret; + spin_lock(&dlm->ast_lock); + spin_lock(&lock->spinlock); + ret = (list_empty(&lock->bast_list) && !lock->bast_pending); + spin_unlock(&lock->spinlock); + spin_unlock(&dlm->ast_lock); + return ret; +} + +static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 mig_target) +{ + int can_proceed; + spin_lock(&res->spinlock); + can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); + spin_unlock(&res->spinlock); + + /* target has died, so make the caller break out of the + * wait_event, but caller must recheck the domain_map */ + spin_lock(&dlm->spinlock); + if (!test_bit(mig_target, dlm->domain_map)) + can_proceed = 1; + spin_unlock(&dlm->spinlock); + return can_proceed; +} + +static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + int ret; + spin_lock(&res->spinlock); + ret = !!(res->state & DLM_LOCK_RES_DIRTY); + spin_unlock(&res->spinlock); + return ret; +} + + +static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 target) +{ + int ret = 0; + + mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", + res->lockname.len, res->lockname.name, dlm->node_num, + target); + /* need to set MIGRATING flag on lockres. this is done by + * ensuring that all asts have been flushed for this lockres. */ + spin_lock(&res->spinlock); + BUG_ON(res->migration_pending); + res->migration_pending = 1; + /* strategy is to reserve an extra ast then release + * it below, letting the release do all of the work */ + __dlm_lockres_reserve_ast(res); + spin_unlock(&res->spinlock); + + /* now flush all the pending asts */ + dlm_kick_thread(dlm, res); + /* before waiting on DIRTY, block processes which may + * try to dirty the lockres before MIGRATING is set */ + spin_lock(&res->spinlock); + BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); + res->state |= DLM_LOCK_RES_BLOCK_DIRTY; + spin_unlock(&res->spinlock); + /* now wait on any pending asts and the DIRTY state */ + wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); + dlm_lockres_release_ast(dlm, res); + + mlog(0, "about to wait on migration_wq, dirty=%s\n", + res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); + /* if the extra ref we just put was the final one, this + * will pass thru immediately. otherwise, we need to wait + * for the last ast to finish. */ +again: + ret = wait_event_interruptible_timeout(dlm->migration_wq, + dlm_migration_can_proceed(dlm, res, target), + msecs_to_jiffies(1000)); + if (ret < 0) { + mlog(0, "woken again: migrating? %s, dead? %s\n", + res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", + test_bit(target, dlm->domain_map) ? "no":"yes"); + } else { + mlog(0, "all is well: migrating? %s, dead? %s\n", + res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", + test_bit(target, dlm->domain_map) ? 
"no":"yes"); + } + if (!dlm_migration_can_proceed(dlm, res, target)) { + mlog(0, "trying again...\n"); + goto again; + } + /* now that we are sure the MIGRATING state is there, drop + * the unneded state which blocked threads trying to DIRTY */ + spin_lock(&res->spinlock); + BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); + BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); + res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; + spin_unlock(&res->spinlock); + + /* did the target go down or die? */ + spin_lock(&dlm->spinlock); + if (!test_bit(target, dlm->domain_map)) { + mlog(ML_ERROR, "aha. migration target %u just went down\n", + target); + ret = -EHOSTDOWN; + } + spin_unlock(&dlm->spinlock); + + /* + * at this point: + * + * o the DLM_LOCK_RES_MIGRATING flag is set + * o there are no pending asts on this lockres + * o all processes trying to reserve an ast on this + * lockres must wait for the MIGRATING flag to clear + */ + return ret; +} + +/* last step in the migration process. + * original master calls this to free all of the dlm_lock + * structures that used to be for other nodes. */ +static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + struct list_head *queue = &res->granted; + int i, bit; + struct dlm_lock *lock, *next; + + assert_spin_locked(&res->spinlock); + + BUG_ON(res->owner == dlm->node_num); + + for (i=0; i<3; i++) { + list_for_each_entry_safe(lock, next, queue, list) { + if (lock->ml.node != dlm->node_num) { + mlog(0, "putting lock for node %u\n", + lock->ml.node); + /* be extra careful */ + BUG_ON(!list_empty(&lock->ast_list)); + BUG_ON(!list_empty(&lock->bast_list)); + BUG_ON(lock->ast_pending); + BUG_ON(lock->bast_pending); + dlm_lockres_clear_refmap_bit(lock->ml.node, res); + list_del_init(&lock->list); + dlm_lock_put(lock); + /* In a normal unlock, we would have added a + * DLM_UNLOCK_FREE_LOCK action. Force it. */ + dlm_lock_put(lock); + } + } + queue++; + } + bit = 0; + while (1) { + bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); + if (bit >= O2NM_MAX_NODES) + break; + /* do not clear the local node reference, if there is a + * process holding this, let it drop the ref itself */ + if (bit != dlm->node_num) { + mlog(0, "%s:%.*s: node %u had a ref to this " + "migrating lockres, clearing\n", dlm->name, + res->lockname.len, res->lockname.name, bit); + dlm_lockres_clear_refmap_bit(bit, res); + } + bit++; + } +} + +/* for now this is not too intelligent. we will + * need stats to make this do the right thing. + * this just finds the first lock on one of the + * queues and uses that node as the target. */ +static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + int i; + struct list_head *queue = &res->granted; + struct dlm_lock *lock; + int nodenum; + + assert_spin_locked(&dlm->spinlock); + + spin_lock(&res->spinlock); + for (i=0; i<3; i++) { + list_for_each_entry(lock, queue, list) { + /* up to the caller to make sure this node + * is alive */ + if (lock->ml.node != dlm->node_num) { + spin_unlock(&res->spinlock); + return lock->ml.node; + } + } + queue++; + } + spin_unlock(&res->spinlock); + mlog(0, "have not found a suitable target yet! checking domain map\n"); + + /* ok now we're getting desperate. pick anyone alive. 
*/ + nodenum = -1; + while (1) { + nodenum = find_next_bit(dlm->domain_map, + O2NM_MAX_NODES, nodenum+1); + mlog(0, "found %d in domain map\n", nodenum); + if (nodenum >= O2NM_MAX_NODES) + break; + if (nodenum != dlm->node_num) { + mlog(0, "picking %d\n", nodenum); + return nodenum; + } + } + + mlog(0, "giving up. no master to migrate to\n"); + return DLM_LOCK_RES_OWNER_UNKNOWN; +} + + + +/* this is called by the new master once all lockres + * data has been received */ +static int dlm_do_migrate_request(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 master, u8 new_master, + struct dlm_node_iter *iter) +{ + struct dlm_migrate_request migrate; + int ret, status = 0; + int nodenum; + + memset(&migrate, 0, sizeof(migrate)); + migrate.namelen = res->lockname.len; + memcpy(migrate.name, res->lockname.name, migrate.namelen); + migrate.new_master = new_master; + migrate.master = master; + + ret = 0; + + /* send message to all nodes, except the master and myself */ + while ((nodenum = dlm_node_iter_next(iter)) >= 0) { + if (nodenum == master || + nodenum == new_master) + continue; + + ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, + &migrate, sizeof(migrate), nodenum, + &status); + if (ret < 0) + mlog_errno(ret); + else if (status < 0) { + mlog(0, "migrate request (node %u) returned %d!\n", + nodenum, status); + ret = status; + } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { + /* during the migration request we short-circuited + * the mastery of the lockres. make sure we have + * a mastery ref for nodenum */ + mlog(0, "%s:%.*s: need ref for node %u\n", + dlm->name, res->lockname.len, res->lockname.name, + nodenum); + spin_lock(&res->spinlock); + dlm_lockres_set_refmap_bit(nodenum, res); + spin_unlock(&res->spinlock); + } + } + + if (ret < 0) + mlog_errno(ret); + + mlog(0, "returning ret=%d\n", ret); + return ret; +} + + +/* if there is an existing mle for this lockres, we now know who the master is. + * (the one who sent us *this* message) we can clear it up right away. + * since the process that put the mle on the list still has a reference to it, + * we can unhash it now, set the master and wake the process. as a result, + * we will have no mle in the list to start with. now we can add an mle for + * the migration and this should be the only one found for those scanning the + * list. */ +int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_lock_resource *res = NULL; + struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; + struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; + const char *name; + unsigned int namelen, hash; + int ret = 0; + + if (!dlm_grab(dlm)) + return -EINVAL; + + name = migrate->name; + namelen = migrate->namelen; + hash = dlm_lockid_hash(name, namelen); + + /* preallocate.. if this fails, abort */ + mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, + GFP_NOFS); + + if (!mle) { + ret = -ENOMEM; + goto leave; + } + + /* check for pre-existing lock */ + spin_lock(&dlm->spinlock); + res = __dlm_lookup_lockres(dlm, name, namelen, hash); + spin_lock(&dlm->master_lock); + + if (res) { + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_RECOVERING) { + /* if all is working ok, this can only mean that we got + * a migrate request from a node that we now see as + * dead. what can we do here? drop it to the floor? 
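for now, fail the request with -EINVAL; as noted below, this needs a better solution. 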
*/
+ spin_unlock(&res->spinlock);
+ mlog(ML_ERROR, "Got a migrate request, but the "
+ "lockres is marked as recovering!\n");
+ kmem_cache_free(dlm_mle_cache, mle);
+ ret = -EINVAL; /* need a better solution */
+ goto unlock;
+ }
+ res->state |= DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ }
+
+ /* ignore status. only nonzero status would BUG. */
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
+ name, namelen,
+ migrate->new_master,
+ migrate->master);
+
+unlock:
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ if (oldmle) {
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, oldmle);
+ dlm_put_mle(oldmle);
+ }
+
+ if (res)
+ dlm_lockres_put(res);
+leave:
+ dlm_put(dlm);
+ return ret;
+}
+
+/* must be holding dlm->spinlock and dlm->master_lock
+ * when adding a migration mle, we can clear any other mles
+ * in the master list because we know with certainty that
+ * the master is "master". so we remove any old mle from
+ * the list after setting its master field, and then add
+ * the new migration mle. this way we can hold to the rule
+ * of having only one mle for a given lock name at all times. */
+static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ struct dlm_master_list_entry **oldmle,
+ const char *name, unsigned int namelen,
+ u8 new_master, u8 master)
+{
+ int found;
+ int ret = 0;
+
+ *oldmle = NULL;
+
+ mlog_entry_void();
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+
+ /* caller is responsible for any ref taken here on oldmle */
+ found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
+ if (found) {
+ struct dlm_master_list_entry *tmp = *oldmle;
+ spin_lock(&tmp->spinlock);
+ if (tmp->type == DLM_MLE_MIGRATION) {
+ if (master == dlm->node_num) {
+ /* ah another process raced me to it */
+ mlog(0, "tried to migrate %.*s, but some "
+ "process beat me to it\n",
+ namelen, name);
+ ret = -EEXIST;
+ } else {
+ /* bad. 2 NODES are trying to migrate! */
+ mlog(ML_ERROR, "migration error mle: "
+ "master=%u new_master=%u // request: "
+ "master=%u new_master=%u // "
+ "lockres=%.*s\n",
+ tmp->master, tmp->new_master,
+ master, new_master,
+ namelen, name);
+ BUG();
+ }
+ } else {
+ /* this is essentially what assert_master does */
+ tmp->master = master;
+ atomic_set(&tmp->woken, 1);
+ wake_up(&tmp->wq);
+ /* remove it from the list so that only one
+ * mle will be found */
+ list_del_init(&tmp->list);
+ /* note: detach tmp here, not mle; mle has not
+ * been initialized yet at this point */
+ __dlm_mle_detach_hb_events(dlm, tmp);
+ ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
+ mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
+ "telling master to get ref for cleared out mle "
+ "during migration\n", dlm->name, namelen, name,
+ master, new_master);
+ }
+ spin_unlock(&tmp->spinlock);
+ }
+
+ /* now add a migration mle to the tail of the list */
+ dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
+ mle->new_master = new_master;
+ /* the new master will be sending an assert master for this. 
+ * at that point we will get the refmap reference */ + mle->master = master; + /* do this for consistency with other mle types */ + set_bit(new_master, mle->maybe_map); + list_add(&mle->list, &dlm->master_list); + + return ret; +} + + +void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) +{ + struct dlm_master_list_entry *mle, *next; + struct dlm_lock_resource *res; + unsigned int hash; + + mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); +top: + assert_spin_locked(&dlm->spinlock); + + /* clean the master list */ + spin_lock(&dlm->master_lock); + list_for_each_entry_safe(mle, next, &dlm->master_list, list) { + BUG_ON(mle->type != DLM_MLE_BLOCK && + mle->type != DLM_MLE_MASTER && + mle->type != DLM_MLE_MIGRATION); + + /* MASTER mles are initiated locally. the waiting + * process will notice the node map change + * shortly. let that happen as normal. */ + if (mle->type == DLM_MLE_MASTER) + continue; + + + /* BLOCK mles are initiated by other nodes. + * need to clean up if the dead node would have + * been the master. */ + if (mle->type == DLM_MLE_BLOCK) { + int bit; + + spin_lock(&mle->spinlock); + bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); + if (bit != dead_node) { + mlog(0, "mle found, but dead node %u would " + "not have been master\n", dead_node); + spin_unlock(&mle->spinlock); + } else { + /* must drop the refcount by one since the + * assert_master will never arrive. this + * may result in the mle being unlinked and + * freed, but there may still be a process + * waiting in the dlmlock path which is fine. */ + mlog(0, "node %u was expected master\n", + dead_node); + atomic_set(&mle->woken, 1); + spin_unlock(&mle->spinlock); + wake_up(&mle->wq); + /* do not need events any longer, so detach + * from heartbeat */ + __dlm_mle_detach_hb_events(dlm, mle); + __dlm_put_mle(mle); + } + continue; + } + + /* everything else is a MIGRATION mle */ + + /* the rule for MIGRATION mles is that the master + * becomes UNKNOWN if *either* the original or + * the new master dies. all UNKNOWN lockreses + * are sent to whichever node becomes the recovery + * master. the new master is responsible for + * determining if there is still a master for + * this lockres, or if he needs to take over + * mastery. either way, this node should expect + * another message to resolve this. */ + if (mle->master != dead_node && + mle->new_master != dead_node) + continue; + + /* if we have reached this point, this mle needs to + * be removed from the list and freed. */ + + /* remove from the list early. NOTE: unlinking + * list_head while in list_for_each_safe */ + __dlm_mle_detach_hb_events(dlm, mle); + spin_lock(&mle->spinlock); + list_del_init(&mle->list); + atomic_set(&mle->woken, 1); + spin_unlock(&mle->spinlock); + wake_up(&mle->wq); + + mlog(0, "%s: node %u died during migration from " + "%u to %u!\n", dlm->name, dead_node, + mle->master, mle->new_master); + /* if there is a lockres associated with this + * mle, find it and set its owner to UNKNOWN */ + hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len); + res = __dlm_lookup_lockres(dlm, mle->u.name.name, + mle->u.name.len, hash); + if (res) { + /* unfortunately if we hit this rare case, our + * lock ordering is messed. we need to drop + * the master lock so that we can take the + * lockres lock, meaning that we will have to + * restart from the head of list. 
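taking res->spinlock while holding dlm->master_lock would invert the lock ordering, so master_lock is dropped first and retaken once the lockres has been dealt with. 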
*/ + spin_unlock(&dlm->master_lock); + + /* move lockres onto recovery list */ + spin_lock(&res->spinlock); + dlm_set_lockres_owner(dlm, res, + DLM_LOCK_RES_OWNER_UNKNOWN); + dlm_move_lockres_to_recovery_list(dlm, res); + spin_unlock(&res->spinlock); + dlm_lockres_put(res); + + /* about to get rid of mle, detach from heartbeat */ + __dlm_mle_detach_hb_events(dlm, mle); + + /* dump the mle */ + spin_lock(&dlm->master_lock); + __dlm_put_mle(mle); + spin_unlock(&dlm->master_lock); + + /* restart */ + goto top; + } + + /* this may be the last reference */ + __dlm_put_mle(mle); + } + spin_unlock(&dlm->master_lock); +} + + +int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + u8 old_master) +{ + struct dlm_node_iter iter; + int ret = 0; + + spin_lock(&dlm->spinlock); + dlm_node_iter_init(dlm->domain_map, &iter); + clear_bit(old_master, iter.node_map); + clear_bit(dlm->node_num, iter.node_map); + spin_unlock(&dlm->spinlock); + + /* ownership of the lockres is changing. account for the + * mastery reference here since old_master will briefly have + * a reference after the migration completes */ + spin_lock(&res->spinlock); + dlm_lockres_set_refmap_bit(old_master, res); + spin_unlock(&res->spinlock); + + mlog(0, "now time to do a migrate request to other nodes\n"); + ret = dlm_do_migrate_request(dlm, res, old_master, + dlm->node_num, &iter); + if (ret < 0) { + mlog_errno(ret); + goto leave; + } + + mlog(0, "doing assert master of %.*s to all except the original node\n", + res->lockname.len, res->lockname.name); + /* this call now finishes out the nodemap + * even if one or more nodes die */ + ret = dlm_do_assert_master(dlm, res, iter.node_map, + DLM_ASSERT_MASTER_FINISH_MIGRATION); + if (ret < 0) { + /* no longer need to retry. all living nodes contacted. */ + mlog_errno(ret); + ret = 0; + } + + memset(iter.node_map, 0, sizeof(iter.node_map)); + set_bit(old_master, iter.node_map); + mlog(0, "doing assert master of %.*s back to %u\n", + res->lockname.len, res->lockname.name, old_master); + ret = dlm_do_assert_master(dlm, res, iter.node_map, + DLM_ASSERT_MASTER_FINISH_MIGRATION); + if (ret < 0) { + mlog(0, "assert master to original master failed " + "with %d.\n", ret); + /* the only nonzero status here would be because of + * a dead original node. we're done. */ + ret = 0; + } + + /* all done, set the owner, clear the flag */ + spin_lock(&res->spinlock); + dlm_set_lockres_owner(dlm, res, dlm->node_num); + res->state &= ~DLM_LOCK_RES_MIGRATING; + spin_unlock(&res->spinlock); + /* re-dirty it on the new master */ + dlm_kick_thread(dlm, res); + wake_up(&res->wq); +leave: + return ret; +} + +/* + * LOCKRES AST REFCOUNT + * this is integral to migration + */ + +/* for future intent to call an ast, reserve one ahead of time. + * this should be called only after waiting on the lockres + * with dlm_wait_on_lockres, and while still holding the + * spinlock after the call. */ +void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) +{ + assert_spin_locked(&res->spinlock); + if (res->state & DLM_LOCK_RES_MIGRATING) { + __dlm_print_one_lock_resource(res); + } + BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); + + atomic_inc(&res->asts_reserved); +} + +/* + * used to drop the reserved ast, either because it went unused, + * or because the ast/bast was actually called. + * + * also, if there is a pending migration on this lockres, + * and this was the last pending ast on the lockres, + * atomically set the MIGRATING flag before we drop the lock. 
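+ * (the atomic_dec_and_lock() on asts_reserved below makes that flip
+ * race-free; the usual reserve/release pairing, sketched, is
+ *   spin_lock(&res->spinlock);
+ *   __dlm_lockres_reserve_ast(res);
+ *   spin_unlock(&res->spinlock);
+ *   ...fire or flush the ast...
+ *   dlm_lockres_release_ast(dlm, res);
+ * as in dlm_mark_lockres_migrating() above).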
+ * this is how we ensure that migration can proceed with no + * asts in progress. note that it is ok if the state of the + * queues is such that a lock should be granted in the future + * or that a bast should be fired, because the new master will + * shuffle the lists on this lockres as soon as it is migrated. + */ +void dlm_lockres_release_ast(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) + return; + + if (!res->migration_pending) { + spin_unlock(&res->spinlock); + return; + } + + BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); + res->migration_pending = 0; + res->state |= DLM_LOCK_RES_MIGRATING; + spin_unlock(&res->spinlock); + wake_up(&res->wq); + wake_up(&dlm->migration_wq); +} diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c new file mode 100644 index 0000000..bcb9260 --- /dev/null +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -0,0 +1,2825 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmrecovery.c + * + * recovery stuff + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/random.h> +#include <linux/blkdev.h> +#include <linux/socket.h> +#include <linux/inet.h> +#include <linux/timer.h> +#include <linux/kthread.h> +#include <linux/delay.h> + + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" +#include "dlmdomain.h" + +#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) +#include "cluster/masklog.h" + +static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); + +static int dlm_recovery_thread(void *data); +void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); +int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); +void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); +static int dlm_do_recovery(struct dlm_ctxt *dlm); + +static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); +static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); +static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); +static int dlm_request_all_locks(struct dlm_ctxt *dlm, + u8 request_from, u8 dead_node); +static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); + +static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); +static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, + const char *lockname, int namelen, + int total_locks, u64 cookie, + u8 flags, u8 master); +static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, + struct dlm_migratable_lockres *mres, + u8 send_to, + struct dlm_lock_resource *res, + int total_locks); +static int dlm_process_recovery_data(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_migratable_lockres *mres); +static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); +static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, + u8 dead_node, u8 send_to); +static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); +static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, + struct list_head *list, u8 dead_node); +static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, + u8 dead_node, u8 new_master); +static void dlm_reco_ast(void *astdata); +static void dlm_reco_bast(void *astdata, int blocked_type); +static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); +static void dlm_request_all_locks_worker(struct dlm_work_item *item, + void *data); +static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); +static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 *real_master); + +static u64 dlm_get_next_mig_cookie(void); + +static DEFINE_SPINLOCK(dlm_reco_state_lock); +static DEFINE_SPINLOCK(dlm_mig_cookie_lock); +static u64 dlm_mig_cookie = 1; + +static u64 dlm_get_next_mig_cookie(void) +{ + u64 c; + spin_lock(&dlm_mig_cookie_lock); + c = dlm_mig_cookie; + if (dlm_mig_cookie == (~0ULL)) + dlm_mig_cookie = 1; + else + dlm_mig_cookie++; + spin_unlock(&dlm_mig_cookie_lock); + return c; +} + +static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, + u8 dead_node) +{ + assert_spin_locked(&dlm->spinlock); + if (dlm->reco.dead_node != dead_node) + mlog(0, "%s: changing dead_node from %u to %u\n", + dlm->name, dlm->reco.dead_node, dead_node); + dlm->reco.dead_node = dead_node; +} + +static inline void 
dlm_set_reco_master(struct dlm_ctxt *dlm,
+ u8 master)
+{
+ assert_spin_locked(&dlm->spinlock);
+ mlog(0, "%s: changing new_master from %u to %u\n",
+ dlm->name, dlm->reco.new_master, master);
+ dlm->reco.new_master = master;
+}
+
+static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
+{
+ assert_spin_locked(&dlm->spinlock);
+ clear_bit(dlm->reco.dead_node, dlm->recovery_map);
+ dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
+ dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
+}
+
+static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
+{
+ spin_lock(&dlm->spinlock);
+ __dlm_reset_recovery(dlm);
+ spin_unlock(&dlm->spinlock);
+}
+
+/* Worker function used during recovery. */
+void dlm_dispatch_work(struct work_struct *work)
+{
+ struct dlm_ctxt *dlm =
+ container_of(work, struct dlm_ctxt, dispatched_work);
+ LIST_HEAD(tmp_list);
+ struct dlm_work_item *item, *next;
+ dlm_workfunc_t *workfunc;
+ int tot=0;
+
+ spin_lock(&dlm->work_lock);
+ list_splice_init(&dlm->work_list, &tmp_list);
+ spin_unlock(&dlm->work_lock);
+
+ list_for_each_entry(item, &tmp_list, list) {
+ tot++;
+ }
+ mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
+
+ list_for_each_entry_safe(item, next, &tmp_list, list) {
+ workfunc = item->func;
+ list_del_init(&item->list);
+
+ /* already have ref on dlm to avoid having
+ * it disappear. just double-check. */
+ BUG_ON(item->dlm != dlm);
+
+ /* this is allowed to sleep and
+ * call network stuff */
+ workfunc(item, item->data);
+
+ dlm_put(dlm);
+ kfree(item);
+ }
+}
+
+/*
+ * RECOVERY THREAD
+ */
+
+void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
+{
+ /* wake the recovery thread
+ * this will wake the reco thread in one of three places
+ * 1) sleeping with no recovery happening
+ * 2) sleeping with recovery mastered elsewhere
+ * 3) recovery mastered here, waiting on reco data */
+
+ wake_up(&dlm->dlm_reco_thread_wq);
+}
+
+/* Launch the recovery thread */
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
+{
+ mlog(0, "starting dlm recovery thread...\n");
+
+ dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
+ "dlm_reco_thread");
+ if (IS_ERR(dlm->dlm_reco_thread_task)) {
+ mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
+ dlm->dlm_reco_thread_task = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
+{
+ if (dlm->dlm_reco_thread_task) {
+ mlog(0, "waiting for dlm recovery thread to exit\n");
+ kthread_stop(dlm->dlm_reco_thread_task);
+ dlm->dlm_reco_thread_task = NULL;
+ }
+}
+
+
+
+/*
+ * this is lame, but here's how recovery works...
+ * 1) all recovery threads cluster wide will work on recovering
+ * ONE node at a time
+ * 2) negotiate who will take over all the locks for the dead node.
+ * that's right... ALL the locks.
+ * 3) once a new master is chosen, everyone scans all locks
+ * and moves aside those mastered by the dead guy
+ * 4) each of these locks should be locked until recovery is done
+ * 5) the new master collects up all of the secondary lock queue info
+ * one lock at a time, forcing each node to communicate back
+ * before continuing
+ * 6) each secondary lock queue responds with the full known lock info
+ * 7) once the new master has run all its locks, it sends an ALLDONE! 
+ * message to everyone + * 8) upon receiving this message, the secondary queue node unlocks + * and responds to the ALLDONE + * 9) once the new master gets responses from everyone, he unlocks + * everything and recovery for this dead node is done + *10) go back to 2) while there are still dead nodes + * + */ + +static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) +{ + struct dlm_reco_node_data *ndata; + struct dlm_lock_resource *res; + + mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), + dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", + dlm->reco.dead_node, dlm->reco.new_master); + + list_for_each_entry(ndata, &dlm->reco.node_data, list) { + char *st = "unknown"; + switch (ndata->state) { + case DLM_RECO_NODE_DATA_INIT: + st = "init"; + break; + case DLM_RECO_NODE_DATA_REQUESTING: + st = "requesting"; + break; + case DLM_RECO_NODE_DATA_DEAD: + st = "dead"; + break; + case DLM_RECO_NODE_DATA_RECEIVING: + st = "receiving"; + break; + case DLM_RECO_NODE_DATA_REQUESTED: + st = "requested"; + break; + case DLM_RECO_NODE_DATA_DONE: + st = "done"; + break; + case DLM_RECO_NODE_DATA_FINALIZE_SENT: + st = "finalize-sent"; + break; + default: + st = "bad"; + break; + } + mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", + dlm->name, ndata->node_num, st); + } + list_for_each_entry(res, &dlm->reco.resources, recovering) { + mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", + dlm->name, res->lockname.len, res->lockname.name); + } +} + +#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) + +static int dlm_recovery_thread(void *data) +{ + int status; + struct dlm_ctxt *dlm = data; + unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); + + mlog(0, "dlm thread running for %s...\n", dlm->name); + + while (!kthread_should_stop()) { + if (dlm_joined(dlm)) { + status = dlm_do_recovery(dlm); + if (status == -EAGAIN) { + /* do not sleep, recheck immediately. 
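-EAGAIN means one dead node was just recovered and others may still be waiting in the recovery map. 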
*/
+ continue;
+ }
+ if (status < 0)
+ mlog_errno(status);
+ }
+
+ wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
+ kthread_should_stop(),
+ timeout);
+ }
+
+ mlog(0, "quitting DLM recovery thread\n");
+ return 0;
+}
+
+/* returns true when the recovery master has contacted us */
+static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
+{
+ int ready;
+ spin_lock(&dlm->spinlock);
+ ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
+ spin_unlock(&dlm->spinlock);
+ return ready;
+}
+
+/* returns true if node is no longer in the domain
+ * could be dead or just not joined */
+int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
+{
+ int dead;
+ spin_lock(&dlm->spinlock);
+ dead = !test_bit(node, dlm->domain_map);
+ spin_unlock(&dlm->spinlock);
+ return dead;
+}
+
+/* returns true if the node has been recovered, i.e. its
+ * bit has been cleared from the recovery map */
+static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
+{
+ int recovered;
+ spin_lock(&dlm->spinlock);
+ recovered = !test_bit(node, dlm->recovery_map);
+ spin_unlock(&dlm->spinlock);
+ return recovered;
+}
+
+
+int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+{
+ if (timeout) {
+ mlog(ML_NOTICE, "%s: waiting %dms for notification of "
+ "death of node %u\n", dlm->name, timeout, node);
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_dead(dlm, node),
+ msecs_to_jiffies(timeout));
+ } else {
+ mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
+ "of death of node %u\n", dlm->name, node);
+ wait_event(dlm->dlm_reco_thread_wq,
+ dlm_is_node_dead(dlm, node));
+ }
+ /* for now, return 0 */
+ return 0;
+}
+
+int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+{
+ if (timeout) {
+ mlog(0, "%s: waiting %dms for notification of "
+ "recovery of node %u\n", dlm->name, timeout, node);
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ } else {
+ mlog(0, "%s: waiting indefinitely for notification "
+ "of recovery of node %u\n", dlm->name, node);
+ wait_event(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node));
+ }
+ /* for now, return 0 */
+ return 0;
+}
+
+/* callers of the top-level api calls (dlmlock/dlmunlock) should
+ * block on the dlm->reco.event when recovery is in progress. 
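+ * (dlm_wait_for_recovery() below is the helper that does this blocking.)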
+ * the dlm recovery thread will set this state when it begins + * recovering a dead node (as the new master or not) and clear + * the state and wake as soon as all affected lock resources have + * been marked with the RECOVERY flag */ +static int dlm_in_recovery(struct dlm_ctxt *dlm) +{ + int in_recovery; + spin_lock(&dlm->spinlock); + in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); + spin_unlock(&dlm->spinlock); + return in_recovery; +} + + +void dlm_wait_for_recovery(struct dlm_ctxt *dlm) +{ + if (dlm_in_recovery(dlm)) { + mlog(0, "%s: reco thread %d in recovery: " + "state=%d, master=%u, dead=%u\n", + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), + dlm->reco.state, dlm->reco.new_master, + dlm->reco.dead_node); + } + wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); +} + +static void dlm_begin_recovery(struct dlm_ctxt *dlm) +{ + spin_lock(&dlm->spinlock); + BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); + dlm->reco.state |= DLM_RECO_STATE_ACTIVE; + spin_unlock(&dlm->spinlock); +} + +static void dlm_end_recovery(struct dlm_ctxt *dlm) +{ + spin_lock(&dlm->spinlock); + BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); + dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; + spin_unlock(&dlm->spinlock); + wake_up(&dlm->reco.event); +} + +static int dlm_do_recovery(struct dlm_ctxt *dlm) +{ + int status = 0; + int ret; + + spin_lock(&dlm->spinlock); + + /* check to see if the new master has died */ + if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && + test_bit(dlm->reco.new_master, dlm->recovery_map)) { + mlog(0, "new master %u died while recovering %u!\n", + dlm->reco.new_master, dlm->reco.dead_node); + /* unset the new_master, leave dead_node */ + dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); + } + + /* select a target to recover */ + if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { + int bit; + + bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0); + if (bit >= O2NM_MAX_NODES || bit < 0) + dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); + else + dlm_set_reco_dead_node(dlm, bit); + } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { + /* BUG? */ + mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", + dlm->reco.dead_node); + dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); + } + + if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { + // mlog(0, "nothing to recover! sleeping now!\n"); + spin_unlock(&dlm->spinlock); + /* return to main thread loop and sleep. */ + return 0; + } + mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n", + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), + dlm->reco.dead_node); + spin_unlock(&dlm->spinlock); + + /* take write barrier */ + /* (stops the list reshuffling thread, proxy ast handling) */ + dlm_begin_recovery(dlm); + + if (dlm->reco.new_master == dlm->node_num) + goto master_here; + + if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { + /* choose a new master, returns 0 if this node + * is the master, -EEXIST if it's another node. + * this does not return until a new master is chosen + * or recovery completes entirely. */ + ret = dlm_pick_recovery_master(dlm); + if (!ret) { + /* already notified everyone. go. 
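dlm_pick_recovery_master() has already sent the begin-reco message to the other nodes. 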
*/ + goto master_here; + } + mlog(0, "another node will master this recovery session.\n"); + } + mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", + dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, + dlm->node_num, dlm->reco.dead_node); + + /* it is safe to start everything back up here + * because all of the dead node's lock resources + * have been marked as in-recovery */ + dlm_end_recovery(dlm); + + /* sleep out in main dlm_recovery_thread loop. */ + return 0; + +master_here: + mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " + "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task), + dlm->node_num, dlm->reco.dead_node, dlm->name); + + status = dlm_remaster_locks(dlm, dlm->reco.dead_node); + if (status < 0) { + /* we should never hit this anymore */ + mlog(ML_ERROR, "error %d remastering locks for node %u, " + "retrying.\n", status, dlm->reco.dead_node); + /* yield a bit to allow any final network messages + * to get handled on remaining nodes */ + msleep(100); + } else { + /* success! see if any other nodes need recovery */ + mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n", + dlm->name, dlm->reco.dead_node, dlm->node_num); + dlm_reset_recovery(dlm); + } + dlm_end_recovery(dlm); + + /* continue and look for another dead node */ + return -EAGAIN; +} + +static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) +{ + int status = 0; + struct dlm_reco_node_data *ndata; + int all_nodes_done; + int destroy = 0; + int pass = 0; + + do { + /* we have become recovery master. there is no escaping + * this, so just keep trying until we get it. */ + status = dlm_init_recovery_area(dlm, dead_node); + if (status < 0) { + mlog(ML_ERROR, "%s: failed to alloc recovery area, " + "retrying\n", dlm->name); + msleep(1000); + } + } while (status != 0); + + /* safe to access the node data list without a lock, since this + * process is the only one to change the list */ + list_for_each_entry(ndata, &dlm->reco.node_data, list) { + BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); + ndata->state = DLM_RECO_NODE_DATA_REQUESTING; + + mlog(0, "requesting lock info from node %u\n", + ndata->node_num); + + if (ndata->node_num == dlm->node_num) { + ndata->state = DLM_RECO_NODE_DATA_DONE; + continue; + } + + do { + status = dlm_request_all_locks(dlm, ndata->node_num, + dead_node); + if (status < 0) { + mlog_errno(status); + if (dlm_is_host_down(status)) { + /* node died, ignore it for recovery */ + status = 0; + ndata->state = DLM_RECO_NODE_DATA_DEAD; + /* wait for the domain map to catch up + * with the network state. */ + wait_event_timeout(dlm->dlm_reco_thread_wq, + dlm_is_node_dead(dlm, + ndata->node_num), + msecs_to_jiffies(1000)); + mlog(0, "waited 1 sec for %u, " + "dead? %s\n", ndata->node_num, + dlm_is_node_dead(dlm, ndata->node_num) ? + "yes" : "no"); + } else { + /* -ENOMEM on the other node */ + mlog(0, "%s: node %u returned " + "%d during recovery, retrying " + "after a short wait\n", + dlm->name, ndata->node_num, + status); + msleep(100); + } + } + } while (status != 0); + + spin_lock(&dlm_reco_state_lock); + switch (ndata->state) { + case DLM_RECO_NODE_DATA_INIT: + case DLM_RECO_NODE_DATA_FINALIZE_SENT: + case DLM_RECO_NODE_DATA_REQUESTED: + BUG(); + break; + case DLM_RECO_NODE_DATA_DEAD: + mlog(0, "node %u died after requesting " + "recovery info for node %u\n", + ndata->node_num, dead_node); + /* fine. don't need this node's info. + * continue without it. 
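its own death will be trapped by the heartbeat callbacks and handled in a later recovery pass. 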
*/ + break; + case DLM_RECO_NODE_DATA_REQUESTING: + ndata->state = DLM_RECO_NODE_DATA_REQUESTED; + mlog(0, "now receiving recovery data from " + "node %u for dead node %u\n", + ndata->node_num, dead_node); + break; + case DLM_RECO_NODE_DATA_RECEIVING: + mlog(0, "already receiving recovery data from " + "node %u for dead node %u\n", + ndata->node_num, dead_node); + break; + case DLM_RECO_NODE_DATA_DONE: + mlog(0, "already DONE receiving recovery data " + "from node %u for dead node %u\n", + ndata->node_num, dead_node); + break; + } + spin_unlock(&dlm_reco_state_lock); + } + + mlog(0, "done requesting all lock info\n"); + + /* nodes should be sending reco data now + * just need to wait */ + + while (1) { + /* check all the nodes now to see if we are + * done, or if anyone died */ + all_nodes_done = 1; + spin_lock(&dlm_reco_state_lock); + list_for_each_entry(ndata, &dlm->reco.node_data, list) { + mlog(0, "checking recovery state of node %u\n", + ndata->node_num); + switch (ndata->state) { + case DLM_RECO_NODE_DATA_INIT: + case DLM_RECO_NODE_DATA_REQUESTING: + mlog(ML_ERROR, "bad ndata state for " + "node %u: state=%d\n", + ndata->node_num, ndata->state); + BUG(); + break; + case DLM_RECO_NODE_DATA_DEAD: + mlog(0, "node %u died after " + "requesting recovery info for " + "node %u\n", ndata->node_num, + dead_node); + break; + case DLM_RECO_NODE_DATA_RECEIVING: + case DLM_RECO_NODE_DATA_REQUESTED: + mlog(0, "%s: node %u still in state %s\n", + dlm->name, ndata->node_num, + ndata->state==DLM_RECO_NODE_DATA_RECEIVING ? + "receiving" : "requested"); + all_nodes_done = 0; + break; + case DLM_RECO_NODE_DATA_DONE: + mlog(0, "%s: node %u state is done\n", + dlm->name, ndata->node_num); + break; + case DLM_RECO_NODE_DATA_FINALIZE_SENT: + mlog(0, "%s: node %u state is finalize\n", + dlm->name, ndata->node_num); + break; + } + } + spin_unlock(&dlm_reco_state_lock); + + mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, + all_nodes_done?"yes":"no"); + if (all_nodes_done) { + int ret; + + /* all nodes are now in DLM_RECO_NODE_DATA_DONE state + * just send a finalize message to everyone and + * clean up */ + mlog(0, "all nodes are done! 
send finalize\n"); + ret = dlm_send_finalize_reco_message(dlm); + if (ret < 0) + mlog_errno(ret); + + spin_lock(&dlm->spinlock); + dlm_finish_local_lockres_recovery(dlm, dead_node, + dlm->node_num); + spin_unlock(&dlm->spinlock); + mlog(0, "should be done with recovery!\n"); + + mlog(0, "finishing recovery of %s at %lu, " + "dead=%u, this=%u, new=%u\n", dlm->name, + jiffies, dlm->reco.dead_node, + dlm->node_num, dlm->reco.new_master); + destroy = 1; + status = 0; + /* rescan everything marked dirty along the way */ + dlm_kick_thread(dlm, NULL); + break; + } + /* wait to be signalled, with periodic timeout + * to check for node death */ + wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, + kthread_should_stop(), + msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); + + } + + if (destroy) + dlm_destroy_recovery_area(dlm, dead_node); + + mlog_exit(status); + return status; +} + +static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) +{ + int num=0; + struct dlm_reco_node_data *ndata; + + spin_lock(&dlm->spinlock); + memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); + /* nodes can only be removed (by dying) after dropping + * this lock, and death will be trapped later, so this should do */ + spin_unlock(&dlm->spinlock); + + while (1) { + num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); + if (num >= O2NM_MAX_NODES) { + break; + } + BUG_ON(num == dead_node); + + ndata = kzalloc(sizeof(*ndata), GFP_NOFS); + if (!ndata) { + dlm_destroy_recovery_area(dlm, dead_node); + return -ENOMEM; + } + ndata->node_num = num; + ndata->state = DLM_RECO_NODE_DATA_INIT; + spin_lock(&dlm_reco_state_lock); + list_add_tail(&ndata->list, &dlm->reco.node_data); + spin_unlock(&dlm_reco_state_lock); + num++; + } + + return 0; +} + +static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) +{ + struct dlm_reco_node_data *ndata, *next; + LIST_HEAD(tmplist); + + spin_lock(&dlm_reco_state_lock); + list_splice_init(&dlm->reco.node_data, &tmplist); + spin_unlock(&dlm_reco_state_lock); + + list_for_each_entry_safe(ndata, next, &tmplist, list) { + list_del_init(&ndata->list); + kfree(ndata); + } +} + +static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, + u8 dead_node) +{ + struct dlm_lock_request lr; + enum dlm_status ret; + + mlog(0, "\n"); + + + mlog(0, "dlm_request_all_locks: dead node is %u, sending request " + "to %u\n", dead_node, request_from); + + memset(&lr, 0, sizeof(lr)); + lr.node_idx = dlm->node_num; + lr.dead_node = dead_node; + + // send message + ret = DLM_NOLOCKMGR; + ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, + &lr, sizeof(lr), request_from, NULL); + + /* negative status is handled by caller */ + if (ret < 0) + mlog_errno(ret); + + // return from here, then + // sleep until all received or error + return ret; + +} + +int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; + char *buf = NULL; + struct dlm_work_item *item = NULL; + + if (!dlm_grab(dlm)) + return -EINVAL; + + if (lr->dead_node != dlm->reco.dead_node) { + mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local " + "dead_node is %u\n", dlm->name, lr->node_idx, + lr->dead_node, dlm->reco.dead_node); + dlm_print_reco_node_status(dlm); + /* this is a hack */ + dlm_put(dlm); + return -ENOMEM; + } + BUG_ON(lr->dead_node != dlm->reco.dead_node); + + item = kzalloc(sizeof(*item), GFP_NOFS); + if (!item) { + 
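+ /* allocation failed: drop the ref taken by dlm_grab() above + * and return -ENOMEM so the requesting node retries after a + * short wait (see the retry loop in dlm_remaster_locks()) */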
dlm_put(dlm); + return -ENOMEM; + } + + /* this will get freed by dlm_request_all_locks_worker */ + buf = (char *) __get_free_page(GFP_NOFS); + if (!buf) { + kfree(item); + dlm_put(dlm); + return -ENOMEM; + } + + /* queue up work for dlm_request_all_locks_worker */ + dlm_grab(dlm); /* get an extra ref for the work item */ + dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); + item->u.ral.reco_master = lr->node_idx; + item->u.ral.dead_node = lr->dead_node; + spin_lock(&dlm->work_lock); + list_add_tail(&item->list, &dlm->work_list); + spin_unlock(&dlm->work_lock); + queue_work(dlm->dlm_worker, &dlm->dispatched_work); + + dlm_put(dlm); + return 0; +} + +static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) +{ + struct dlm_migratable_lockres *mres; + struct dlm_lock_resource *res; + struct dlm_ctxt *dlm; + LIST_HEAD(resources); + int ret; + u8 dead_node, reco_master; + int skip_all_done = 0; + + dlm = item->dlm; + dead_node = item->u.ral.dead_node; + reco_master = item->u.ral.reco_master; + mres = (struct dlm_migratable_lockres *)data; + + mlog(0, "%s: recovery worker started, dead=%u, master=%u\n", + dlm->name, dead_node, reco_master); + + if (dead_node != dlm->reco.dead_node || + reco_master != dlm->reco.new_master) { + /* worker could have been created before the recovery master + * died. if so, do not continue, but do not error. */ + if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { + mlog(ML_NOTICE, "%s: will not send recovery state, " + "recovery master %u died, thread=(dead=%u,mas=%u)" + " current=(dead=%u,mas=%u)\n", dlm->name, + reco_master, dead_node, reco_master, + dlm->reco.dead_node, dlm->reco.new_master); + } else { + mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, " + "master=%u), request(dead=%u, master=%u)\n", + dlm->name, dlm->reco.dead_node, + dlm->reco.new_master, dead_node, reco_master); + } + goto leave; + } + + /* lock resources should have already been moved to the + * dlm->reco.resources list. now move items from that list + * to a temp list if the dead owner matches. note that the + * whole cluster recovers only one node at a time, so we + * can safely move UNKNOWN lock resources for each recovery + * session. 
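+ * (an UNKNOWN owner at this point can only relate to the node + * currently being recovered, which is why those resources may + * safely be handed to the recovery master too.)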
*/ + dlm_move_reco_locks_to_list(dlm, &resources, dead_node); + + /* now we can begin blasting lockreses without the dlm lock */ + + /* any errors returned will be due to the new_master dying, + * the dlm_reco_thread should detect this */ + list_for_each_entry(res, &resources, recovering) { + ret = dlm_send_one_lockres(dlm, res, mres, reco_master, + DLM_MRES_RECOVERY); + if (ret < 0) { + mlog(ML_ERROR, "%s: node %u went down while sending " + "recovery state for dead node %u, ret=%d\n", dlm->name, + reco_master, dead_node, ret); + skip_all_done = 1; + break; + } + } + + /* move the resources back to the list */ + spin_lock(&dlm->spinlock); + list_splice_init(&resources, &dlm->reco.resources); + spin_unlock(&dlm->spinlock); + + if (!skip_all_done) { + ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); + if (ret < 0) { + mlog(ML_ERROR, "%s: node %u went down while sending " + "recovery all-done for dead node %u, ret=%d\n", + dlm->name, reco_master, dead_node, ret); + } + } +leave: + free_page((unsigned long)data); +} + + +static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) +{ + int ret, tmpret; + struct dlm_reco_data_done done_msg; + + memset(&done_msg, 0, sizeof(done_msg)); + done_msg.node_idx = dlm->node_num; + done_msg.dead_node = dead_node; + mlog(0, "sending DATA DONE message to %u, " + "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, + done_msg.dead_node); + + ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, + sizeof(done_msg), send_to, &tmpret); + if (ret < 0) { + if (!dlm_is_host_down(ret)) { + mlog_errno(ret); + mlog(ML_ERROR, "%s: unknown error sending data-done " + "to %u\n", dlm->name, send_to); + BUG(); + } + } else + ret = tmpret; + return ret; +} + + +int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; + struct dlm_reco_node_data *ndata = NULL; + int ret = -EINVAL; + + if (!dlm_grab(dlm)) + return -EINVAL; + + mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " + "node_idx=%u, this node=%u\n", done->dead_node, + dlm->reco.dead_node, done->node_idx, dlm->node_num); + + mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), + "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " + "node_idx=%u, this node=%u\n", done->dead_node, + dlm->reco.dead_node, done->node_idx, dlm->node_num); + + spin_lock(&dlm_reco_state_lock); + list_for_each_entry(ndata, &dlm->reco.node_data, list) { + if (ndata->node_num != done->node_idx) + continue; + + switch (ndata->state) { + /* should have moved beyond INIT but not to FINALIZE yet */ + case DLM_RECO_NODE_DATA_INIT: + case DLM_RECO_NODE_DATA_DEAD: + case DLM_RECO_NODE_DATA_FINALIZE_SENT: + mlog(ML_ERROR, "bad ndata state for node %u:" + " state=%d\n", ndata->node_num, + ndata->state); + BUG(); + break; + /* these states are possible at this point, anywhere along + * the line of recovery */ + case DLM_RECO_NODE_DATA_DONE: + case DLM_RECO_NODE_DATA_RECEIVING: + case DLM_RECO_NODE_DATA_REQUESTED: + case DLM_RECO_NODE_DATA_REQUESTING: + mlog(0, "node %u is DONE sending " + "recovery data!\n", + ndata->node_num); + + ndata->state = DLM_RECO_NODE_DATA_DONE; + ret = 0; + break; + } + } + spin_unlock(&dlm_reco_state_lock); + + /* wake the recovery thread, some node is done */ + if (!ret) + dlm_kick_recovery_thread(dlm); + + if (ret < 0) + mlog(ML_ERROR, "failed to find recovery node data for node " + "%u\n", done->node_idx); + 
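+ /* ret is still -EINVAL here if no entry in reco.node_data + * matched the sender; returning the error lets the sender see + * that its DATA DONE message was not accepted */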
dlm_put(dlm); + + mlog(0, "leaving reco data done handler, ret=%d\n", ret); + return ret; +} + +static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, + struct list_head *list, + u8 dead_node) +{ + struct dlm_lock_resource *res, *next; + struct dlm_lock *lock; + + spin_lock(&dlm->spinlock); + list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { + /* always prune any $RECOVERY entries for dead nodes, + * otherwise hangs can occur during later recovery */ + if (dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + spin_lock(&res->spinlock); + list_for_each_entry(lock, &res->granted, list) { + if (lock->ml.node == dead_node) { + mlog(0, "AHA! there was " + "a $RECOVERY lock for dead " + "node %u (%s)!\n", + dead_node, dlm->name); + list_del_init(&lock->list); + dlm_lock_put(lock); + break; + } + } + spin_unlock(&res->spinlock); + continue; + } + + if (res->owner == dead_node) { + mlog(0, "found lockres owned by dead node while " + "doing recovery for node %u. sending it.\n", + dead_node); + list_move_tail(&res->recovering, list); + } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { + mlog(0, "found UNKNOWN owner while doing recovery " + "for node %u. sending it.\n", dead_node); + list_move_tail(&res->recovering, list); + } + } + spin_unlock(&dlm->spinlock); +} + +static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) +{ + int total_locks = 0; + struct list_head *iter, *queue = &res->granted; + int i; + + for (i=0; i<3; i++) { + list_for_each(iter, queue) + total_locks++; + queue++; + } + return total_locks; +} + + +static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, + struct dlm_migratable_lockres *mres, + u8 send_to, + struct dlm_lock_resource *res, + int total_locks) +{ + u64 mig_cookie = be64_to_cpu(mres->mig_cookie); + int mres_total_locks = be32_to_cpu(mres->total_locks); + int sz, ret = 0, status = 0; + u8 orig_flags = mres->flags, + orig_master = mres->master; + + BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); + if (!mres->num_locks) + return 0; + + sz = sizeof(struct dlm_migratable_lockres) + + (mres->num_locks * sizeof(struct dlm_migratable_lock)); + + /* add an all-done flag if we reached the last lock */ + orig_flags = mres->flags; + BUG_ON(total_locks > mres_total_locks); + if (total_locks == mres_total_locks) + mres->flags |= DLM_MRES_ALL_DONE; + + mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n", + dlm->name, res->lockname.len, res->lockname.name, + orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery", + send_to); + + /* send it */ + ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, + sz, send_to, &status); + if (ret < 0) { + /* XXX: negative status is not handled. + * this will end up killing this node. 
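+ * (a send failure that is not a host-down is promoted to + * BUG() by the caller, dlm_send_one_lockres().)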
*/ + mlog_errno(ret); + } else { + /* might get an -ENOMEM back here */ + ret = status; + if (ret < 0) { + mlog_errno(ret); + + if (ret == -EFAULT) { + mlog(ML_ERROR, "node %u told me to kill " + "myself!\n", send_to); + BUG(); + } + } + } + + /* zero and reinit the message buffer */ + dlm_init_migratable_lockres(mres, res->lockname.name, + res->lockname.len, mres_total_locks, + mig_cookie, orig_flags, orig_master); + return ret; +} + +static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, + const char *lockname, int namelen, + int total_locks, u64 cookie, + u8 flags, u8 master) +{ + /* mres here is one full page */ + clear_page(mres); + mres->lockname_len = namelen; + memcpy(mres->lockname, lockname, namelen); + mres->num_locks = 0; + mres->total_locks = cpu_to_be32(total_locks); + mres->mig_cookie = cpu_to_be64(cookie); + mres->flags = flags; + mres->master = master; +} + + +/* returns 1 if this lock fills the network structure, + * 0 otherwise */ +static int dlm_add_lock_to_array(struct dlm_lock *lock, + struct dlm_migratable_lockres *mres, int queue) +{ + struct dlm_migratable_lock *ml; + int lock_num = mres->num_locks; + + ml = &(mres->ml[lock_num]); + ml->cookie = lock->ml.cookie; + ml->type = lock->ml.type; + ml->convert_type = lock->ml.convert_type; + ml->highest_blocked = lock->ml.highest_blocked; + ml->list = queue; + if (lock->lksb) { + ml->flags = lock->lksb->flags; + /* send our current lvb */ + if (ml->type == LKM_EXMODE || + ml->type == LKM_PRMODE) { + /* if it is already set, this had better be a PR + * and it has to match */ + if (!dlm_lvb_is_empty(mres->lvb) && + (ml->type == LKM_EXMODE || + memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { + mlog(ML_ERROR, "mismatched lvbs!\n"); + dlm_print_one_lock_resource(lock->lockres); + BUG(); + } + memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); + } + } + ml->node = lock->ml.node; + mres->num_locks++; + /* we reached the max, send this network message */ + if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) + return 1; + return 0; +} + +static void dlm_add_dummy_lock(struct dlm_ctxt *dlm, + struct dlm_migratable_lockres *mres) +{ + struct dlm_lock dummy; + memset(&dummy, 0, sizeof(dummy)); + dummy.ml.cookie = 0; + dummy.ml.type = LKM_IVMODE; + dummy.ml.convert_type = LKM_IVMODE; + dummy.ml.highest_blocked = LKM_IVMODE; + dummy.lksb = NULL; + dummy.ml.node = dlm->node_num; + dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST); +} + +static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm, + struct dlm_migratable_lock *ml, + u8 *nodenum) +{ + if (unlikely(ml->cookie == 0 && + ml->type == LKM_IVMODE && + ml->convert_type == LKM_IVMODE && + ml->highest_blocked == LKM_IVMODE && + ml->list == DLM_BLOCKED_LIST)) { + *nodenum = ml->node; + return 1; + } + return 0; +} + +int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + struct dlm_migratable_lockres *mres, + u8 send_to, u8 flags) +{ + struct list_head *queue; + int total_locks, i; + u64 mig_cookie = 0; + struct dlm_lock *lock; + int ret = 0; + + BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); + + mlog(0, "sending to %u\n", send_to); + + total_locks = dlm_num_locks_in_lockres(res); + if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { + /* rare, but possible */ + mlog(0, "argh. lockres has %d locks. 
this will " + "require more than one network packet to " + "migrate\n", total_locks); + mig_cookie = dlm_get_next_mig_cookie(); + } + + dlm_init_migratable_lockres(mres, res->lockname.name, + res->lockname.len, total_locks, + mig_cookie, flags, res->owner); + + total_locks = 0; + for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { + queue = dlm_list_idx_to_ptr(res, i); + list_for_each_entry(lock, queue, list) { + /* add another lock. */ + total_locks++; + if (!dlm_add_lock_to_array(lock, mres, i)) + continue; + + /* this filled the lock message, + * we must send it immediately. */ + ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, + res, total_locks); + if (ret < 0) + goto error; + } + } + if (total_locks == 0) { + /* send a dummy lock to indicate a mastery reference only */ + mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n", + dlm->name, res->lockname.len, res->lockname.name, + send_to, flags & DLM_MRES_RECOVERY ? "recovery" : + "migration"); + dlm_add_dummy_lock(dlm, mres); + } + /* flush any remaining locks */ + ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); + if (ret < 0) + goto error; + return ret; + +error: + mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n", + dlm->name, ret); + if (!dlm_is_host_down(ret)) + BUG(); + mlog(0, "%s: node %u went down while sending %s " + "lockres %.*s\n", dlm->name, send_to, + flags & DLM_MRES_RECOVERY ? "recovery" : "migration", + res->lockname.len, res->lockname.name); + return ret; +} + + + +/* + * this message will contain no more than one page worth of + * recovery data, and it will work on only one lockres. + * there may be many locks in this page, and we may need to wait + * for additional packets to complete all the locks (rare, but + * possible). + */ +/* + * NOTE: the allocation error cases here are scary + * we really cannot afford to fail an alloc in recovery + * do we spin? returning an error only delays the problem really + */ + +int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_migratable_lockres *mres = + (struct dlm_migratable_lockres *)msg->buf; + int ret = 0; + u8 real_master; + u8 extra_refs = 0; + char *buf = NULL; + struct dlm_work_item *item = NULL; + struct dlm_lock_resource *res = NULL; + + if (!dlm_grab(dlm)) + return -EINVAL; + + BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); + + real_master = mres->master; + if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { + /* cannot migrate a lockres with no master */ + BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); + } + + mlog(0, "%s message received from node %u\n", + (mres->flags & DLM_MRES_RECOVERY) ? + "recovery" : "migration", mres->master); + if (mres->flags & DLM_MRES_ALL_DONE) + mlog(0, "all done flag. all lockres data received!\n"); + + ret = -ENOMEM; + buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); + item = kzalloc(sizeof(*item), GFP_NOFS); + if (!buf || !item) + goto leave; + + /* lookup the lock to see if we have a secondary queue for this + * already... just add the locks in and this will have its owner + * and RECOVERY flag changed when it completes. 
*/ + res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len); + if (res) { + /* this will get a ref on res */ + /* mark it as recovering/migrating and hash it */ + spin_lock(&res->spinlock); + if (mres->flags & DLM_MRES_RECOVERY) { + res->state |= DLM_LOCK_RES_RECOVERING; + } else { + if (res->state & DLM_LOCK_RES_MIGRATING) { + /* this is at least the second + * lockres message */ + mlog(0, "lock %.*s is already migrating\n", + mres->lockname_len, + mres->lockname); + } else if (res->state & DLM_LOCK_RES_RECOVERING) { + /* caller should BUG */ + mlog(ML_ERROR, "node is attempting to migrate " + "lock %.*s, but marked as recovering!\n", + mres->lockname_len, mres->lockname); + ret = -EFAULT; + spin_unlock(&res->spinlock); + goto leave; + } + res->state |= DLM_LOCK_RES_MIGRATING; + } + spin_unlock(&res->spinlock); + } else { + /* need to allocate, just like if it was + * mastered here normally */ + res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); + if (!res) + goto leave; + + /* to match the ref that we would have gotten if + * dlm_lookup_lockres had succeeded */ + dlm_lockres_get(res); + + /* mark it as recovering/migrating and hash it */ + if (mres->flags & DLM_MRES_RECOVERY) + res->state |= DLM_LOCK_RES_RECOVERING; + else + res->state |= DLM_LOCK_RES_MIGRATING; + + spin_lock(&dlm->spinlock); + __dlm_insert_lockres(dlm, res); + spin_unlock(&dlm->spinlock); + + /* Add an extra ref for this lock-less lockres lest the + * dlm_thread purges it before we get the chance to add + * locks to it */ + dlm_lockres_get(res); + + /* There are three refs that need to be put. + * 1. Taken above. + * 2. kref_init in dlm_new_lockres()->dlm_init_lockres(). + * 3. dlm_lookup_lockres() + * The first one is handled at the end of this function. The + * other two are handled in the worker thread after locks have + * been attached. Note that we do not wait for purge time to + * match kref_init; the lockres will still hold at least one + * ref because it is in the hash via __dlm_insert_lockres() */ + extra_refs++; + + /* now that the new lockres is inserted, + * make it usable by other processes */ + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; + spin_unlock(&res->spinlock); + wake_up(&res->wq); + } + + /* at this point we have allocated everything we need, + * and we have a hashed lockres with an extra ref and + * the proper res->state flags. */ + ret = 0; + spin_lock(&res->spinlock); + /* drop this either when master requery finds a different master + * or when a lock is added by the recovery worker */ + dlm_lockres_grab_inflight_ref(dlm, res); + if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { + /* migration cannot have an unknown master */ + BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); + mlog(0, "recovery has passed me a lockres with an " + "unknown owner.. 
will need to requery: " + "%.*s\n", mres->lockname_len, mres->lockname); + } else { + /* take a reference now to pin the lockres, drop it + * when locks are added in the worker */ + dlm_change_lockres_owner(dlm, res, dlm->node_num); + } + spin_unlock(&res->spinlock); + + /* queue up work for dlm_mig_lockres_worker */ + dlm_grab(dlm); /* get an extra ref for the work item */ + memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ + dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); + item->u.ml.lockres = res; /* already have a ref */ + item->u.ml.real_master = real_master; + item->u.ml.extra_ref = extra_refs; + spin_lock(&dlm->work_lock); + list_add_tail(&item->list, &dlm->work_list); + spin_unlock(&dlm->work_lock); + queue_work(dlm->dlm_worker, &dlm->dispatched_work); + +leave: + /* One extra ref taken needs to be put here */ + if (extra_refs) + dlm_lockres_put(res); + + dlm_put(dlm); + if (ret < 0) { + if (buf) + kfree(buf); + if (item) + kfree(item); + } + + mlog_exit(ret); + return ret; +} + + +static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) +{ + struct dlm_ctxt *dlm; + struct dlm_migratable_lockres *mres; + int ret = 0; + struct dlm_lock_resource *res; + u8 real_master; + u8 extra_ref; + + dlm = item->dlm; + mres = (struct dlm_migratable_lockres *)data; + + res = item->u.ml.lockres; + real_master = item->u.ml.real_master; + extra_ref = item->u.ml.extra_ref; + + if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { + /* this case is super-rare. only occurs if + * node death happens during migration. */ +again: + ret = dlm_lockres_master_requery(dlm, res, &real_master); + if (ret < 0) { + mlog(0, "dlm_lockres_master_requery ret=%d\n", + ret); + goto again; + } + if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { + mlog(0, "lockres %.*s not claimed. " + "this node will take it.\n", + res->lockname.len, res->lockname.name); + } else { + spin_lock(&res->spinlock); + dlm_lockres_drop_inflight_ref(dlm, res); + spin_unlock(&res->spinlock); + mlog(0, "master needs to respond to sender " + "that node %u still owns %.*s\n", + real_master, res->lockname.len, + res->lockname.name); + /* cannot touch this lockres */ + goto leave; + } + } + + ret = dlm_process_recovery_data(dlm, res, mres); + if (ret < 0) + mlog(0, "dlm_process_recovery_data returned %d\n", ret); + else + mlog(0, "dlm_process_recovery_data succeeded\n"); + + if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == + (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { + ret = dlm_finish_migration(dlm, res, mres->master); + if (ret < 0) + mlog_errno(ret); + } + +leave: + /* See comment in dlm_mig_lockres_handler() */ + if (res) { + if (extra_ref) + dlm_lockres_put(res); + dlm_lockres_put(res); + } + kfree(data); + mlog_exit(ret); +} + + + +static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + u8 *real_master) +{ + struct dlm_node_iter iter; + int nodenum; + int ret = 0; + + *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; + + /* we only reach here if one of the two nodes in a + * migration died while the migration was in progress. + * at this point we need to requery the master. we + * know that the new_master got as far as creating + * an mle on at least one node, but we do not know + * if any nodes had actually cleared the mle and set + * the master to the new_master. 
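+ * (an mle, or master list entry, is the per-node structure + * that tracks an in-progress mastery or migration.)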
the old master + * is supposed to set the owner to UNKNOWN in the + * event of a new_master death, so the only possible + * responses that we can get from nodes here are + * that the master is new_master, or that the master + * is UNKNOWN. + * if all nodes come back with UNKNOWN then we know + * the lock needs remastering here. + * if any node comes back with a valid master, check + * to see if that master is the one that we are + * recovering. if so, then the new_master died and + * we need to remaster this lock. if not, then the + * new_master survived and that node will respond to + * other nodes about the owner. + * if there is an owner, this node needs to dump this + * lockres and alert the sender that this lockres + * was rejected. */ + spin_lock(&dlm->spinlock); + dlm_node_iter_init(dlm->domain_map, &iter); + spin_unlock(&dlm->spinlock); + + while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { + /* do not send to self */ + if (nodenum == dlm->node_num) + continue; + ret = dlm_do_master_requery(dlm, res, nodenum, real_master); + if (ret < 0) { + mlog_errno(ret); + if (!dlm_is_host_down(ret)) + BUG(); + /* host is down, so answer for that node would be + * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ + } + if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { + mlog(0, "lock master is %u\n", *real_master); + break; + } + } + return ret; +} + + +int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, + u8 nodenum, u8 *real_master) +{ + int ret = -EINVAL; + struct dlm_master_requery req; + int status = DLM_LOCK_RES_OWNER_UNKNOWN; + + memset(&req, 0, sizeof(req)); + req.node_idx = dlm->node_num; + req.namelen = res->lockname.len; + memcpy(req.name, res->lockname.name, res->lockname.len); + + ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, + &req, sizeof(req), nodenum, &status); + /* XXX: negative status not handled properly here. */ + if (ret < 0) + mlog_errno(ret); + else { + BUG_ON(status < 0); + BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); + *real_master = (u8) (status & 0xff); + mlog(0, "node %u responded to master requery with %u\n", + nodenum, *real_master); + ret = 0; + } + return ret; +} + + +/* this function cannot error, so unless the sending + * or receiving of the message failed, the owner can + * be trusted */ +int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; + struct dlm_lock_resource *res = NULL; + unsigned int hash; + int master = DLM_LOCK_RES_OWNER_UNKNOWN; + u32 flags = DLM_ASSERT_MASTER_REQUERY; + + if (!dlm_grab(dlm)) { + /* since the domain has gone away on this + * node, the proper response is UNKNOWN */ + return master; + } + + hash = dlm_lockid_hash(req->name, req->namelen); + + spin_lock(&dlm->spinlock); + res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); + if (res) { + spin_lock(&res->spinlock); + master = res->owner; + if (master == dlm->node_num) { + int ret = dlm_dispatch_assert_master(dlm, res, + 0, 0, flags); + if (ret < 0) { + mlog_errno(-ENOMEM); + /* retry!? */ + BUG(); + } + } else /* put.. 
incase we are not the master */ + dlm_lockres_put(res); + spin_unlock(&res->spinlock); + } + spin_unlock(&dlm->spinlock); + + dlm_put(dlm); + return master; +} + +static inline struct list_head * +dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) +{ + struct list_head *ret; + BUG_ON(list_num < 0); + BUG_ON(list_num > 2); + ret = &(res->granted); + ret += list_num; + return ret; +} +/* TODO: do ast flush business + * TODO: do MIGRATING and RECOVERING spinning + */ + +/* +* NOTE about in-flight requests during migration: +* +* Before attempting the migrate, the master has marked the lockres as +* MIGRATING and then flushed all of its pending ASTS. So any in-flight +* requests either got queued before the MIGRATING flag got set, in which +* case the lock data will reflect the change and a return message is on +* the way, or the request failed to get in before MIGRATING got set. In +* this case, the caller will be told to spin and wait for the MIGRATING +* flag to be dropped, then recheck the master. +* This holds true for the convert, cancel and unlock cases, and since lvb +* updates are tied to these same messages, it applies to lvb updates as +* well. For the lock case, there is no way a lock can be on the master +* queue and not be on the secondary queue since the lock is always added +* locally first. This means that the new target node will never be sent +* a lock that he doesn't already have on the list. +* In total, this means that the local lock is correct and should not be +* updated to match the one sent by the master. Any messages sent back +* from the master before the MIGRATING flag will bring the lock properly +* up-to-date, and the change will be ordered properly for the waiter. +* We will *not* attempt to modify the lock underneath the waiter. +*/ + +static int dlm_process_recovery_data(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_migratable_lockres *mres) +{ + struct dlm_migratable_lock *ml; + struct list_head *queue; + struct list_head *tmpq = NULL; + struct dlm_lock *newlock = NULL; + struct dlm_lockstatus *lksb = NULL; + int ret = 0; + int i, j, bad; + struct dlm_lock *lock = NULL; + u8 from = O2NM_MAX_NODES; + unsigned int added = 0; + + mlog(0, "running %d locks for this lockres\n", mres->num_locks); + for (i=0; i<mres->num_locks; i++) { + ml = &(mres->ml[i]); + + if (dlm_is_dummy_lock(dlm, ml, &from)) { + /* placeholder, just need to set the refmap bit */ + BUG_ON(mres->num_locks != 1); + mlog(0, "%s:%.*s: dummy lock for %u\n", + dlm->name, mres->lockname_len, mres->lockname, + from); + spin_lock(&res->spinlock); + dlm_lockres_set_refmap_bit(from, res); + spin_unlock(&res->spinlock); + added++; + break; + } + BUG_ON(ml->highest_blocked != LKM_IVMODE); + newlock = NULL; + lksb = NULL; + + queue = dlm_list_num_to_pointer(res, ml->list); + tmpq = NULL; + + /* if the lock is for the local node it needs to + * be moved to the proper location within the queue. + * do not allocate a new lock structure. */ + if (ml->node == dlm->node_num) { + /* MIGRATION ONLY! */ + BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); + + spin_lock(&res->spinlock); + for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) { + tmpq = dlm_list_idx_to_ptr(res, j); + list_for_each_entry(lock, tmpq, list) { + if (lock->ml.cookie != ml->cookie) + lock = NULL; + else + break; + } + if (lock) + break; + } + + /* lock is always created locally first, and + * destroyed locally last. 
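+ * (see the NOTE above: a request is placed on the local + * queues before the master is ever contacted, so a migrated + * lock for this node must have a local counterpart.)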
it must be on the list */ + if (!lock) { + __be64 c = ml->cookie; + mlog(ML_ERROR, "could not find local lock " + "with cookie %u:%llu!\n", + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c))); + __dlm_print_one_lock_resource(res); + BUG(); + } + BUG_ON(lock->ml.node != ml->node); + + if (tmpq != queue) { + mlog(0, "lock was on %u instead of %u for %.*s\n", + j, ml->list, res->lockname.len, res->lockname.name); + spin_unlock(&res->spinlock); + continue; + } + + /* see NOTE above about why we do not update + * to match the master here */ + + /* move the lock to its proper place */ + /* do not alter lock refcount. switching lists. */ + list_move_tail(&lock->list, queue); + spin_unlock(&res->spinlock); + added++; + + mlog(0, "just reordered a local lock!\n"); + continue; + } + + /* lock is for another node. */ + newlock = dlm_new_lock(ml->type, ml->node, + be64_to_cpu(ml->cookie), NULL); + if (!newlock) { + ret = -ENOMEM; + goto leave; + } + lksb = newlock->lksb; + dlm_lock_attach_lockres(newlock, res); + + if (ml->convert_type != LKM_IVMODE) { + BUG_ON(queue != &res->converting); + newlock->ml.convert_type = ml->convert_type; + } + lksb->flags |= (ml->flags & + (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); + + if (ml->type == LKM_NLMODE) + goto skip_lvb; + + if (!dlm_lvb_is_empty(mres->lvb)) { + if (lksb->flags & DLM_LKSB_PUT_LVB) { + /* other node was trying to update + * lvb when node died. recreate the + * lksb with the updated lvb. */ + memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); + /* the lock resource lvb update must happen + * NOW, before the spinlock is dropped. + * we no longer wait for the AST to update + * the lvb. */ + memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); + } else { + /* otherwise, the node is sending its + * most recent valid lvb info */ + BUG_ON(ml->type != LKM_EXMODE && + ml->type != LKM_PRMODE); + if (!dlm_lvb_is_empty(res->lvb) && + (ml->type == LKM_EXMODE || + memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { + int i; + mlog(ML_ERROR, "%s:%.*s: received bad " + "lvb! type=%d\n", dlm->name, + res->lockname.len, + res->lockname.name, ml->type); + printk("lockres lvb=["); + for (i=0; i<DLM_LVB_LEN; i++) + printk("%02x", res->lvb[i]); + printk("]\nmigrated lvb=["); + for (i=0; i<DLM_LVB_LEN; i++) + printk("%02x", mres->lvb[i]); + printk("]\n"); + dlm_print_one_lock_resource(res); + BUG(); + } + memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); + } + } +skip_lvb: + + /* NOTE: + * wrt lock queue ordering and recovery: + * 1. order of locks on granted queue is + * meaningless. + * 2. order of locks on converting queue is + * LOST with the node death. sorry charlie. + * 3. order of locks on the blocked queue is + * also LOST. + * order of locks does not affect integrity, it + * just means that a lock request may get pushed + * back in line as a result of the node death. + * also note that for a given node the lock order + * for its secondary queue locks is preserved + * relative to each other, but clearly *not* + * preserved relative to locks from other nodes. 
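+ * the duplicate-cookie scan below guards against the master + * sending the same lock twice: a duplicate is logged and + * skipped instead of being queued again.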
+ */ + bad = 0; + spin_lock(&res->spinlock); + list_for_each_entry(lock, queue, list) { + if (lock->ml.cookie == ml->cookie) { + __be64 c = lock->ml.cookie; + mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " + "exists on this lockres!\n", dlm->name, + res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(c)), + dlm_get_lock_cookie_seq(be64_to_cpu(c))); + + mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, " + "node=%u, cookie=%u:%llu, queue=%d\n", + ml->type, ml->convert_type, ml->node, + dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), + ml->list); + + __dlm_print_one_lock_resource(res); + bad = 1; + break; + } + } + if (!bad) { + dlm_lock_get(newlock); + list_add_tail(&newlock->list, queue); + mlog(0, "%s:%.*s: added lock for node %u, " + "setting refmap bit\n", dlm->name, + res->lockname.len, res->lockname.name, ml->node); + dlm_lockres_set_refmap_bit(ml->node, res); + added++; + } + spin_unlock(&res->spinlock); + } + mlog(0, "done running all the locks\n"); + +leave: + /* balance the ref taken when the work was queued */ + spin_lock(&res->spinlock); + dlm_lockres_drop_inflight_ref(dlm, res); + spin_unlock(&res->spinlock); + + if (ret < 0) { + mlog_errno(ret); + if (newlock) + dlm_lock_put(newlock); + } + + mlog_exit(ret); + return ret; +} + +void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + int i; + struct list_head *queue; + struct dlm_lock *lock, *next; + + res->state |= DLM_LOCK_RES_RECOVERING; + if (!list_empty(&res->recovering)) { + mlog(0, + "Recovering res %s:%.*s, is already on recovery list!\n", + dlm->name, res->lockname.len, res->lockname.name); + list_del_init(&res->recovering); + dlm_lockres_put(res); + } + /* We need to hold a reference while on the recovery list */ + dlm_lockres_get(res); + list_add_tail(&res->recovering, &dlm->reco.resources); + + /* find any pending locks and put them back on proper list */ + for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { + queue = dlm_list_idx_to_ptr(res, i); + list_for_each_entry_safe(lock, next, queue, list) { + dlm_lock_get(lock); + if (lock->convert_pending) { + /* move converting lock back to granted */ + BUG_ON(i != DLM_CONVERTING_LIST); + mlog(0, "node died with convert pending " + "on %.*s. move back to granted list.\n", + res->lockname.len, res->lockname.name); + dlm_revert_pending_convert(res, lock); + lock->convert_pending = 0; + } else if (lock->lock_pending) { + /* remove pending lock requests completely */ + BUG_ON(i != DLM_BLOCKED_LIST); + mlog(0, "node died with lock pending " + "on %.*s. remove from blocked list and skip.\n", + res->lockname.len, res->lockname.name); + /* lock will be floating until ref in + * dlmlock_remote is freed after the network + * call returns. ok for it to not be on any + * list since no ast can be called + * (the master is dead). */ + dlm_revert_pending_lock(res, lock); + lock->lock_pending = 0; + } else if (lock->unlock_pending) { + /* if an unlock was in progress, treat as + * if this had completed successfully + * before sending this lock state to the + * new master. note that the dlm_unlock + * call is still responsible for calling + * the unlockast. that will happen after + * the network call times out. for now, + * just move lists to prepare the new + * recovery master. */ + BUG_ON(i != DLM_GRANTED_LIST); + mlog(0, "node died with unlock pending " + "on %.*s. 
remove from blocked list and skip.\n", + res->lockname.len, res->lockname.name); + dlm_commit_pending_unlock(res, lock); + lock->unlock_pending = 0; + } else if (lock->cancel_pending) { + /* if a cancel was in progress, treat as + * if this had completed successfully + * before sending this lock state to the + * new master */ + BUG_ON(i != DLM_CONVERTING_LIST); + mlog(0, "node died with cancel pending " + "on %.*s. move back to granted list.\n", + res->lockname.len, res->lockname.name); + dlm_commit_pending_cancel(res, lock); + lock->cancel_pending = 0; + } + dlm_lock_put(lock); + } + } +} + + + +/* removes all recovered locks from the recovery list. + * sets the res->owner to the new master. + * unsets the RECOVERY flag and wakes waiters. */ +static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, + u8 dead_node, u8 new_master) +{ + int i; + struct hlist_node *hash_iter; + struct hlist_head *bucket; + struct dlm_lock_resource *res, *next; + + mlog_entry_void(); + + assert_spin_locked(&dlm->spinlock); + + list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { + if (res->owner == dead_node) { + list_del_init(&res->recovering); + spin_lock(&res->spinlock); + /* new_master has our reference from + * the lock state sent during recovery */ + dlm_change_lockres_owner(dlm, res, new_master); + res->state &= ~DLM_LOCK_RES_RECOVERING; + if (__dlm_lockres_has_locks(res)) + __dlm_dirty_lockres(dlm, res); + spin_unlock(&res->spinlock); + wake_up(&res->wq); + dlm_lockres_put(res); + } + } + + /* this will become unnecessary eventually, but + * for now we need to run the whole hash, clear + * the RECOVERING state and set the owner + * if necessary */ + for (i = 0; i < DLM_HASH_BUCKETS; i++) { + bucket = dlm_lockres_hash(dlm, i); + hlist_for_each_entry(res, hash_iter, bucket, hash_node) { + if (res->state & DLM_LOCK_RES_RECOVERING) { + if (res->owner == dead_node) { + mlog(0, "(this=%u) res %.*s owner=%u " + "was not on recovering list, but " + "clearing state anyway\n", + dlm->node_num, res->lockname.len, + res->lockname.name, new_master); + } else if (res->owner == dlm->node_num) { + mlog(0, "(this=%u) res %.*s owner=%u " + "was not on recovering list, " + "owner is THIS node, clearing\n", + dlm->node_num, res->lockname.len, + res->lockname.name, new_master); + } else + continue; + + if (!list_empty(&res->recovering)) { + mlog(0, "%s:%.*s: lockres was " + "marked RECOVERING, owner=%u\n", + dlm->name, res->lockname.len, + res->lockname.name, res->owner); + list_del_init(&res->recovering); + dlm_lockres_put(res); + } + spin_lock(&res->spinlock); + /* new_master has our reference from + * the lock state sent during recovery */ + dlm_change_lockres_owner(dlm, res, new_master); + res->state &= ~DLM_LOCK_RES_RECOVERING; + if (__dlm_lockres_has_locks(res)) + __dlm_dirty_lockres(dlm, res); + spin_unlock(&res->spinlock); + wake_up(&res->wq); + } + } + } +} + +static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) +{ + if (local) { + if (lock->ml.type != LKM_EXMODE && + lock->ml.type != LKM_PRMODE) + return 1; + } else if (lock->ml.type == LKM_EXMODE) + return 1; + return 0; +} + +static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, u8 dead_node) +{ + struct list_head *queue; + struct dlm_lock *lock; + int blank_lvb = 0, local = 0; + int i; + u8 search_node; + + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&res->spinlock); + + if (res->owner == dlm->node_num) + /* if this node owned the lockres, and if the dead node 
+ * had an EX when he died, blank out the lvb */ + search_node = dead_node; + else { + /* if this is a secondary lockres, and we had no EX or PR + * locks granted, we can no longer trust the lvb */ + search_node = dlm->node_num; + local = 1; /* check local state for valid lvb */ + } + + for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { + queue = dlm_list_idx_to_ptr(res, i); + list_for_each_entry(lock, queue, list) { + if (lock->ml.node == search_node) { + if (dlm_lvb_needs_invalidation(lock, local)) { + /* zero the lksb lvb and lockres lvb */ + blank_lvb = 1; + memset(lock->lksb->lvb, 0, DLM_LVB_LEN); + } + } + } + } + + if (blank_lvb) { + mlog(0, "clearing %.*s lvb, dead node %u had EX\n", + res->lockname.len, res->lockname.name, dead_node); + memset(res->lvb, 0, DLM_LVB_LEN); + } +} + +static void dlm_free_dead_locks(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, u8 dead_node) +{ + struct dlm_lock *lock, *next; + unsigned int freed = 0; + + /* this node is the lockres master: + * 1) remove any stale locks for the dead node + * 2) if the dead node had an EX when he died, blank out the lvb + */ + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&res->spinlock); + + /* We do two dlm_lock_put(). One for removing from list and the other is + * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */ + + /* TODO: check pending_asts, pending_basts here */ + list_for_each_entry_safe(lock, next, &res->granted, list) { + if (lock->ml.node == dead_node) { + list_del_init(&lock->list); + dlm_lock_put(lock); + /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ + dlm_lock_put(lock); + freed++; + } + } + list_for_each_entry_safe(lock, next, &res->converting, list) { + if (lock->ml.node == dead_node) { + list_del_init(&lock->list); + dlm_lock_put(lock); + /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ + dlm_lock_put(lock); + freed++; + } + } + list_for_each_entry_safe(lock, next, &res->blocked, list) { + if (lock->ml.node == dead_node) { + list_del_init(&lock->list); + dlm_lock_put(lock); + /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */ + dlm_lock_put(lock); + freed++; + } + } + + if (freed) { + mlog(0, "%s:%.*s: freed %u locks for dead node %u, " + "dropping ref from lockres\n", dlm->name, + res->lockname.len, res->lockname.name, freed, dead_node); + BUG_ON(!test_bit(dead_node, res->refmap)); + dlm_lockres_clear_refmap_bit(dead_node, res); + } else if (test_bit(dead_node, res->refmap)) { + mlog(0, "%s:%.*s: dead node %u had a ref, but had " + "no locks and had not purged before dying\n", dlm->name, + res->lockname.len, res->lockname.name, dead_node); + dlm_lockres_clear_refmap_bit(dead_node, res); + } + + /* do not kick thread yet */ + __dlm_dirty_lockres(dlm, res); +} + +/* if this node is the recovery master, and there are no + * locks for a given lockres owned by this node that are in + * either PR or EX mode, zero out the lvb before requesting. + * + */ + + +static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) +{ + struct hlist_node *iter; + struct dlm_lock_resource *res; + int i; + struct hlist_head *bucket; + struct dlm_lock *lock; + + + /* purge any stale mles */ + dlm_clean_master_list(dlm, dead_node); + + /* + * now clean up all lock resources. there are two rules: + * + * 1) if the dead node was the master, move the lockres + * to the recovering list. set the RECOVERING flag. + * this lockres needs to be cleaned up before it can + * be used further. 
+ * + * 2) if this node was the master, remove all locks from + * each of the lockres queues that were owned by the + * dead node. once recovery finishes, the dlm thread + * can be kicked again to see if any ASTs or BASTs + * need to be fired as a result. + */ + for (i = 0; i < DLM_HASH_BUCKETS; i++) { + bucket = dlm_lockres_hash(dlm, i); + hlist_for_each_entry(res, iter, bucket, hash_node) { + /* always prune any $RECOVERY entries for dead nodes, + * otherwise hangs can occur during later recovery */ + if (dlm_is_recovery_lock(res->lockname.name, + res->lockname.len)) { + spin_lock(&res->spinlock); + list_for_each_entry(lock, &res->granted, list) { + if (lock->ml.node == dead_node) { + mlog(0, "AHA! there was " + "a $RECOVERY lock for dead " + "node %u (%s)!\n", + dead_node, dlm->name); + list_del_init(&lock->list); + dlm_lock_put(lock); + break; + } + } + spin_unlock(&res->spinlock); + continue; + } + spin_lock(&res->spinlock); + /* zero the lvb if necessary */ + dlm_revalidate_lvb(dlm, res, dead_node); + if (res->owner == dead_node) { + if (res->state & DLM_LOCK_RES_DROPPING_REF) + mlog(0, "%s:%.*s: owned by " + "dead node %u, this node was " + "dropping its ref when it died. " + "continue, dropping the flag.\n", + dlm->name, res->lockname.len, + res->lockname.name, dead_node); + + /* the wake_up for this will happen when the + * RECOVERING flag is dropped later */ + res->state &= ~DLM_LOCK_RES_DROPPING_REF; + + dlm_move_lockres_to_recovery_list(dlm, res); + } else if (res->owner == dlm->node_num) { + dlm_free_dead_locks(dlm, res, dead_node); + __dlm_lockres_calc_usage(dlm, res); + } + spin_unlock(&res->spinlock); + } + } + +} + +static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) +{ + assert_spin_locked(&dlm->spinlock); + + if (dlm->reco.new_master == idx) { + mlog(0, "%s: recovery master %d just died\n", + dlm->name, idx); + if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { + /* finalize1 was reached, so it is safe to clear + * the new_master and dead_node. that recovery + * is complete. */ + mlog(0, "%s: dead master %d had reached " + "finalize1 state, clearing\n", dlm->name, idx); + dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; + __dlm_reset_recovery(dlm); + } + } + + /* Clean up join state on node death. */ + if (dlm->joining_node == idx) { + mlog(0, "Clearing join state for node %u\n", idx); + __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); + } + + /* check to see if the node is already considered dead */ + if (!test_bit(idx, dlm->live_nodes_map)) { + mlog(0, "for domain %s, node %d is already dead. " + "another node likely did recovery already.\n", + dlm->name, idx); + return; + } + + /* check to see if we do not care about this node */ + if (!test_bit(idx, dlm->domain_map)) { + /* This also catches the case that we get a node down + * but haven't joined the domain yet. */ + mlog(0, "node %u already removed from domain!\n", idx); + return; + } + + clear_bit(idx, dlm->live_nodes_map); + + /* make sure local cleanup occurs before the heartbeat events */ + if (!test_bit(idx, dlm->recovery_map)) + dlm_do_local_recovery_cleanup(dlm, idx); + + /* notify anything attached to the heartbeat events */ + dlm_hb_event_notify_attached(dlm, idx, 0); + + mlog(0, "node %u being removed from domain map!\n", idx); + clear_bit(idx, dlm->domain_map); + /* wake up migration waiters if a node goes down. + * perhaps later we can genericize this for other waiters. 
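+ * (the recovery_map bit is set last, below, and the cleanup + * above is guarded by that same bit, so + * dlm_do_local_recovery_cleanup() runs at most once per death.)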
*/ + wake_up(&dlm->migration_wq); + + if (test_bit(idx, dlm->recovery_map)) + mlog(0, "domain %s, node %u already added " + "to recovery map!\n", dlm->name, idx); + else + set_bit(idx, dlm->recovery_map); +} + +void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) +{ + struct dlm_ctxt *dlm = data; + + if (!dlm_grab(dlm)) + return; + + /* + * This will notify any dlm users that a node in our domain + * went away without notifying us first. + */ + if (test_bit(idx, dlm->domain_map)) + dlm_fire_domain_eviction_callbacks(dlm, idx); + + spin_lock(&dlm->spinlock); + __dlm_hb_node_down(dlm, idx); + spin_unlock(&dlm->spinlock); + + dlm_put(dlm); +} + +void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) +{ + struct dlm_ctxt *dlm = data; + + if (!dlm_grab(dlm)) + return; + + spin_lock(&dlm->spinlock); + set_bit(idx, dlm->live_nodes_map); + /* do NOT notify mle attached to the heartbeat events. + * new nodes are not interesting in mastery until joined. */ + spin_unlock(&dlm->spinlock); + + dlm_put(dlm); +} + +static void dlm_reco_ast(void *astdata) +{ + struct dlm_ctxt *dlm = astdata; + mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", + dlm->node_num, dlm->name); +} +static void dlm_reco_bast(void *astdata, int blocked_type) +{ + struct dlm_ctxt *dlm = astdata; + mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", + dlm->node_num, dlm->name); +} +static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) +{ + mlog(0, "unlockast for recovery lock fired!\n"); +} + +/* + * dlm_pick_recovery_master will continually attempt to use + * dlmlock() on the special "$RECOVERY" lockres with the + * LKM_NOQUEUE flag to get an EX. every thread that enters + * this function on each node racing to become the recovery + * master will not stop attempting this until either: + * a) this node gets the EX (and becomes the recovery master), + * or b) dlm->reco.new_master gets set to some nodenum + * != O2NM_INVALID_NODE_NUM (another node will do the reco). + * so each time a recovery master is needed, the entire cluster + * will sync at this point. if the new master dies, that will + * be detected in dlm_do_recovery */ +static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) +{ + enum dlm_status ret; + struct dlm_lockstatus lksb; + int status = -EINVAL; + + mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", + dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); +again: + memset(&lksb, 0, sizeof(lksb)); + + ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, + DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN, + dlm_reco_ast, dlm, dlm_reco_bast); + + mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n", + dlm->name, ret, lksb.status); + + if (ret == DLM_NORMAL) { + mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", + dlm->name, dlm->node_num); + + /* got the EX lock. 
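+ * (holding the $RECOVERY EX is not enough by itself; a + * begin_reco message naming another master may already + * have arrived.)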
check to see if another node + * just became the reco master */ + if (dlm_reco_master_ready(dlm)) { + mlog(0, "%s: got reco EX lock, but %u will " + "do the recovery\n", dlm->name, + dlm->reco.new_master); + status = -EEXIST; + } else { + status = 0; + + /* see if recovery was already finished elsewhere */ + spin_lock(&dlm->spinlock); + if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { + status = -EINVAL; + mlog(0, "%s: got reco EX lock, but " + "node got recovered already\n", dlm->name); + if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { + mlog(ML_ERROR, "%s: new master is %u " + "but no dead node!\n", + dlm->name, dlm->reco.new_master); + BUG(); + } + } + spin_unlock(&dlm->spinlock); + } + + /* if this node has actually become the recovery master, + * set the master and send the messages to begin recovery */ + if (!status) { + mlog(0, "%s: dead=%u, this=%u, sending " + "begin_reco now\n", dlm->name, + dlm->reco.dead_node, dlm->node_num); + status = dlm_send_begin_reco_message(dlm, + dlm->reco.dead_node); + /* this always succeeds */ + BUG_ON(status); + + /* set the new_master to this node */ + spin_lock(&dlm->spinlock); + dlm_set_reco_master(dlm, dlm->node_num); + spin_unlock(&dlm->spinlock); + } + + /* recovery lock is a special case. ast will not get fired, + * so just go ahead and unlock it. */ + ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); + if (ret == DLM_DENIED) { + mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n"); + ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm); + } + if (ret != DLM_NORMAL) { + /* this would really suck. this could only happen + * if there was a network error during the unlock + * because of node death. this means the unlock + * is actually "done" and the lock structure is + * even freed. we can continue, but only + * because this specific lock name is special. */ + mlog(ML_ERROR, "dlmunlock returned %d\n", ret); + } + } else if (ret == DLM_NOTQUEUED) { + mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", + dlm->name, dlm->node_num); + /* another node is master. 
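+ * (LKM_NOQUEUE made dlmlock return DLM_NOTQUEUED rather than + * block, so whichever node holds the EX won the election.)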
wait on + * reco.new_master != O2NM_INVALID_NODE_NUM + * for at most one second */ + wait_event_timeout(dlm->dlm_reco_thread_wq, + dlm_reco_master_ready(dlm), + msecs_to_jiffies(1000)); + if (!dlm_reco_master_ready(dlm)) { + mlog(0, "%s: reco master taking awhile\n", + dlm->name); + goto again; + } + /* another node has informed this one that it is reco master */ + mlog(0, "%s: reco master %u is ready to recover %u\n", + dlm->name, dlm->reco.new_master, dlm->reco.dead_node); + status = -EEXIST; + } else if (ret == DLM_RECOVERING) { + mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n", + dlm->name, dlm->node_num); + goto again; + } else { + struct dlm_lock_resource *res; + + /* dlmlock returned something other than NOTQUEUED or NORMAL */ + mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), " + "lksb.status=%s\n", dlm->name, dlm_errname(ret), + dlm_errname(lksb.status)); + res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, + DLM_RECOVERY_LOCK_NAME_LEN); + if (res) { + dlm_print_one_lock_resource(res); + dlm_lockres_put(res); + } else { + mlog(ML_ERROR, "recovery lock not found\n"); + } + BUG(); + } + + return status; +} + +static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) +{ + struct dlm_begin_reco br; + int ret = 0; + struct dlm_node_iter iter; + int nodenum; + int status; + + mlog_entry("%u\n", dead_node); + + mlog(0, "%s: dead node is %u\n", dlm->name, dead_node); + + spin_lock(&dlm->spinlock); + dlm_node_iter_init(dlm->domain_map, &iter); + spin_unlock(&dlm->spinlock); + + clear_bit(dead_node, iter.node_map); + + memset(&br, 0, sizeof(br)); + br.node_idx = dlm->node_num; + br.dead_node = dead_node; + + while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { + ret = 0; + if (nodenum == dead_node) { + mlog(0, "not sending begin reco to dead node " + "%u\n", dead_node); + continue; + } + if (nodenum == dlm->node_num) { + mlog(0, "not sending begin reco to self\n"); + continue; + } +retry: + ret = -EINVAL; + mlog(0, "attempting to send begin reco msg to %d\n", + nodenum); + ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, + &br, sizeof(br), nodenum, &status); + /* negative status is handled ok by caller here */ + if (ret >= 0) + ret = status; + if (dlm_is_host_down(ret)) { + /* node is down. not involved in recovery + * so just keep going */ + mlog(0, "%s: node %u was down when sending " + "begin reco msg (%d)\n", dlm->name, nodenum, ret); + ret = 0; + } + if (ret < 0) { + struct dlm_lock_resource *res; + /* this is now a serious problem, possibly ENOMEM + * in the network stack. 
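+ * (host-down was already handled above and cleared the error, + * so the target node is alive; giving up here would leave the + * recovery stalled.)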
must retry */ + mlog_errno(ret); + mlog(ML_ERROR, "begin reco of dlm %s to node %u " + " returned %d\n", dlm->name, nodenum, ret); + res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, + DLM_RECOVERY_LOCK_NAME_LEN); + if (res) { + dlm_print_one_lock_resource(res); + dlm_lockres_put(res); + } else { + mlog(ML_ERROR, "recovery lock not found\n"); + } + /* sleep for a bit in hopes that we can avoid + * another ENOMEM */ + msleep(100); + goto retry; + } else if (ret == EAGAIN) { + mlog(0, "%s: trying to start recovery of node " + "%u, but node %u is waiting for last recovery " + "to complete, backoff for a bit\n", dlm->name, + dead_node, nodenum); + /* TODO Look into replacing msleep with cond_resched() */ + msleep(100); + goto retry; + } + } + + return ret; +} + +int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; + + /* ok to return 0, domain has gone away */ + if (!dlm_grab(dlm)) + return 0; + + spin_lock(&dlm->spinlock); + if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { + mlog(0, "%s: node %u wants to recover node %u (%u:%u) " + "but this node is in finalize state, waiting on finalize2\n", + dlm->name, br->node_idx, br->dead_node, + dlm->reco.dead_node, dlm->reco.new_master); + spin_unlock(&dlm->spinlock); + return EAGAIN; + } + spin_unlock(&dlm->spinlock); + + mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n", + dlm->name, br->node_idx, br->dead_node, + dlm->reco.dead_node, dlm->reco.new_master); + + dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); + + spin_lock(&dlm->spinlock); + if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { + if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { + mlog(0, "%s: new_master %u died, changing " + "to %u\n", dlm->name, dlm->reco.new_master, + br->node_idx); + } else { + mlog(0, "%s: new_master %u NOT DEAD, changing " + "to %u\n", dlm->name, dlm->reco.new_master, + br->node_idx); + /* may not have seen the new master as dead yet */ + } + } + if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { + mlog(ML_NOTICE, "%s: dead_node previously set to %u, " + "node %u changing it to %u\n", dlm->name, + dlm->reco.dead_node, br->node_idx, br->dead_node); + } + dlm_set_reco_master(dlm, br->node_idx); + dlm_set_reco_dead_node(dlm, br->dead_node); + if (!test_bit(br->dead_node, dlm->recovery_map)) { + mlog(0, "recovery master %u sees %u as dead, but this " + "node has not yet. 
marking %u as dead\n", + br->node_idx, br->dead_node, br->dead_node); + if (!test_bit(br->dead_node, dlm->domain_map) || + !test_bit(br->dead_node, dlm->live_nodes_map)) + mlog(0, "%u not in domain/live_nodes map " + "so setting it in reco map manually\n", + br->dead_node); + /* force the recovery cleanup in __dlm_hb_node_down + * both of these will be cleared in a moment */ + set_bit(br->dead_node, dlm->domain_map); + set_bit(br->dead_node, dlm->live_nodes_map); + __dlm_hb_node_down(dlm, br->dead_node); + } + spin_unlock(&dlm->spinlock); + + dlm_kick_recovery_thread(dlm); + + mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", + dlm->name, br->node_idx, br->dead_node, + dlm->reco.dead_node, dlm->reco.new_master); + + dlm_put(dlm); + return 0; +} + +#define DLM_FINALIZE_STAGE2 0x01 +static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) +{ + int ret = 0; + struct dlm_finalize_reco fr; + struct dlm_node_iter iter; + int nodenum; + int status; + int stage = 1; + + mlog(0, "finishing recovery for node %s:%u, " + "stage %d\n", dlm->name, dlm->reco.dead_node, stage); + + spin_lock(&dlm->spinlock); + dlm_node_iter_init(dlm->domain_map, &iter); + spin_unlock(&dlm->spinlock); + +stage2: + memset(&fr, 0, sizeof(fr)); + fr.node_idx = dlm->node_num; + fr.dead_node = dlm->reco.dead_node; + if (stage == 2) + fr.flags |= DLM_FINALIZE_STAGE2; + + while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { + if (nodenum == dlm->node_num) + continue; + ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, + &fr, sizeof(fr), nodenum, &status); + if (ret >= 0) + ret = status; + if (ret < 0) { + mlog_errno(ret); + if (dlm_is_host_down(ret)) { + /* this has no effect on this recovery + * session, so set the status to zero to + * finish out the last recovery */ + mlog(ML_ERROR, "node %u went down after this " + "node finished recovery.\n", nodenum); + ret = 0; + continue; + } + break; + } + } + if (stage == 1) { + /* reset the node_iter back to the top and send finalize2 */ + iter.curnode = -1; + stage = 2; + goto stage2; + } + + return ret; +} + +int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; + int stage = 1; + + /* ok to return 0, domain has gone away */ + if (!dlm_grab(dlm)) + return 0; + + if (fr->flags & DLM_FINALIZE_STAGE2) + stage = 2; + + mlog(0, "%s: node %u finalizing recovery stage%d of " + "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, + fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); + + spin_lock(&dlm->spinlock); + + if (dlm->reco.new_master != fr->node_idx) { + mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " + "%u is supposed to be the new master, dead=%u\n", + fr->node_idx, dlm->reco.new_master, fr->dead_node); + BUG(); + } + if (dlm->reco.dead_node != fr->dead_node) { + mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " + "node %u, but node %u is supposed to be dead\n", + fr->node_idx, fr->dead_node, dlm->reco.dead_node); + BUG(); + } + + switch (stage) { + case 1: + dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); + if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) { + mlog(ML_ERROR, "%s: received finalize1 from " + "new master %u for dead node %u, but " + "this node has already received it!\n", + dlm->name, fr->node_idx, fr->dead_node); + dlm_print_reco_node_status(dlm); + BUG(); + } + dlm->reco.state |= DLM_RECO_STATE_FINALIZE; + spin_unlock(&dlm->spinlock); + break; + 
case 2: + if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { + mlog(ML_ERROR, "%s: received finalize2 from " + "new master %u for dead node %u, but " + "this node did not have finalize1!\n", + dlm->name, fr->node_idx, fr->dead_node); + dlm_print_reco_node_status(dlm); + BUG(); + } + dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; + spin_unlock(&dlm->spinlock); + dlm_reset_recovery(dlm); + dlm_kick_recovery_thread(dlm); + break; + default: + BUG(); + } + + mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", + dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); + + dlm_put(dlm); + return 0; +} diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c new file mode 100644 index 0000000..4060bb3 --- /dev/null +++ b/fs/ocfs2/dlm/dlmthread.c @@ -0,0 +1,740 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmthread.c + * + * standalone DLM module + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + */ + + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/random.h> +#include <linux/blkdev.h> +#include <linux/socket.h> +#include <linux/inet.h> +#include <linux/timer.h> +#include <linux/kthread.h> +#include <linux/delay.h> + + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" +#include "dlmdomain.h" + +#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD) +#include "cluster/masklog.h" + +static int dlm_thread(void *data); +static void dlm_flush_asts(struct dlm_ctxt *dlm); + +#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num) + +/* will exit holding res->spinlock, but may drop in function */ +/* waits until flags are cleared on res->state */ +void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags) +{ + DECLARE_WAITQUEUE(wait, current); + + assert_spin_locked(&res->spinlock); + + add_wait_queue(&res->wq, &wait); +repeat: + set_current_state(TASK_UNINTERRUPTIBLE); + if (res->state & flags) { + spin_unlock(&res->spinlock); + schedule(); + spin_lock(&res->spinlock); + goto repeat; + } + remove_wait_queue(&res->wq, &wait); + __set_current_state(TASK_RUNNING); +} + +int __dlm_lockres_has_locks(struct dlm_lock_resource *res) +{ + if (list_empty(&res->granted) && + list_empty(&res->converting) && + list_empty(&res->blocked)) + return 0; + return 1; +} + +/* "unused": the lockres has no locks, is not on the dirty list, + * has no inflight locks (in the gap between mastery and acquiring + * the first lock), and has no bits in its refmap. + * truly ready to be freed. 
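+ * In other words, all four must hold: no locks on any queue, not on
+ * the dirty list, inflight_locks == 0, and an empty refmap; this is
+ * exactly what __dlm_lockres_unused() below checks.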
*/ +int __dlm_lockres_unused(struct dlm_lock_resource *res) +{ + if (!__dlm_lockres_has_locks(res) && + (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) { + /* try not to scan the bitmap unless the first two + * conditions are already true */ + int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); + if (bit >= O2NM_MAX_NODES) { + /* since the bit for dlm->node_num is not + * set, inflight_locks better be zero */ + BUG_ON(res->inflight_locks != 0); + return 1; + } + } + return 0; +} + + +/* Call whenever you may have added or deleted something from one of + * the lockres queue's. This will figure out whether it belongs on the + * unused list or not and does the appropriate thing. */ +void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); + + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&res->spinlock); + + if (__dlm_lockres_unused(res)){ + if (list_empty(&res->purge)) { + mlog(0, "putting lockres %.*s:%p onto purge list\n", + res->lockname.len, res->lockname.name, res); + + res->last_used = jiffies; + dlm_lockres_get(res); + list_add_tail(&res->purge, &dlm->purge_list); + dlm->purge_count++; + } + } else if (!list_empty(&res->purge)) { + mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n", + res->lockname.len, res->lockname.name, res, res->owner); + + list_del_init(&res->purge); + dlm_lockres_put(res); + dlm->purge_count--; + } +} + +void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); + spin_lock(&dlm->spinlock); + spin_lock(&res->spinlock); + + __dlm_lockres_calc_usage(dlm, res); + + spin_unlock(&res->spinlock); + spin_unlock(&dlm->spinlock); +} + +static int dlm_purge_lockres(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + int master; + int ret = 0; + + spin_lock(&res->spinlock); + if (!__dlm_lockres_unused(res)) { + spin_unlock(&res->spinlock); + mlog(0, "%s:%.*s: tried to purge but not unused\n", + dlm->name, res->lockname.len, res->lockname.name); + return -ENOTEMPTY; + } + master = (res->owner == dlm->node_num); + if (!master) + res->state |= DLM_LOCK_RES_DROPPING_REF; + spin_unlock(&res->spinlock); + + mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, + res->lockname.name, master); + + if (!master) { + /* drop spinlock... retake below */ + spin_unlock(&dlm->spinlock); + + spin_lock(&res->spinlock); + /* This ensures that clear refmap is sent after the set */ + __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); + spin_unlock(&res->spinlock); + + /* clear our bit from the master's refmap, ignore errors */ + ret = dlm_drop_lockres_ref(dlm, res); + if (ret < 0) { + mlog_errno(ret); + if (!dlm_is_host_down(ret)) + BUG(); + } + mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", + dlm->name, res->lockname.len, res->lockname.name, ret); + spin_lock(&dlm->spinlock); + } + + if (!list_empty(&res->purge)) { + mlog(0, "removing lockres %.*s:%p from purgelist, " + "master = %d\n", res->lockname.len, res->lockname.name, + res, master); + list_del_init(&res->purge); + dlm_lockres_put(res); + dlm->purge_count--; + } + __dlm_unhash_lockres(res); + + /* lockres is not in the hash now. drop the flag and wake up + * any processes waiting in dlm_get_lock_resource. 
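+ * (the flag is DLM_LOCK_RES_DROPPING_REF, set earlier for the
+ * non-master case; anyone who found the resource while it was set
+ * is sleeping on res->wq)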
*/ + if (!master) { + spin_lock(&res->spinlock); + res->state &= ~DLM_LOCK_RES_DROPPING_REF; + spin_unlock(&res->spinlock); + wake_up(&res->wq); + } + return 0; +} + +static void dlm_run_purge_list(struct dlm_ctxt *dlm, + int purge_now) +{ + unsigned int run_max, unused; + unsigned long purge_jiffies; + struct dlm_lock_resource *lockres; + + spin_lock(&dlm->spinlock); + run_max = dlm->purge_count; + + while(run_max && !list_empty(&dlm->purge_list)) { + run_max--; + + lockres = list_entry(dlm->purge_list.next, + struct dlm_lock_resource, purge); + + /* Status of the lockres *might* change so double + * check. If the lockres is unused, holding the dlm + * spinlock will prevent people from getting and more + * refs on it -- there's no need to keep the lockres + * spinlock. */ + spin_lock(&lockres->spinlock); + unused = __dlm_lockres_unused(lockres); + spin_unlock(&lockres->spinlock); + + if (!unused) + continue; + + purge_jiffies = lockres->last_used + + msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); + + /* Make sure that we want to be processing this guy at + * this time. */ + if (!purge_now && time_after(purge_jiffies, jiffies)) { + /* Since resources are added to the purge list + * in tail order, we can stop at the first + * unpurgable resource -- anyone added after + * him will have a greater last_used value */ + break; + } + + dlm_lockres_get(lockres); + + /* This may drop and reacquire the dlm spinlock if it + * has to do migration. */ + if (dlm_purge_lockres(dlm, lockres)) + BUG(); + + dlm_lockres_put(lockres); + + /* Avoid adding any scheduling latencies */ + cond_resched_lock(&dlm->spinlock); + } + + spin_unlock(&dlm->spinlock); +} + +static void dlm_shuffle_lists(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + struct dlm_lock *lock, *target; + struct list_head *iter; + struct list_head *head; + int can_grant = 1; + + //mlog(0, "res->lockname.len=%d\n", res->lockname.len); + //mlog(0, "res->lockname.name=%p\n", res->lockname.name); + //mlog(0, "shuffle res %.*s\n", res->lockname.len, + // res->lockname.name); + + /* because this function is called with the lockres + * spinlock, and because we know that it is not migrating/ + * recovering/in-progress, it is fine to reserve asts and + * basts right before queueing them all throughout */ + assert_spin_locked(&res->spinlock); + BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| + DLM_LOCK_RES_RECOVERING| + DLM_LOCK_RES_IN_PROGRESS))); + +converting: + if (list_empty(&res->converting)) + goto blocked; + mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len, + res->lockname.name); + + target = list_entry(res->converting.next, struct dlm_lock, list); + if (target->ml.convert_type == LKM_IVMODE) { + mlog(ML_ERROR, "%.*s: converting a lock with no " + "convert_type!\n", res->lockname.len, res->lockname.name); + BUG(); + } + head = &res->granted; + list_for_each(iter, head) { + lock = list_entry(iter, struct dlm_lock, list); + if (lock==target) + continue; + if (!dlm_lock_compatible(lock->ml.type, + target->ml.convert_type)) { + can_grant = 0; + /* queue the BAST if not already */ + if (lock->ml.highest_blocked == LKM_IVMODE) { + __dlm_lockres_reserve_ast(res); + dlm_queue_bast(dlm, lock); + } + /* update the highest_blocked if needed */ + if (lock->ml.highest_blocked < target->ml.convert_type) + lock->ml.highest_blocked = + target->ml.convert_type; + } + } + head = &res->converting; + list_for_each(iter, head) { + lock = list_entry(iter, struct dlm_lock, list); + if (lock==target) + continue; + if 
(!dlm_lock_compatible(lock->ml.type, + target->ml.convert_type)) { + can_grant = 0; + if (lock->ml.highest_blocked == LKM_IVMODE) { + __dlm_lockres_reserve_ast(res); + dlm_queue_bast(dlm, lock); + } + if (lock->ml.highest_blocked < target->ml.convert_type) + lock->ml.highest_blocked = + target->ml.convert_type; + } + } + + /* we can convert the lock */ + if (can_grant) { + spin_lock(&target->spinlock); + BUG_ON(target->ml.highest_blocked != LKM_IVMODE); + + mlog(0, "calling ast for converting lock: %.*s, have: %d, " + "granting: %d, node: %u\n", res->lockname.len, + res->lockname.name, target->ml.type, + target->ml.convert_type, target->ml.node); + + target->ml.type = target->ml.convert_type; + target->ml.convert_type = LKM_IVMODE; + list_move_tail(&target->list, &res->granted); + + BUG_ON(!target->lksb); + target->lksb->status = DLM_NORMAL; + + spin_unlock(&target->spinlock); + + __dlm_lockres_reserve_ast(res); + dlm_queue_ast(dlm, target); + /* go back and check for more */ + goto converting; + } + +blocked: + if (list_empty(&res->blocked)) + goto leave; + target = list_entry(res->blocked.next, struct dlm_lock, list); + + head = &res->granted; + list_for_each(iter, head) { + lock = list_entry(iter, struct dlm_lock, list); + if (lock==target) + continue; + if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { + can_grant = 0; + if (lock->ml.highest_blocked == LKM_IVMODE) { + __dlm_lockres_reserve_ast(res); + dlm_queue_bast(dlm, lock); + } + if (lock->ml.highest_blocked < target->ml.type) + lock->ml.highest_blocked = target->ml.type; + } + } + + head = &res->converting; + list_for_each(iter, head) { + lock = list_entry(iter, struct dlm_lock, list); + if (lock==target) + continue; + if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { + can_grant = 0; + if (lock->ml.highest_blocked == LKM_IVMODE) { + __dlm_lockres_reserve_ast(res); + dlm_queue_bast(dlm, lock); + } + if (lock->ml.highest_blocked < target->ml.type) + lock->ml.highest_blocked = target->ml.type; + } + } + + /* we can grant the blocked lock (only + * possible if converting list empty) */ + if (can_grant) { + spin_lock(&target->spinlock); + BUG_ON(target->ml.highest_blocked != LKM_IVMODE); + + mlog(0, "calling ast for blocked lock: %.*s, granting: %d, " + "node: %u\n", res->lockname.len, res->lockname.name, + target->ml.type, target->ml.node); + + // target->ml.type is already correct + list_move_tail(&target->list, &res->granted); + + BUG_ON(!target->lksb); + target->lksb->status = DLM_NORMAL; + + spin_unlock(&target->spinlock); + + __dlm_lockres_reserve_ast(res); + dlm_queue_ast(dlm, target); + /* go back and check for more */ + goto converting; + } + +leave: + return; +} + +/* must have NO locks when calling this with res !=NULL * */ +void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) +{ + mlog_entry("dlm=%p, res=%p\n", dlm, res); + if (res) { + spin_lock(&dlm->spinlock); + spin_lock(&res->spinlock); + __dlm_dirty_lockres(dlm, res); + spin_unlock(&res->spinlock); + spin_unlock(&dlm->spinlock); + } + wake_up(&dlm->dlm_thread_wq); +} + +void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) +{ + mlog_entry("dlm=%p, res=%p\n", dlm, res); + + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&res->spinlock); + + /* don't shuffle secondary queues */ + if ((res->owner == dlm->node_num)) { + if (res->state & (DLM_LOCK_RES_MIGRATING | + DLM_LOCK_RES_BLOCK_DIRTY)) + return; + + if (list_empty(&res->dirty)) { + /* ref for dirty_list */ + dlm_lockres_get(res); + 
list_add_tail(&res->dirty, &dlm->dirty_list); + res->state |= DLM_LOCK_RES_DIRTY; + } + } +} + + +/* Launch the NM thread for the mounted volume */ +int dlm_launch_thread(struct dlm_ctxt *dlm) +{ + mlog(0, "starting dlm thread...\n"); + + dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); + if (IS_ERR(dlm->dlm_thread_task)) { + mlog_errno(PTR_ERR(dlm->dlm_thread_task)); + dlm->dlm_thread_task = NULL; + return -EINVAL; + } + + return 0; +} + +void dlm_complete_thread(struct dlm_ctxt *dlm) +{ + if (dlm->dlm_thread_task) { + mlog(ML_KTHREAD, "waiting for dlm thread to exit\n"); + kthread_stop(dlm->dlm_thread_task); + dlm->dlm_thread_task = NULL; + } +} + +static int dlm_dirty_list_empty(struct dlm_ctxt *dlm) +{ + int empty; + + spin_lock(&dlm->spinlock); + empty = list_empty(&dlm->dirty_list); + spin_unlock(&dlm->spinlock); + + return empty; +} + +static void dlm_flush_asts(struct dlm_ctxt *dlm) +{ + int ret; + struct dlm_lock *lock; + struct dlm_lock_resource *res; + u8 hi; + + spin_lock(&dlm->ast_lock); + while (!list_empty(&dlm->pending_asts)) { + lock = list_entry(dlm->pending_asts.next, + struct dlm_lock, ast_list); + /* get an extra ref on lock */ + dlm_lock_get(lock); + res = lock->lockres; + mlog(0, "delivering an ast for this lockres\n"); + + BUG_ON(!lock->ast_pending); + + /* remove from list (including ref) */ + list_del_init(&lock->ast_list); + dlm_lock_put(lock); + spin_unlock(&dlm->ast_lock); + + if (lock->ml.node != dlm->node_num) { + ret = dlm_do_remote_ast(dlm, res, lock); + if (ret < 0) + mlog_errno(ret); + } else + dlm_do_local_ast(dlm, res, lock); + + spin_lock(&dlm->ast_lock); + + /* possible that another ast was queued while + * we were delivering the last one */ + if (!list_empty(&lock->ast_list)) { + mlog(0, "aha another ast got queued while " + "we were finishing the last one. will " + "keep the ast_pending flag set.\n"); + } else + lock->ast_pending = 0; + + /* drop the extra ref. + * this may drop it completely. */ + dlm_lock_put(lock); + dlm_lockres_release_ast(dlm, res); + } + + while (!list_empty(&dlm->pending_basts)) { + lock = list_entry(dlm->pending_basts.next, + struct dlm_lock, bast_list); + /* get an extra ref on lock */ + dlm_lock_get(lock); + res = lock->lockres; + + BUG_ON(!lock->bast_pending); + + /* get the highest blocked lock, and reset */ + spin_lock(&lock->spinlock); + BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE); + hi = lock->ml.highest_blocked; + lock->ml.highest_blocked = LKM_IVMODE; + spin_unlock(&lock->spinlock); + + /* remove from list (including ref) */ + list_del_init(&lock->bast_list); + dlm_lock_put(lock); + spin_unlock(&dlm->ast_lock); + + mlog(0, "delivering a bast for this lockres " + "(blocked = %d\n", hi); + + if (lock->ml.node != dlm->node_num) { + ret = dlm_send_proxy_bast(dlm, res, lock, hi); + if (ret < 0) + mlog_errno(ret); + } else + dlm_do_local_bast(dlm, res, lock, hi); + + spin_lock(&dlm->ast_lock); + + /* possible that another bast was queued while + * we were delivering the last one */ + if (!list_empty(&lock->bast_list)) { + mlog(0, "aha another bast got queued while " + "we were finishing the last one. will " + "keep the bast_pending flag set.\n"); + } else + lock->bast_pending = 0; + + /* drop the extra ref. + * this may drop it completely. 
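+ * (this is the ref taken with dlm_lock_get() at the top of this
+ * loop pass)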
*/ + dlm_lock_put(lock); + dlm_lockres_release_ast(dlm, res); + } + wake_up(&dlm->ast_wq); + spin_unlock(&dlm->ast_lock); +} + + +#define DLM_THREAD_TIMEOUT_MS (4 * 1000) +#define DLM_THREAD_MAX_DIRTY 100 +#define DLM_THREAD_MAX_ASTS 10 + +static int dlm_thread(void *data) +{ + struct dlm_lock_resource *res; + struct dlm_ctxt *dlm = data; + unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS); + + mlog(0, "dlm thread running for %s...\n", dlm->name); + + while (!kthread_should_stop()) { + int n = DLM_THREAD_MAX_DIRTY; + + /* dlm_shutting_down is very point-in-time, but that + * doesn't matter as we'll just loop back around if we + * get false on the leading edge of a state + * transition. */ + dlm_run_purge_list(dlm, dlm_shutting_down(dlm)); + + /* We really don't want to hold dlm->spinlock while + * calling dlm_shuffle_lists on each lockres that + * needs to have its queues adjusted and AST/BASTs + * run. So let's pull each entry off the dirty_list + * and drop dlm->spinlock ASAP. Once off the list, + * res->spinlock needs to be taken again to protect + * the queues while calling dlm_shuffle_lists. */ + spin_lock(&dlm->spinlock); + while (!list_empty(&dlm->dirty_list)) { + int delay = 0; + res = list_entry(dlm->dirty_list.next, + struct dlm_lock_resource, dirty); + + /* peel a lockres off, remove it from the list, + * unset the dirty flag and drop the dlm lock */ + BUG_ON(!res); + dlm_lockres_get(res); + + spin_lock(&res->spinlock); + /* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */ + list_del_init(&res->dirty); + spin_unlock(&res->spinlock); + spin_unlock(&dlm->spinlock); + /* Drop dirty_list ref */ + dlm_lockres_put(res); + + /* lockres can be re-dirtied/re-added to the + * dirty_list in this gap, but that is ok */ + + spin_lock(&res->spinlock); + if (res->owner != dlm->node_num) { + __dlm_print_one_lock_resource(res); + mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n", + res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no", + res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no", + res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no", + res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); + } + BUG_ON(res->owner != dlm->node_num); + + /* it is now ok to move lockreses in these states + * to the dirty list, assuming that they will only be + * dirty for a short while. */ + BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); + if (res->state & (DLM_LOCK_RES_IN_PROGRESS | + DLM_LOCK_RES_RECOVERING)) { + /* move it to the tail and keep going */ + res->state &= ~DLM_LOCK_RES_DIRTY; + spin_unlock(&res->spinlock); + mlog(0, "delaying list shuffling for in-" + "progress lockres %.*s, state=%d\n", + res->lockname.len, res->lockname.name, + res->state); + delay = 1; + goto in_progress; + } + + /* at this point the lockres is not migrating/ + * recovering/in-progress. we have the lockres + * spinlock and do NOT have the dlm lock. + * safe to reserve/queue asts and run the lists. 
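+ * (dlm_shuffle_lists() depends on exactly this: it asserts the
+ * lockres spinlock and BUG()s on the migrating/recovering/
+ * in-progress states)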
*/ + + mlog(0, "calling dlm_shuffle_lists with dlm=%s, " + "res=%.*s\n", dlm->name, + res->lockname.len, res->lockname.name); + + /* called while holding lockres lock */ + dlm_shuffle_lists(dlm, res); + res->state &= ~DLM_LOCK_RES_DIRTY; + spin_unlock(&res->spinlock); + + dlm_lockres_calc_usage(dlm, res); + +in_progress: + + spin_lock(&dlm->spinlock); + /* if the lock was in-progress, stick + * it on the back of the list */ + if (delay) { + spin_lock(&res->spinlock); + __dlm_dirty_lockres(dlm, res); + spin_unlock(&res->spinlock); + } + dlm_lockres_put(res); + + /* unlikely, but we may need to give time to + * other tasks */ + if (!--n) { + mlog(0, "throttling dlm_thread\n"); + break; + } + } + + spin_unlock(&dlm->spinlock); + dlm_flush_asts(dlm); + + /* yield and continue right away if there is more work to do */ + if (!n) { + cond_resched(); + continue; + } + + wait_event_interruptible_timeout(dlm->dlm_thread_wq, + !dlm_dirty_list_empty(dlm) || + kthread_should_stop(), + timeout); + } + + mlog(0, "quitting DLM thread\n"); + return 0; +} diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c new file mode 100644 index 0000000..86ca085 --- /dev/null +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -0,0 +1,695 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmunlock.c + * + * underlying calls for unlocking locks + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ * + */ + + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/random.h> +#include <linux/blkdev.h> +#include <linux/socket.h> +#include <linux/inet.h> +#include <linux/spinlock.h> +#include <linux/delay.h> + +#include "cluster/heartbeat.h" +#include "cluster/nodemanager.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" +#include "dlmcommon.h" + +#define MLOG_MASK_PREFIX ML_DLM +#include "cluster/masklog.h" + +#define DLM_UNLOCK_FREE_LOCK 0x00000001 +#define DLM_UNLOCK_CALL_AST 0x00000002 +#define DLM_UNLOCK_REMOVE_LOCK 0x00000004 +#define DLM_UNLOCK_REGRANT_LOCK 0x00000008 +#define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010 + + +static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int *actions); +static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int *actions); + +static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int flags, + u8 owner); + + +/* + * according to the spec: + * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf + * + * flags & LKM_CANCEL != 0: must be converting or blocked + * flags & LKM_CANCEL == 0: must be granted + * + * So to unlock a converting lock, you must first cancel the + * convert (passing LKM_CANCEL in flags), then call the unlock + * again (with no LKM_CANCEL in flags). + */ + + +/* + * locking: + * caller needs: none + * taken: res->spinlock and lock->spinlock taken and dropped + * held on exit: none + * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network + * all callers should have taken an extra ref on lock coming in + */ +static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int flags, int *call_ast, + int master_node) +{ + enum dlm_status status; + int actions = 0; + int in_use; + u8 owner; + + mlog(0, "master_node = %d, valblk = %d\n", master_node, + flags & LKM_VALBLK); + + if (master_node) + BUG_ON(res->owner != dlm->node_num); + else + BUG_ON(res->owner == dlm->node_num); + + spin_lock(&dlm->spinlock); + /* We want to be sure that we're not freeing a lock + * that still has AST's pending... 
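+ * (if the lock is still on the pending_asts list we refuse the
+ * request with DLM_BADPARAM below instead of racing with ast
+ * delivery)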
*/ + in_use = !list_empty(&lock->ast_list); + spin_unlock(&dlm->spinlock); + if (in_use) { + mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " + "while waiting for an ast!", res->lockname.len, + res->lockname.name); + return DLM_BADPARAM; + } + + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_IN_PROGRESS) { + if (master_node) { + mlog(ML_ERROR, "lockres in progress!\n"); + spin_unlock(&res->spinlock); + return DLM_FORWARD; + } + /* ok for this to sleep if not in a network handler */ + __dlm_wait_on_lockres(res); + res->state |= DLM_LOCK_RES_IN_PROGRESS; + } + spin_lock(&lock->spinlock); + + if (res->state & DLM_LOCK_RES_RECOVERING) { + status = DLM_RECOVERING; + goto leave; + } + + if (res->state & DLM_LOCK_RES_MIGRATING) { + status = DLM_MIGRATING; + goto leave; + } + + /* see above for what the spec says about + * LKM_CANCEL and the lock queue state */ + if (flags & LKM_CANCEL) + status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions); + else + status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); + + if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) + goto leave; + + /* By now this has been masked out of cancel requests. */ + if (flags & LKM_VALBLK) { + /* make the final update to the lvb */ + if (master_node) + memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); + else + flags |= LKM_PUT_LVB; /* let the send function + * handle it. */ + } + + if (!master_node) { + owner = res->owner; + /* drop locks and send message */ + if (flags & LKM_CANCEL) + lock->cancel_pending = 1; + else + lock->unlock_pending = 1; + spin_unlock(&lock->spinlock); + spin_unlock(&res->spinlock); + status = dlm_send_remote_unlock_request(dlm, res, lock, lksb, + flags, owner); + spin_lock(&res->spinlock); + spin_lock(&lock->spinlock); + /* if the master told us the lock was already granted, + * let the ast handle all of these actions */ + if (status == DLM_CANCELGRANT) { + actions &= ~(DLM_UNLOCK_REMOVE_LOCK| + DLM_UNLOCK_REGRANT_LOCK| + DLM_UNLOCK_CLEAR_CONVERT_TYPE); + } else if (status == DLM_RECOVERING || + status == DLM_MIGRATING || + status == DLM_FORWARD) { + /* must clear the actions because this unlock + * is about to be retried. cannot free or do + * any list manipulation. */ + mlog(0, "%s:%.*s: clearing actions, %s\n", + dlm->name, res->lockname.len, + res->lockname.name, + status==DLM_RECOVERING?"recovering": + (status==DLM_MIGRATING?"migrating": + "forward")); + actions = 0; + } + if (flags & LKM_CANCEL) + lock->cancel_pending = 0; + else + lock->unlock_pending = 0; + + } + + /* get an extra ref on lock. if we are just switching + * lists here, we dont want the lock to go away. */ + dlm_lock_get(lock); + + if (actions & DLM_UNLOCK_REMOVE_LOCK) { + list_del_init(&lock->list); + dlm_lock_put(lock); + } + if (actions & DLM_UNLOCK_REGRANT_LOCK) { + dlm_lock_get(lock); + list_add_tail(&lock->list, &res->granted); + } + if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) { + mlog(0, "clearing convert_type at %smaster node\n", + master_node ? 
"" : "non-"); + lock->ml.convert_type = LKM_IVMODE; + } + + /* remove the extra ref on lock */ + dlm_lock_put(lock); + +leave: + res->state &= ~DLM_LOCK_RES_IN_PROGRESS; + if (!dlm_lock_on_list(&res->converting, lock)) + BUG_ON(lock->ml.convert_type != LKM_IVMODE); + else + BUG_ON(lock->ml.convert_type == LKM_IVMODE); + spin_unlock(&lock->spinlock); + spin_unlock(&res->spinlock); + wake_up(&res->wq); + + /* let the caller's final dlm_lock_put handle the actual kfree */ + if (actions & DLM_UNLOCK_FREE_LOCK) { + /* this should always be coupled with list removal */ + BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK)); + mlog(0, "lock %u:%llu should be gone now! refs=%d\n", + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + atomic_read(&lock->lock_refs.refcount)-1); + dlm_lock_put(lock); + } + if (actions & DLM_UNLOCK_CALL_AST) + *call_ast = 1; + + /* if cancel or unlock succeeded, lvb work is done */ + if (status == DLM_NORMAL) + lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); + + return status; +} + +void dlm_commit_pending_unlock(struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + /* leave DLM_LKSB_PUT_LVB on the lksb so any final + * update of the lvb will be sent to the new master */ + list_del_init(&lock->list); +} + +void dlm_commit_pending_cancel(struct dlm_lock_resource *res, + struct dlm_lock *lock) +{ + list_move_tail(&lock->list, &res->granted); + lock->ml.convert_type = LKM_IVMODE; +} + + +static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int flags, + int *call_ast) +{ + return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1); +} + +static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int flags, int *call_ast) +{ + return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0); +} + +/* + * locking: + * caller needs: none + * taken: none + * held on exit: none + * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network + */ +static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int flags, + u8 owner) +{ + struct dlm_unlock_lock unlock; + int tmpret; + enum dlm_status ret; + int status = 0; + struct kvec vec[2]; + size_t veclen = 1; + + mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); + + if (owner == dlm->node_num) { + /* ended up trying to contact ourself. this means + * that the lockres had been remote but became local + * via a migration. 
just retry it, now as local */ + mlog(0, "%s:%.*s: this node became the master due to a " + "migration, re-evaluate now\n", dlm->name, + res->lockname.len, res->lockname.name); + return DLM_FORWARD; + } + + memset(&unlock, 0, sizeof(unlock)); + unlock.node_idx = dlm->node_num; + unlock.flags = cpu_to_be32(flags); + unlock.cookie = lock->ml.cookie; + unlock.namelen = res->lockname.len; + memcpy(unlock.name, res->lockname.name, unlock.namelen); + + vec[0].iov_len = sizeof(struct dlm_unlock_lock); + vec[0].iov_base = &unlock; + + if (flags & LKM_PUT_LVB) { + /* extra data to send if we are updating lvb */ + vec[1].iov_len = DLM_LVB_LEN; + vec[1].iov_base = lock->lksb->lvb; + veclen++; + } + + tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key, + vec, veclen, owner, &status); + if (tmpret >= 0) { + // successfully sent and received + if (status == DLM_FORWARD) + mlog(0, "master was in-progress. retry\n"); + ret = status; + } else { + mlog_errno(tmpret); + if (dlm_is_host_down(tmpret)) { + /* NOTE: this seems strange, but it is what we want. + * when the master goes down during a cancel or + * unlock, the recovery code completes the operation + * as if the master had not died, then passes the + * updated state to the recovery master. this thread + * just needs to finish out the operation and call + * the unlockast. */ + ret = DLM_NORMAL; + } else { + /* something bad. this will BUG in ocfs2 */ + ret = dlm_err_to_dlm_status(tmpret); + } + } + + return ret; +} + +/* + * locking: + * caller needs: none + * taken: takes and drops res->spinlock + * held on exit: none + * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, + * return value from dlmunlock_master + */ +int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data, + void **ret_data) +{ + struct dlm_ctxt *dlm = data; + struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; + struct dlm_lock_resource *res = NULL; + struct list_head *iter; + struct dlm_lock *lock = NULL; + enum dlm_status status = DLM_NORMAL; + int found = 0, i; + struct dlm_lockstatus *lksb = NULL; + int ignore; + u32 flags; + struct list_head *queue; + + flags = be32_to_cpu(unlock->flags); + + if (flags & LKM_GET_LVB) { + mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n"); + return DLM_BADARGS; + } + + if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) { + mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL " + "request!\n"); + return DLM_BADARGS; + } + + if (unlock->namelen > DLM_LOCKID_NAME_MAX) { + mlog(ML_ERROR, "Invalid name length in unlock handler!\n"); + return DLM_IVBUFLEN; + } + + if (!dlm_grab(dlm)) + return DLM_REJECTED; + + mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), + "Domain %s not fully joined!\n", dlm->name); + + mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none"); + + res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen); + if (!res) { + /* We assume here that a no lock resource simply means + * it was migrated away and destroyed before the other + * node could detect it. 
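+ * (DLM_FORWARD just asks the caller to retry; dlmunlock() below
+ * re-reads res->owner on every pass, so a retry is sent to
+ * whichever node is the master by then)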
*/ + mlog(0, "returning DLM_FORWARD -- res no longer exists\n"); + status = DLM_FORWARD; + goto not_found; + } + + queue=&res->granted; + found = 0; + spin_lock(&res->spinlock); + if (res->state & DLM_LOCK_RES_RECOVERING) { + spin_unlock(&res->spinlock); + mlog(0, "returning DLM_RECOVERING\n"); + status = DLM_RECOVERING; + goto leave; + } + + if (res->state & DLM_LOCK_RES_MIGRATING) { + spin_unlock(&res->spinlock); + mlog(0, "returning DLM_MIGRATING\n"); + status = DLM_MIGRATING; + goto leave; + } + + if (res->owner != dlm->node_num) { + spin_unlock(&res->spinlock); + mlog(0, "returning DLM_FORWARD -- not master\n"); + status = DLM_FORWARD; + goto leave; + } + + for (i=0; i<3; i++) { + list_for_each(iter, queue) { + lock = list_entry(iter, struct dlm_lock, list); + if (lock->ml.cookie == unlock->cookie && + lock->ml.node == unlock->node_idx) { + dlm_lock_get(lock); + found = 1; + break; + } + } + if (found) + break; + /* scan granted -> converting -> blocked queues */ + queue++; + } + spin_unlock(&res->spinlock); + if (!found) { + status = DLM_IVLOCKID; + goto not_found; + } + + /* lock was found on queue */ + lksb = lock->lksb; + if (flags & (LKM_VALBLK|LKM_PUT_LVB) && + lock->ml.type != LKM_EXMODE) + flags &= ~(LKM_VALBLK|LKM_PUT_LVB); + + /* unlockast only called on originating node */ + if (flags & LKM_PUT_LVB) { + lksb->flags |= DLM_LKSB_PUT_LVB; + memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN); + } + + /* if this is in-progress, propagate the DLM_FORWARD + * all the way back out */ + status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore); + if (status == DLM_FORWARD) + mlog(0, "lockres is in progress\n"); + + if (flags & LKM_PUT_LVB) + lksb->flags &= ~DLM_LKSB_PUT_LVB; + + dlm_lockres_calc_usage(dlm, res); + dlm_kick_thread(dlm, res); + +not_found: + if (!found) + mlog(ML_ERROR, "failed to find lock to unlock! " + "cookie=%u:%llu\n", + dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie))); + else + dlm_lock_put(lock); + +leave: + if (res) + dlm_lockres_put(res); + + dlm_put(dlm); + + return status; +} + + +static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int *actions) +{ + enum dlm_status status; + + if (dlm_lock_on_list(&res->blocked, lock)) { + /* cancel this outright */ + status = DLM_NORMAL; + *actions = (DLM_UNLOCK_CALL_AST | + DLM_UNLOCK_REMOVE_LOCK); + } else if (dlm_lock_on_list(&res->converting, lock)) { + /* cancel the request, put back on granted */ + status = DLM_NORMAL; + *actions = (DLM_UNLOCK_CALL_AST | + DLM_UNLOCK_REMOVE_LOCK | + DLM_UNLOCK_REGRANT_LOCK | + DLM_UNLOCK_CLEAR_CONVERT_TYPE); + } else if (dlm_lock_on_list(&res->granted, lock)) { + /* too late, already granted. 
*/ + status = DLM_CANCELGRANT; + *actions = DLM_UNLOCK_CALL_AST; + } else { + mlog(ML_ERROR, "lock to cancel is not on any list!\n"); + status = DLM_IVLOCKID; + *actions = 0; + } + return status; +} + +static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, + struct dlm_lock *lock, + struct dlm_lockstatus *lksb, + int *actions) +{ + enum dlm_status status; + + /* unlock request */ + if (!dlm_lock_on_list(&res->granted, lock)) { + status = DLM_DENIED; + dlm_error(status); + *actions = 0; + } else { + /* unlock granted lock */ + status = DLM_NORMAL; + *actions = (DLM_UNLOCK_FREE_LOCK | + DLM_UNLOCK_CALL_AST | + DLM_UNLOCK_REMOVE_LOCK); + } + return status; +} + +/* there seems to be no point in doing this async + * since (even for the remote case) there is really + * no work to queue up... so just do it and fire the + * unlockast by hand when done... */ +enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb, + int flags, dlm_astunlockfunc_t *unlockast, void *data) +{ + enum dlm_status status; + struct dlm_lock_resource *res; + struct dlm_lock *lock = NULL; + int call_ast, is_master; + + mlog_entry_void(); + + if (!lksb) { + dlm_error(DLM_BADARGS); + return DLM_BADARGS; + } + + if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) { + dlm_error(DLM_BADPARAM); + return DLM_BADPARAM; + } + + if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) { + mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n"); + flags &= ~LKM_VALBLK; + } + + if (!lksb->lockid || !lksb->lockid->lockres) { + dlm_error(DLM_BADPARAM); + return DLM_BADPARAM; + } + + lock = lksb->lockid; + BUG_ON(!lock); + dlm_lock_get(lock); + + res = lock->lockres; + BUG_ON(!res); + dlm_lockres_get(res); +retry: + call_ast = 0; + /* need to retry up here because owner may have changed */ + mlog(0, "lock=%p res=%p\n", lock, res); + + spin_lock(&res->spinlock); + is_master = (res->owner == dlm->node_num); + if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE) + flags &= ~LKM_VALBLK; + spin_unlock(&res->spinlock); + + if (is_master) { + status = dlmunlock_master(dlm, res, lock, lksb, flags, + &call_ast); + mlog(0, "done calling dlmunlock_master: returned %d, " + "call_ast is %d\n", status, call_ast); + } else { + status = dlmunlock_remote(dlm, res, lock, lksb, flags, + &call_ast); + mlog(0, "done calling dlmunlock_remote: returned %d, " + "call_ast is %d\n", status, call_ast); + } + + if (status == DLM_RECOVERING || + status == DLM_MIGRATING || + status == DLM_FORWARD) { + /* We want to go away for a tiny bit to allow recovery + * / migration to complete on this resource. I don't + * know of any wait queue we could sleep on as this + * may be happening on another node. Perhaps the + * proper solution is to queue up requests on the + * other end? */ + + /* do we want to yield(); ?? */ + msleep(50); + + mlog(0, "retrying unlock due to pending recovery/" + "migration/in-progress\n"); + goto retry; + } + + if (call_ast) { + mlog(0, "calling unlockast(%p, %d)\n", data, status); + if (is_master) { + /* it is possible that there is one last bast + * pending. make sure it is flushed, then + * call the unlockast. + * not an issue if this is a mastered remotely, + * since this lock has been removed from the + * lockres queues and cannot be found. 
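+ * (also note that dlmunlock() folds DLM_CANCELGRANT into DLM_NORMAL
+ * right after the ast call, so its callers only ever see a plain
+ * success for that case)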
*/ + dlm_kick_thread(dlm, NULL); + wait_event(dlm->ast_wq, + dlm_lock_basts_flushed(dlm, lock)); + } + (*unlockast)(data, status); + } + + if (status == DLM_CANCELGRANT) + status = DLM_NORMAL; + + if (status == DLM_NORMAL) { + mlog(0, "kicking the thread\n"); + dlm_kick_thread(dlm, res); + } else + dlm_error(status); + + dlm_lockres_calc_usage(dlm, res); + dlm_lockres_put(res); + dlm_lock_put(lock); + + mlog(0, "returning status=%d!\n", status); + return status; +} +EXPORT_SYMBOL_GPL(dlmunlock); + diff --git a/fs/ocfs2/dlm/dlmver.c b/fs/ocfs2/dlm/dlmver.c new file mode 100644 index 0000000..dfc0da4 --- /dev/null +++ b/fs/ocfs2/dlm/dlmver.c @@ -0,0 +1,42 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmver.c + * + * version string + * + * Copyright (C) 2002, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/module.h> +#include <linux/kernel.h> + +#include "dlmver.h" + +#define DLM_BUILD_VERSION "1.5.0" + +#define VERSION_STR "OCFS2 DLM " DLM_BUILD_VERSION + +void dlm_print_version(void) +{ + printk(KERN_INFO "%s\n", VERSION_STR); +} + +MODULE_DESCRIPTION(VERSION_STR); + +MODULE_VERSION(DLM_BUILD_VERSION); diff --git a/fs/ocfs2/dlm/dlmver.h b/fs/ocfs2/dlm/dlmver.h new file mode 100644 index 0000000..f674aee --- /dev/null +++ b/fs/ocfs2/dlm/dlmver.h @@ -0,0 +1,31 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmfsver.h + * + * Function prototypes + * + * Copyright (C) 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef DLM_VER_H +#define DLM_VER_H + +void dlm_print_version(void); + +#endif /* DLM_VER_H */ diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c new file mode 100644 index 0000000..4cb1d3d --- /dev/null +++ b/fs/ocfs2/dlm/userdlm.c @@ -0,0 +1,676 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * userdlm.c + * + * Code which implements the kernel side of a minimal userspace + * interface to our DLM. + * + * Many of the functions here are pared down versions of dlmglue.c + * functions. 
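+ * They back the dlmfs filesystem: each dlmfs inode embeds a
+ * user_lock_res (see dlmfs_inode_private, used below).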
+ * + * Copyright (C) 2003, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/signal.h> + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/crc32.h> + + +#include "cluster/nodemanager.h" +#include "cluster/heartbeat.h" +#include "cluster/tcp.h" + +#include "dlmapi.h" + +#include "userdlm.h" + +#define MLOG_MASK_PREFIX ML_DLMFS +#include "cluster/masklog.h" + +static inline int user_check_wait_flag(struct user_lock_res *lockres, + int flag) +{ + int ret; + + spin_lock(&lockres->l_lock); + ret = lockres->l_flags & flag; + spin_unlock(&lockres->l_lock); + + return ret; +} + +static inline void user_wait_on_busy_lock(struct user_lock_res *lockres) + +{ + wait_event(lockres->l_event, + !user_check_wait_flag(lockres, USER_LOCK_BUSY)); +} + +static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres) + +{ + wait_event(lockres->l_event, + !user_check_wait_flag(lockres, USER_LOCK_BLOCKED)); +} + +/* I heart container_of... */ +static inline struct dlm_ctxt * +dlm_ctxt_from_user_lockres(struct user_lock_res *lockres) +{ + struct dlmfs_inode_private *ip; + + ip = container_of(lockres, + struct dlmfs_inode_private, + ip_lockres); + return ip->ip_dlm; +} + +static struct inode * +user_dlm_inode_from_user_lockres(struct user_lock_res *lockres) +{ + struct dlmfs_inode_private *ip; + + ip = container_of(lockres, + struct dlmfs_inode_private, + ip_lockres); + return &ip->ip_vfs_inode; +} + +static inline void user_recover_from_dlm_error(struct user_lock_res *lockres) +{ + spin_lock(&lockres->l_lock); + lockres->l_flags &= ~USER_LOCK_BUSY; + spin_unlock(&lockres->l_lock); +} + +#define user_log_dlm_error(_func, _stat, _lockres) do { \ + mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \ + "resource %.*s: %s\n", dlm_errname(_stat), _func, \ + _lockres->l_namelen, _lockres->l_name, dlm_errmsg(_stat)); \ +} while (0) + +/* WARNING: This function lives in a world where the only three lock + * levels are EX, PR, and NL. It *will* have to be adjusted when more + * lock types are added. 
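+ *
+ * As written it maps: blocking EX -> others may hold at most NL;
+ * blocking PR -> at most PR; anything else -> EX (nothing to drop).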
*/ +static inline int user_highest_compat_lock_level(int level) +{ + int new_level = LKM_EXMODE; + + if (level == LKM_EXMODE) + new_level = LKM_NLMODE; + else if (level == LKM_PRMODE) + new_level = LKM_PRMODE; + return new_level; +} + +static void user_ast(void *opaque) +{ + struct user_lock_res *lockres = opaque; + struct dlm_lockstatus *lksb; + + mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen, + lockres->l_name); + + spin_lock(&lockres->l_lock); + + lksb = &(lockres->l_lksb); + if (lksb->status != DLM_NORMAL) { + mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n", + lksb->status, lockres->l_namelen, lockres->l_name); + spin_unlock(&lockres->l_lock); + return; + } + + mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE, + "Lockres %.*s, requested ivmode. flags 0x%x\n", + lockres->l_namelen, lockres->l_name, lockres->l_flags); + + /* we're downconverting. */ + if (lockres->l_requested < lockres->l_level) { + if (lockres->l_requested <= + user_highest_compat_lock_level(lockres->l_blocking)) { + lockres->l_blocking = LKM_NLMODE; + lockres->l_flags &= ~USER_LOCK_BLOCKED; + } + } + + lockres->l_level = lockres->l_requested; + lockres->l_requested = LKM_IVMODE; + lockres->l_flags |= USER_LOCK_ATTACHED; + lockres->l_flags &= ~USER_LOCK_BUSY; + + spin_unlock(&lockres->l_lock); + + wake_up(&lockres->l_event); +} + +static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) +{ + struct inode *inode; + inode = user_dlm_inode_from_user_lockres(lockres); + if (!igrab(inode)) + BUG(); +} + +static void user_dlm_unblock_lock(struct work_struct *work); + +static void __user_dlm_queue_lockres(struct user_lock_res *lockres) +{ + if (!(lockres->l_flags & USER_LOCK_QUEUED)) { + user_dlm_grab_inode_ref(lockres); + + INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); + + queue_work(user_dlm_worker, &lockres->l_work); + lockres->l_flags |= USER_LOCK_QUEUED; + } +} + +static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres) +{ + int queue = 0; + + if (!(lockres->l_flags & USER_LOCK_BLOCKED)) + return; + + switch (lockres->l_blocking) { + case LKM_EXMODE: + if (!lockres->l_ex_holders && !lockres->l_ro_holders) + queue = 1; + break; + case LKM_PRMODE: + if (!lockres->l_ex_holders) + queue = 1; + break; + default: + BUG(); + } + + if (queue) + __user_dlm_queue_lockres(lockres); +} + +static void user_bast(void *opaque, int level) +{ + struct user_lock_res *lockres = opaque; + + mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n", + lockres->l_namelen, lockres->l_name, level); + + spin_lock(&lockres->l_lock); + lockres->l_flags |= USER_LOCK_BLOCKED; + if (level > lockres->l_blocking) + lockres->l_blocking = level; + + __user_dlm_queue_lockres(lockres); + spin_unlock(&lockres->l_lock); + + wake_up(&lockres->l_event); +} + +static void user_unlock_ast(void *opaque, enum dlm_status status) +{ + struct user_lock_res *lockres = opaque; + + mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen, + lockres->l_name); + + if (status != DLM_NORMAL && status != DLM_CANCELGRANT) + mlog(ML_ERROR, "Dlm returns status %d\n", status); + + spin_lock(&lockres->l_lock); + /* The teardown flag gets set early during the unlock process, + * so test the cancel flag to make sure that this ast isn't + * for a concurrent cancel. 
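+ * Three cases fall out below: a real teardown unlock (l_level goes
+ * to IVMODE), a cancel that raced with a grant (DLM_CANCELGRANT,
+ * busy flag left alone), and a successful cancel (re-queue the
+ * lockres if it is still blocked).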
*/ + if (lockres->l_flags & USER_LOCK_IN_TEARDOWN + && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) { + lockres->l_level = LKM_IVMODE; + } else if (status == DLM_CANCELGRANT) { + /* We tried to cancel a convert request, but it was + * already granted. Don't clear the busy flag - the + * ast should've done this already. */ + BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); + lockres->l_flags &= ~USER_LOCK_IN_CANCEL; + goto out_noclear; + } else { + BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); + /* Cancel succeeded, we want to re-queue */ + lockres->l_requested = LKM_IVMODE; /* cancel an + * upconvert + * request. */ + lockres->l_flags &= ~USER_LOCK_IN_CANCEL; + /* we want the unblock thread to look at it again + * now. */ + if (lockres->l_flags & USER_LOCK_BLOCKED) + __user_dlm_queue_lockres(lockres); + } + + lockres->l_flags &= ~USER_LOCK_BUSY; +out_noclear: + spin_unlock(&lockres->l_lock); + + wake_up(&lockres->l_event); +} + +static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) +{ + struct inode *inode; + inode = user_dlm_inode_from_user_lockres(lockres); + iput(inode); +} + +static void user_dlm_unblock_lock(struct work_struct *work) +{ + int new_level, status; + struct user_lock_res *lockres = + container_of(work, struct user_lock_res, l_work); + struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); + + mlog(0, "processing lockres %.*s\n", lockres->l_namelen, + lockres->l_name); + + spin_lock(&lockres->l_lock); + + mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED), + "Lockres %.*s, flags 0x%x\n", + lockres->l_namelen, lockres->l_name, lockres->l_flags); + + /* notice that we don't clear USER_LOCK_BLOCKED here. If it's + * set, we want user_ast clear it. */ + lockres->l_flags &= ~USER_LOCK_QUEUED; + + /* It's valid to get here and no longer be blocked - if we get + * several basts in a row, we might be queued by the first + * one, the unblock thread might run and clear the queued + * flag, and finally we might get another bast which re-queues + * us before our ast for the downconvert is called. */ + if (!(lockres->l_flags & USER_LOCK_BLOCKED)) { + spin_unlock(&lockres->l_lock); + goto drop_ref; + } + + if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { + spin_unlock(&lockres->l_lock); + goto drop_ref; + } + + if (lockres->l_flags & USER_LOCK_BUSY) { + if (lockres->l_flags & USER_LOCK_IN_CANCEL) { + spin_unlock(&lockres->l_lock); + goto drop_ref; + } + + lockres->l_flags |= USER_LOCK_IN_CANCEL; + spin_unlock(&lockres->l_lock); + + status = dlmunlock(dlm, + &lockres->l_lksb, + LKM_CANCEL, + user_unlock_ast, + lockres); + if (status != DLM_NORMAL) + user_log_dlm_error("dlmunlock", status, lockres); + goto drop_ref; + } + + /* If there are still incompat holders, we can exit safely + * without worrying about re-queueing this lock as that will + * happen on the last call to user_cluster_unlock. */ + if ((lockres->l_blocking == LKM_EXMODE) + && (lockres->l_ex_holders || lockres->l_ro_holders)) { + spin_unlock(&lockres->l_lock); + mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n", + lockres->l_ro_holders, lockres->l_ex_holders); + goto drop_ref; + } + + if ((lockres->l_blocking == LKM_PRMODE) + && lockres->l_ex_holders) { + spin_unlock(&lockres->l_lock); + mlog(0, "can't downconvert for pr: ex = %u\n", + lockres->l_ex_holders); + goto drop_ref; + } + + /* yay, we can downconvert now. 
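+ * (the convert request below passes LKM_CONVERT|LKM_VALBLK so the
+ * lock value block travels with the downconvert)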
*/ + new_level = user_highest_compat_lock_level(lockres->l_blocking); + lockres->l_requested = new_level; + lockres->l_flags |= USER_LOCK_BUSY; + mlog(0, "Downconvert lock from %d to %d\n", + lockres->l_level, new_level); + spin_unlock(&lockres->l_lock); + + /* need lock downconvert request now... */ + status = dlmlock(dlm, + new_level, + &lockres->l_lksb, + LKM_CONVERT|LKM_VALBLK, + lockres->l_name, + lockres->l_namelen, + user_ast, + lockres, + user_bast); + if (status != DLM_NORMAL) { + user_log_dlm_error("dlmlock", status, lockres); + user_recover_from_dlm_error(lockres); + } + +drop_ref: + user_dlm_drop_inode_ref(lockres); +} + +static inline void user_dlm_inc_holders(struct user_lock_res *lockres, + int level) +{ + switch(level) { + case LKM_EXMODE: + lockres->l_ex_holders++; + break; + case LKM_PRMODE: + lockres->l_ro_holders++; + break; + default: + BUG(); + } +} + +/* predict what lock level we'll be dropping down to on behalf + * of another node, and return true if the currently wanted + * level will be compatible with it. */ +static inline int +user_may_continue_on_blocked_lock(struct user_lock_res *lockres, + int wanted) +{ + BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED)); + + return wanted <= user_highest_compat_lock_level(lockres->l_blocking); +} + +int user_dlm_cluster_lock(struct user_lock_res *lockres, + int level, + int lkm_flags) +{ + int status, local_flags; + struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); + + if (level != LKM_EXMODE && + level != LKM_PRMODE) { + mlog(ML_ERROR, "lockres %.*s: invalid request!\n", + lockres->l_namelen, lockres->l_name); + status = -EINVAL; + goto bail; + } + + mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n", + lockres->l_namelen, lockres->l_name, + (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE", + lkm_flags); + +again: + if (signal_pending(current)) { + status = -ERESTARTSYS; + goto bail; + } + + spin_lock(&lockres->l_lock); + + /* We only compare against the currently granted level + * here. If the lock is blocked waiting on a downconvert, + * we'll get caught below. */ + if ((lockres->l_flags & USER_LOCK_BUSY) && + (level > lockres->l_level)) { + /* is someone sitting in dlm_lock? If so, wait on + * them. 
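+ * user_wait_on_busy_lock() sleeps on l_event until user_ast() or
+ * user_unlock_ast() clears USER_LOCK_BUSY.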
+	 */
+		spin_unlock(&lockres->l_lock);
+
+		user_wait_on_busy_lock(lockres);
+		goto again;
+	}
+
+	if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
+	    (!user_may_continue_on_blocked_lock(lockres, level))) {
+		/* the lock is currently blocked on behalf of
+		 * another node */
+		spin_unlock(&lockres->l_lock);
+
+		user_wait_on_blocked_lock(lockres);
+		goto again;
+	}
+
+	if (level > lockres->l_level) {
+		local_flags = lkm_flags | LKM_VALBLK;
+		if (lockres->l_level != LKM_IVMODE)
+			local_flags |= LKM_CONVERT;
+
+		lockres->l_requested = level;
+		lockres->l_flags |= USER_LOCK_BUSY;
+		spin_unlock(&lockres->l_lock);
+
+		BUG_ON(level == LKM_IVMODE);
+		BUG_ON(level == LKM_NLMODE);
+
+		/* call dlm_lock to upgrade lock now */
+		status = dlmlock(dlm,
+				 level,
+				 &lockres->l_lksb,
+				 local_flags,
+				 lockres->l_name,
+				 lockres->l_namelen,
+				 user_ast,
+				 lockres,
+				 user_bast);
+		if (status != DLM_NORMAL) {
+			if ((lkm_flags & LKM_NOQUEUE) &&
+			    (status == DLM_NOTQUEUED))
+				status = -EAGAIN;
+			else {
+				user_log_dlm_error("dlmlock", status, lockres);
+				status = -EINVAL;
+			}
+			user_recover_from_dlm_error(lockres);
+			goto bail;
+		}
+
+		user_wait_on_busy_lock(lockres);
+		goto again;
+	}
+
+	user_dlm_inc_holders(lockres, level);
+	spin_unlock(&lockres->l_lock);
+
+	status = 0;
+bail:
+	return status;
+}
+
+static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
+					int level)
+{
+	switch(level) {
+	case LKM_EXMODE:
+		BUG_ON(!lockres->l_ex_holders);
+		lockres->l_ex_holders--;
+		break;
+	case LKM_PRMODE:
+		BUG_ON(!lockres->l_ro_holders);
+		lockres->l_ro_holders--;
+		break;
+	default:
+		BUG();
+	}
+}
+
+void user_dlm_cluster_unlock(struct user_lock_res *lockres,
+			     int level)
+{
+	if (level != LKM_EXMODE &&
+	    level != LKM_PRMODE) {
+		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
+		     lockres->l_namelen, lockres->l_name);
+		return;
+	}
+
+	spin_lock(&lockres->l_lock);
+	user_dlm_dec_holders(lockres, level);
+	__user_dlm_cond_queue_lockres(lockres);
+	spin_unlock(&lockres->l_lock);
+}
+
+void user_dlm_write_lvb(struct inode *inode,
+			const char *val,
+			unsigned int len)
+{
+	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
+	char *lvb = lockres->l_lksb.lvb;
+
+	BUG_ON(len > DLM_LVB_LEN);
+
+	spin_lock(&lockres->l_lock);
+
+	BUG_ON(lockres->l_level < LKM_EXMODE);
+	memcpy(lvb, val, len);
+
+	spin_unlock(&lockres->l_lock);
+}
+
+void user_dlm_read_lvb(struct inode *inode,
+		       char *val,
+		       unsigned int len)
+{
+	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
+	char *lvb = lockres->l_lksb.lvb;
+
+	BUG_ON(len > DLM_LVB_LEN);
+
+	spin_lock(&lockres->l_lock);
+
+	BUG_ON(lockres->l_level < LKM_PRMODE);
+	memcpy(val, lvb, len);
+
+	spin_unlock(&lockres->l_lock);
+}
+
+void user_dlm_lock_res_init(struct user_lock_res *lockres,
+			    struct dentry *dentry)
+{
+	memset(lockres, 0, sizeof(*lockres));
+
+	spin_lock_init(&lockres->l_lock);
+	init_waitqueue_head(&lockres->l_event);
+	lockres->l_level = LKM_IVMODE;
+	lockres->l_requested = LKM_IVMODE;
+	lockres->l_blocking = LKM_IVMODE;
+
+	/* should have been checked before getting here.
*/ + BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN); + + memcpy(lockres->l_name, + dentry->d_name.name, + dentry->d_name.len); + lockres->l_namelen = dentry->d_name.len; +} + +int user_dlm_destroy_lock(struct user_lock_res *lockres) +{ + int status = -EBUSY; + struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); + + mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name); + + spin_lock(&lockres->l_lock); + if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { + spin_unlock(&lockres->l_lock); + return 0; + } + + lockres->l_flags |= USER_LOCK_IN_TEARDOWN; + + while (lockres->l_flags & USER_LOCK_BUSY) { + spin_unlock(&lockres->l_lock); + + user_wait_on_busy_lock(lockres); + + spin_lock(&lockres->l_lock); + } + + if (lockres->l_ro_holders || lockres->l_ex_holders) { + spin_unlock(&lockres->l_lock); + goto bail; + } + + status = 0; + if (!(lockres->l_flags & USER_LOCK_ATTACHED)) { + spin_unlock(&lockres->l_lock); + goto bail; + } + + lockres->l_flags &= ~USER_LOCK_ATTACHED; + lockres->l_flags |= USER_LOCK_BUSY; + spin_unlock(&lockres->l_lock); + + status = dlmunlock(dlm, + &lockres->l_lksb, + LKM_VALBLK, + user_unlock_ast, + lockres); + if (status != DLM_NORMAL) { + user_log_dlm_error("dlmunlock", status, lockres); + status = -EINVAL; + goto bail; + } + + user_wait_on_busy_lock(lockres); + + status = 0; +bail: + return status; +} + +struct dlm_ctxt *user_dlm_register_context(struct qstr *name, + struct dlm_protocol_version *proto) +{ + struct dlm_ctxt *dlm; + u32 dlm_key; + char *domain; + + domain = kmalloc(name->len + 1, GFP_NOFS); + if (!domain) { + mlog_errno(-ENOMEM); + return ERR_PTR(-ENOMEM); + } + + dlm_key = crc32_le(0, name->name, name->len); + + snprintf(domain, name->len + 1, "%.*s", name->len, name->name); + + dlm = dlm_register_domain(domain, dlm_key, proto); + if (IS_ERR(dlm)) + mlog_errno(PTR_ERR(dlm)); + + kfree(domain); + return dlm; +} + +void user_dlm_unregister_context(struct dlm_ctxt *dlm) +{ + dlm_unregister_domain(dlm); +} diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h new file mode 100644 index 0000000..0c3cc03 --- /dev/null +++ b/fs/ocfs2/dlm/userdlm.h @@ -0,0 +1,113 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * userdlm.h + * + * Userspace dlm defines + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + + +#ifndef USERDLM_H +#define USERDLM_H + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +/* user_lock_res->l_flags flags. 
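+ *
+ * A rough lifecycle, distilled from userdlm.c above (illustrative
+ * only, not a complete state machine):
+ *
+ *	dlmlock()/dlmunlock() issued  -> USER_LOCK_BUSY set
+ *	ast / unlock ast runs         -> USER_LOCK_BUSY cleared
+ *	bast arrives                  -> USER_LOCK_BLOCKED set, lockres
+ *	                                 queued (USER_LOCK_QUEUED)
+ *	downconvert ast runs          -> USER_LOCK_BLOCKED cleared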
*/ +#define USER_LOCK_ATTACHED (0x00000001) /* we have initialized + * the lvb */ +#define USER_LOCK_BUSY (0x00000002) /* we are currently in + * dlm_lock */ +#define USER_LOCK_BLOCKED (0x00000004) /* blocked waiting to + * downconvert*/ +#define USER_LOCK_IN_TEARDOWN (0x00000008) /* we're currently + * destroying this + * lock. */ +#define USER_LOCK_QUEUED (0x00000010) /* lock is on the + * workqueue */ +#define USER_LOCK_IN_CANCEL (0x00000020) + +struct user_lock_res { + spinlock_t l_lock; + + int l_flags; + +#define USER_DLM_LOCK_ID_MAX_LEN 32 + char l_name[USER_DLM_LOCK_ID_MAX_LEN]; + int l_namelen; + int l_level; + unsigned int l_ro_holders; + unsigned int l_ex_holders; + struct dlm_lockstatus l_lksb; + + int l_requested; + int l_blocking; + + wait_queue_head_t l_event; + + struct work_struct l_work; +}; + +extern struct workqueue_struct *user_dlm_worker; + +void user_dlm_lock_res_init(struct user_lock_res *lockres, + struct dentry *dentry); +int user_dlm_destroy_lock(struct user_lock_res *lockres); +int user_dlm_cluster_lock(struct user_lock_res *lockres, + int level, + int lkm_flags); +void user_dlm_cluster_unlock(struct user_lock_res *lockres, + int level); +void user_dlm_write_lvb(struct inode *inode, + const char *val, + unsigned int len); +void user_dlm_read_lvb(struct inode *inode, + char *val, + unsigned int len); +struct dlm_ctxt *user_dlm_register_context(struct qstr *name, + struct dlm_protocol_version *proto); +void user_dlm_unregister_context(struct dlm_ctxt *dlm); + +struct dlmfs_inode_private { + struct dlm_ctxt *ip_dlm; + + struct user_lock_res ip_lockres; /* unused for directories. */ + struct inode *ip_parent; + + struct inode ip_vfs_inode; +}; + +static inline struct dlmfs_inode_private * +DLMFS_I(struct inode *inode) +{ + return container_of(inode, + struct dlmfs_inode_private, + ip_vfs_inode); +} + +struct dlmfs_filp_private { + int fp_lock_level; +}; + +#define DLMFS_MAGIC 0x76a9f425 + +#endif /* USERDLM_H */ diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c new file mode 100644 index 0000000..6e6cc0a --- /dev/null +++ b/fs/ocfs2/dlmglue.c @@ -0,0 +1,3659 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmglue.c + * + * Code which implements an OCFS2 specific interface to our DLM. + * + * Copyright (C) 2003, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/kthread.h> +#include <linux/pagemap.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/time.h> + +#define MLOG_MASK_PREFIX ML_DLM_GLUE +#include <cluster/masklog.h> + +#include "ocfs2.h" +#include "ocfs2_lockingver.h" + +#include "alloc.h" +#include "dcache.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "file.h" +#include "heartbeat.h" +#include "inode.h" +#include "journal.h" +#include "stackglue.h" +#include "slot_map.h" +#include "super.h" +#include "uptodate.h" + +#include "buffer_head_io.h" + +struct ocfs2_mask_waiter { + struct list_head mw_item; + int mw_status; + struct completion mw_complete; + unsigned long mw_mask; + unsigned long mw_goal; +#ifdef CONFIG_OCFS2_FS_STATS + unsigned long long mw_lock_start; +#endif +}; + +static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres); +static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres); +static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres); + +/* + * Return value from ->downconvert_worker functions. + * + * These control the precise actions of ocfs2_unblock_lock() + * and ocfs2_process_blocked_lock() + * + */ +enum ocfs2_unblock_action { + UNBLOCK_CONTINUE = 0, /* Continue downconvert */ + UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire + * ->post_unlock callback */ + UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire + * ->post_unlock() callback. */ +}; + +struct ocfs2_unblock_ctl { + int requeue; + enum ocfs2_unblock_action unblock_action; +}; + +static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, + int new_level); +static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres); + +static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, + int blocking); + +static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, + int blocking); + +static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); + + +#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres) + +/* This aids in debugging situations where a bad LVB might be involved. */ +static void ocfs2_dump_meta_lvb_info(u64 level, + const char *function, + unsigned int line, + struct ocfs2_lock_res *lockres) +{ + struct ocfs2_meta_lvb *lvb = + (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + + mlog(level, "LVB information for %s (called from %s:%u):\n", + lockres->l_name, function, line); + mlog(level, "version: %u, clusters: %u, generation: 0x%x\n", + lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters), + be32_to_cpu(lvb->lvb_igeneration)); + mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n", + (unsigned long long)be64_to_cpu(lvb->lvb_isize), + be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid), + be16_to_cpu(lvb->lvb_imode)); + mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, " + "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink), + (long long)be64_to_cpu(lvb->lvb_iatime_packed), + (long long)be64_to_cpu(lvb->lvb_ictime_packed), + (long long)be64_to_cpu(lvb->lvb_imtime_packed), + be32_to_cpu(lvb->lvb_iattr)); +} + + +/* + * OCFS2 Lock Resource Operations + * + * These fine tune the behavior of the generic dlmglue locking infrastructure. 
+ * + * The most basic of lock types can point ->l_priv to their respective + * struct ocfs2_super and allow the default actions to manage things. + * + * Right now, each lock type also needs to implement an init function, + * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres() + * should be called when the lock is no longer needed (i.e., object + * destruction time). + */ +struct ocfs2_lock_res_ops { + /* + * Translate an ocfs2_lock_res * into an ocfs2_super *. Define + * this callback if ->l_priv is not an ocfs2_super pointer + */ + struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *); + + /* + * Optionally called in the downconvert thread after a + * successful downconvert. The lockres will not be referenced + * after this callback is called, so it is safe to free + * memory, etc. + * + * The exact semantics of when this is called are controlled + * by ->downconvert_worker() + */ + void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *); + + /* + * Allow a lock type to add checks to determine whether it is + * safe to downconvert a lock. Return 0 to re-queue the + * downconvert at a later time, nonzero to continue. + * + * For most locks, the default checks that there are no + * incompatible holders are sufficient. + * + * Called with the lockres spinlock held. + */ + int (*check_downconvert)(struct ocfs2_lock_res *, int); + + /* + * Allows a lock type to populate the lock value block. This + * is called on downconvert, and when we drop a lock. + * + * Locks that want to use this should set LOCK_TYPE_USES_LVB + * in the flags field. + * + * Called with the lockres spinlock held. + */ + void (*set_lvb)(struct ocfs2_lock_res *); + + /* + * Called from the downconvert thread when it is determined + * that a lock will be downconverted. This is called without + * any locks held so the function can do work that might + * schedule (syncing out data, etc). + * + * This should return any one of the ocfs2_unblock_action + * values, depending on what it wants the thread to do. + */ + int (*downconvert_worker)(struct ocfs2_lock_res *, int); + + /* + * LOCK_TYPE_* flags which describe the specific requirements + * of a lock type. Descriptions of each individual flag follow. + */ + int flags; +}; + +/* + * Some locks want to "refresh" potentially stale data when a + * meaningful (PRMODE or EXMODE) lock level is first obtained. If this + * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the + * individual lockres l_flags member from the ast function. It is + * expected that the locking wrapper will clear the + * OCFS2_LOCK_NEEDS_REFRESH flag when done. + */ +#define LOCK_TYPE_REQUIRES_REFRESH 0x1 + +/* + * Indicate that a lock type makes use of the lock value block. The + * ->set_lvb lock type callback must be defined. 
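+ *
+ * See ocfs2_inode_inode_lops below for a lock type that combines both
+ * flags: it supplies ->set_lvb() and ->check_downconvert() and sets
+ * LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB.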
+ */ +#define LOCK_TYPE_USES_LVB 0x2 + +static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = { + .get_osb = ocfs2_get_inode_osb, + .flags = 0, +}; + +static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = { + .get_osb = ocfs2_get_inode_osb, + .check_downconvert = ocfs2_check_meta_downconvert, + .set_lvb = ocfs2_set_meta_lvb, + .downconvert_worker = ocfs2_data_convert_worker, + .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB, +}; + +static struct ocfs2_lock_res_ops ocfs2_super_lops = { + .flags = LOCK_TYPE_REQUIRES_REFRESH, +}; + +static struct ocfs2_lock_res_ops ocfs2_rename_lops = { + .flags = 0, +}; + +static struct ocfs2_lock_res_ops ocfs2_dentry_lops = { + .get_osb = ocfs2_get_dentry_osb, + .post_unlock = ocfs2_dentry_post_unlock, + .downconvert_worker = ocfs2_dentry_convert_worker, + .flags = 0, +}; + +static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = { + .get_osb = ocfs2_get_inode_osb, + .flags = 0, +}; + +static struct ocfs2_lock_res_ops ocfs2_flock_lops = { + .get_osb = ocfs2_get_file_osb, + .flags = 0, +}; + +static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) +{ + return lockres->l_type == OCFS2_LOCK_TYPE_META || + lockres->l_type == OCFS2_LOCK_TYPE_RW || + lockres->l_type == OCFS2_LOCK_TYPE_OPEN; +} + +static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres) +{ + BUG_ON(!ocfs2_is_inode_lock(lockres)); + + return (struct inode *) lockres->l_priv; +} + +static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres) +{ + BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY); + + return (struct ocfs2_dentry_lock *)lockres->l_priv; +} + +static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres) +{ + if (lockres->l_ops->get_osb) + return lockres->l_ops->get_osb(lockres); + + return (struct ocfs2_super *)lockres->l_priv; +} + +static int ocfs2_lock_create(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int level, + u32 dlm_flags); +static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres, + int wanted); +static void ocfs2_cluster_unlock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int level); +static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres); +static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres); +static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres); +static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level); +static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); +static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, + int convert); +#define ocfs2_log_dlm_error(_func, _err, _lockres) do { \ + mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \ + _err, _func, _lockres->l_name); \ +} while (0) +static int ocfs2_downconvert_thread(void *arg); +static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); +static int ocfs2_inode_lock_update(struct inode *inode, + struct buffer_head **bh); +static void ocfs2_drop_osb_locks(struct ocfs2_super *osb); +static inline int ocfs2_highest_compat_lock_level(int level); +static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres, + int new_level); +static int ocfs2_downconvert_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int new_level, + int lvb, + unsigned int generation); +static int 
ocfs2_prepare_cancel_convert(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); +static int ocfs2_cancel_convert(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); + + +static void ocfs2_build_lock_name(enum ocfs2_lock_type type, + u64 blkno, + u32 generation, + char *name) +{ + int len; + + mlog_entry_void(); + + BUG_ON(type >= OCFS2_NUM_LOCK_TYPES); + + len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x", + ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD, + (long long)blkno, generation); + + BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1)); + + mlog(0, "built lock resource with name: %s\n", name); + + mlog_exit_void(); +} + +static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock); + +static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res, + struct ocfs2_dlm_debug *dlm_debug) +{ + mlog(0, "Add tracking for lockres %s\n", res->l_name); + + spin_lock(&ocfs2_dlm_tracking_lock); + list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking); + spin_unlock(&ocfs2_dlm_tracking_lock); +} + +static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res) +{ + spin_lock(&ocfs2_dlm_tracking_lock); + if (!list_empty(&res->l_debug_list)) + list_del_init(&res->l_debug_list); + spin_unlock(&ocfs2_dlm_tracking_lock); +} + +#ifdef CONFIG_OCFS2_FS_STATS +static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) +{ + res->l_lock_num_prmode = 0; + res->l_lock_num_prmode_failed = 0; + res->l_lock_total_prmode = 0; + res->l_lock_max_prmode = 0; + res->l_lock_num_exmode = 0; + res->l_lock_num_exmode_failed = 0; + res->l_lock_total_exmode = 0; + res->l_lock_max_exmode = 0; + res->l_lock_refresh = 0; +} + +static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level, + struct ocfs2_mask_waiter *mw, int ret) +{ + unsigned long long *num, *sum; + unsigned int *max, *failed; + struct timespec ts = current_kernel_time(); + unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start; + + if (level == LKM_PRMODE) { + num = &res->l_lock_num_prmode; + sum = &res->l_lock_total_prmode; + max = &res->l_lock_max_prmode; + failed = &res->l_lock_num_prmode_failed; + } else if (level == LKM_EXMODE) { + num = &res->l_lock_num_exmode; + sum = &res->l_lock_total_exmode; + max = &res->l_lock_max_exmode; + failed = &res->l_lock_num_exmode_failed; + } else + return; + + (*num)++; + (*sum) += time; + if (time > *max) + *max = time; + if (ret) + (*failed)++; +} + +static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) +{ + lockres->l_lock_refresh++; +} + +static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) +{ + struct timespec ts = current_kernel_time(); + mw->mw_lock_start = timespec_to_ns(&ts); +} +#else +static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res) +{ +} +static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, + int level, struct ocfs2_mask_waiter *mw, int ret) +{ +} +static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres) +{ +} +static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw) +{ +} +#endif + +static void ocfs2_lock_res_init_common(struct ocfs2_super *osb, + struct ocfs2_lock_res *res, + enum ocfs2_lock_type type, + struct ocfs2_lock_res_ops *ops, + void *priv) +{ + res->l_type = type; + res->l_ops = ops; + res->l_priv = priv; + + res->l_level = DLM_LOCK_IV; + res->l_requested = DLM_LOCK_IV; + res->l_blocking = DLM_LOCK_IV; + res->l_action = OCFS2_AST_INVALID; + res->l_unlock_action = OCFS2_UNLOCK_INVALID; + + res->l_flags = OCFS2_LOCK_INITIALIZED; + + 
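+	/* Publish the fully initialized lockres on the debug tracking
+	 * list; ocfs2_lock_res_free() removes it again via
+	 * ocfs2_remove_lockres_tracking(). */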
ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug); + + ocfs2_init_lock_stats(res); +} + +void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res) +{ + /* This also clears out the lock status block */ + memset(res, 0, sizeof(struct ocfs2_lock_res)); + spin_lock_init(&res->l_lock); + init_waitqueue_head(&res->l_event); + INIT_LIST_HEAD(&res->l_blocked_list); + INIT_LIST_HEAD(&res->l_mask_waiters); +} + +void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res, + enum ocfs2_lock_type type, + unsigned int generation, + struct inode *inode) +{ + struct ocfs2_lock_res_ops *ops; + + switch(type) { + case OCFS2_LOCK_TYPE_RW: + ops = &ocfs2_inode_rw_lops; + break; + case OCFS2_LOCK_TYPE_META: + ops = &ocfs2_inode_inode_lops; + break; + case OCFS2_LOCK_TYPE_OPEN: + ops = &ocfs2_inode_open_lops; + break; + default: + mlog_bug_on_msg(1, "type: %d\n", type); + ops = NULL; /* thanks, gcc */ + break; + }; + + ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno, + generation, res->l_name); + ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode); +} + +static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres) +{ + struct inode *inode = ocfs2_lock_res_inode(lockres); + + return OCFS2_SB(inode->i_sb); +} + +static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres) +{ + struct ocfs2_file_private *fp = lockres->l_priv; + + return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb); +} + +static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres) +{ + __be64 inode_blkno_be; + + memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], + sizeof(__be64)); + + return be64_to_cpu(inode_blkno_be); +} + +static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres) +{ + struct ocfs2_dentry_lock *dl = lockres->l_priv; + + return OCFS2_SB(dl->dl_inode->i_sb); +} + +void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl, + u64 parent, struct inode *inode) +{ + int len; + u64 inode_blkno = OCFS2_I(inode)->ip_blkno; + __be64 inode_blkno_be = cpu_to_be64(inode_blkno); + struct ocfs2_lock_res *lockres = &dl->dl_lockres; + + ocfs2_lock_res_init_once(lockres); + + /* + * Unfortunately, the standard lock naming scheme won't work + * here because we have two 16 byte values to use. Instead, + * we'll stuff the inode number as a binary value. We still + * want error prints to show something without garbling the + * display, so drop a null byte in there before the inode + * number. A future version of OCFS2 will likely use all + * binary lock names. The stringified names have been a + * tremendous aid in debugging, but now that the debugfs + * interface exists, we can mangle things there if need be. + * + * NOTE: We also drop the standard "pad" value (the total lock + * name size stays the same though - the last part is all + * zeros due to the memset in ocfs2_lock_res_init_once() + */ + len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START, + "%c%016llx", + ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY), + (long long)parent); + + BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1)); + + memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be, + sizeof(__be64)); + + ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres, + OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops, + dl); +} + +static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res, + struct ocfs2_super *osb) +{ + /* Superblock lockres doesn't come from a slab so we call init + * once on it manually. 
*/ + ocfs2_lock_res_init_once(res); + ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO, + 0, res->l_name); + ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER, + &ocfs2_super_lops, osb); +} + +static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res, + struct ocfs2_super *osb) +{ + /* Rename lockres doesn't come from a slab so we call init + * once on it manually. */ + ocfs2_lock_res_init_once(res); + ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name); + ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME, + &ocfs2_rename_lops, osb); +} + +void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres, + struct ocfs2_file_private *fp) +{ + struct inode *inode = fp->fp_file->f_mapping->host; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + ocfs2_lock_res_init_once(lockres); + ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno, + inode->i_generation, lockres->l_name); + ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres, + OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops, + fp); + lockres->l_flags |= OCFS2_LOCK_NOCACHE; +} + +void ocfs2_lock_res_free(struct ocfs2_lock_res *res) +{ + mlog_entry_void(); + + if (!(res->l_flags & OCFS2_LOCK_INITIALIZED)) + return; + + ocfs2_remove_lockres_tracking(res); + + mlog_bug_on_msg(!list_empty(&res->l_blocked_list), + "Lockres %s is on the blocked list\n", + res->l_name); + mlog_bug_on_msg(!list_empty(&res->l_mask_waiters), + "Lockres %s has mask waiters pending\n", + res->l_name); + mlog_bug_on_msg(spin_is_locked(&res->l_lock), + "Lockres %s is locked\n", + res->l_name); + mlog_bug_on_msg(res->l_ro_holders, + "Lockres %s has %u ro holders\n", + res->l_name, res->l_ro_holders); + mlog_bug_on_msg(res->l_ex_holders, + "Lockres %s has %u ex holders\n", + res->l_name, res->l_ex_holders); + + /* Need to clear out the lock status block for the dlm */ + memset(&res->l_lksb, 0, sizeof(res->l_lksb)); + + res->l_flags = 0UL; + mlog_exit_void(); +} + +static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres, + int level) +{ + mlog_entry_void(); + + BUG_ON(!lockres); + + switch(level) { + case DLM_LOCK_EX: + lockres->l_ex_holders++; + break; + case DLM_LOCK_PR: + lockres->l_ro_holders++; + break; + default: + BUG(); + } + + mlog_exit_void(); +} + +static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres, + int level) +{ + mlog_entry_void(); + + BUG_ON(!lockres); + + switch(level) { + case DLM_LOCK_EX: + BUG_ON(!lockres->l_ex_holders); + lockres->l_ex_holders--; + break; + case DLM_LOCK_PR: + BUG_ON(!lockres->l_ro_holders); + lockres->l_ro_holders--; + break; + default: + BUG(); + } + mlog_exit_void(); +} + +/* WARNING: This function lives in a world where the only three lock + * levels are EX, PR, and NL. It *will* have to be adjusted when more + * lock types are added. 
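+ *
+ * For the three current levels the mapping reads: a lock blocking at
+ * DLM_LOCK_EX forces us down to DLM_LOCK_NL, one blocking at
+ * DLM_LOCK_PR lets us keep DLM_LOCK_PR, and anything else yields
+ * DLM_LOCK_EX.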
+ */
+static inline int ocfs2_highest_compat_lock_level(int level)
+{
+	int new_level = DLM_LOCK_EX;
+
+	if (level == DLM_LOCK_EX)
+		new_level = DLM_LOCK_NL;
+	else if (level == DLM_LOCK_PR)
+		new_level = DLM_LOCK_PR;
+	return new_level;
+}
+
+static void lockres_set_flags(struct ocfs2_lock_res *lockres,
+			      unsigned long newflags)
+{
+	struct ocfs2_mask_waiter *mw, *tmp;
+
+	assert_spin_locked(&lockres->l_lock);
+
+	lockres->l_flags = newflags;
+
+	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
+		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
+			continue;
+
+		list_del_init(&mw->mw_item);
+		mw->mw_status = 0;
+		complete(&mw->mw_complete);
+	}
+}
+static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
+{
+	lockres_set_flags(lockres, lockres->l_flags | or);
+}
+static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
+				unsigned long clear)
+{
+	lockres_set_flags(lockres, lockres->l_flags & ~clear);
+}
+
+static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
+{
+	mlog_entry_void();
+
+	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
+	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
+	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
+
+	lockres->l_level = lockres->l_requested;
+	if (lockres->l_level <=
+	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
+		lockres->l_blocking = DLM_LOCK_NL;
+		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+	}
+	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+
+	mlog_exit_void();
+}
+
+static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
+{
+	mlog_entry_void();
+
+	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
+
+	/* Convert from RO to EX doesn't really need anything as our
+	 * information is already up to date. Convert from NL to
+	 * *anything* however should mark ourselves as needing an
+	 * update */
+	if (lockres->l_level == DLM_LOCK_NL &&
+	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
+		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+	lockres->l_level = lockres->l_requested;
+	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+
+	mlog_exit_void();
+}
+
+static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
+{
+	mlog_entry_void();
+
+	BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
+	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
+
+	if (lockres->l_requested > DLM_LOCK_NL &&
+	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
+	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
+		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+	lockres->l_level = lockres->l_requested;
+	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
+	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+
+	mlog_exit_void();
+}
+
+static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
+				     int level)
+{
+	int needs_downconvert = 0;
+	mlog_entry_void();
+
+	assert_spin_locked(&lockres->l_lock);
+
+	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
+	if (level > lockres->l_blocking) {
+		/* only schedule a downconvert if we haven't already scheduled
+		 * one that goes low enough to satisfy the level we're
+		 * blocking.
this also catches the case where we get + * duplicate BASTs */ + if (ocfs2_highest_compat_lock_level(level) < + ocfs2_highest_compat_lock_level(lockres->l_blocking)) + needs_downconvert = 1; + + lockres->l_blocking = level; + } + + mlog_exit(needs_downconvert); + return needs_downconvert; +} + +/* + * OCFS2_LOCK_PENDING and l_pending_gen. + * + * Why does OCFS2_LOCK_PENDING exist? To close a race between setting + * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock() + * for more details on the race. + * + * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces + * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock() + * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear + * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns, + * the caller is going to try to clear PENDING again. If nothing else is + * happening, __lockres_clear_pending() sees PENDING is unset and does + * nothing. + * + * But what if another path (eg downconvert thread) has just started a + * new locking action? The other path has re-set PENDING. Our path + * cannot clear PENDING, because that will re-open the original race + * window. + * + * [Example] + * + * ocfs2_meta_lock() + * ocfs2_cluster_lock() + * set BUSY + * set PENDING + * drop l_lock + * ocfs2_dlm_lock() + * ocfs2_locking_ast() ocfs2_downconvert_thread() + * clear PENDING ocfs2_unblock_lock() + * take_l_lock + * !BUSY + * ocfs2_prepare_downconvert() + * set BUSY + * set PENDING + * drop l_lock + * take l_lock + * clear PENDING + * drop l_lock + * <window> + * ocfs2_dlm_lock() + * + * So as you can see, we now have a window where l_lock is not held, + * PENDING is not set, and ocfs2_dlm_lock() has not been called. + * + * The core problem is that ocfs2_cluster_lock() has cleared the PENDING + * set by ocfs2_prepare_downconvert(). That wasn't nice. + * + * To solve this we introduce l_pending_gen. A call to + * lockres_clear_pending() will only do so when it is passed a generation + * number that matches the lockres. lockres_set_pending() will return the + * current generation number. When ocfs2_cluster_lock() goes to clear + * PENDING, it passes the generation it got from set_pending(). In our + * example above, the generation numbers will *not* match. Thus, + * ocfs2_cluster_lock() will not clear the PENDING set by + * ocfs2_prepare_downconvert(). + */ + +/* Unlocked version for ocfs2_locking_ast() */ +static void __lockres_clear_pending(struct ocfs2_lock_res *lockres, + unsigned int generation, + struct ocfs2_super *osb) +{ + assert_spin_locked(&lockres->l_lock); + + /* + * The ast and locking functions can race us here. The winner + * will clear pending, the loser will not. + */ + if (!(lockres->l_flags & OCFS2_LOCK_PENDING) || + (lockres->l_pending_gen != generation)) + return; + + lockres_clear_flags(lockres, OCFS2_LOCK_PENDING); + lockres->l_pending_gen++; + + /* + * The downconvert thread may have skipped us because we + * were PENDING. Wake it up. 
+ */ + if (lockres->l_flags & OCFS2_LOCK_BLOCKED) + ocfs2_wake_downconvert_thread(osb); +} + +/* Locked version for callers of ocfs2_dlm_lock() */ +static void lockres_clear_pending(struct ocfs2_lock_res *lockres, + unsigned int generation, + struct ocfs2_super *osb) +{ + unsigned long flags; + + spin_lock_irqsave(&lockres->l_lock, flags); + __lockres_clear_pending(lockres, generation, osb); + spin_unlock_irqrestore(&lockres->l_lock, flags); +} + +static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres) +{ + assert_spin_locked(&lockres->l_lock); + BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY)); + + lockres_or_flags(lockres, OCFS2_LOCK_PENDING); + + return lockres->l_pending_gen; +} + + +static void ocfs2_blocking_ast(void *opaque, int level) +{ + struct ocfs2_lock_res *lockres = opaque; + struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); + int needs_downconvert; + unsigned long flags; + + BUG_ON(level <= DLM_LOCK_NL); + + mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n", + lockres->l_name, level, lockres->l_level, + ocfs2_lock_type_string(lockres->l_type)); + + /* + * We can skip the bast for locks which don't enable caching - + * they'll be dropped at the earliest possible time anyway. + */ + if (lockres->l_flags & OCFS2_LOCK_NOCACHE) + return; + + spin_lock_irqsave(&lockres->l_lock, flags); + needs_downconvert = ocfs2_generic_handle_bast(lockres, level); + if (needs_downconvert) + ocfs2_schedule_blocked_lock(osb, lockres); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + wake_up(&lockres->l_event); + + ocfs2_wake_downconvert_thread(osb); +} + +static void ocfs2_locking_ast(void *opaque) +{ + struct ocfs2_lock_res *lockres = opaque; + struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres); + unsigned long flags; + int status; + + spin_lock_irqsave(&lockres->l_lock, flags); + + status = ocfs2_dlm_lock_status(&lockres->l_lksb); + + if (status == -EAGAIN) { + lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); + goto out; + } + + if (status) { + mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n", + lockres->l_name, status); + spin_unlock_irqrestore(&lockres->l_lock, flags); + return; + } + + switch(lockres->l_action) { + case OCFS2_AST_ATTACH: + ocfs2_generic_handle_attach_action(lockres); + lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL); + break; + case OCFS2_AST_CONVERT: + ocfs2_generic_handle_convert_action(lockres); + break; + case OCFS2_AST_DOWNCONVERT: + ocfs2_generic_handle_downconvert_action(lockres); + break; + default: + mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u " + "lockres flags = 0x%lx, unlock action: %u\n", + lockres->l_name, lockres->l_action, lockres->l_flags, + lockres->l_unlock_action); + BUG(); + } +out: + /* set it to something invalid so if we get called again we + * can catch it. */ + lockres->l_action = OCFS2_AST_INVALID; + + /* Did we try to cancel this lock? Clear that state */ + if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) + lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; + + /* + * We may have beaten the locking functions here. We certainly + * know that dlm_lock() has been called :-) + * Because we can't have two lock calls in flight at once, we + * can use lockres->l_pending_gen. 
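+	 *
+	 * (Callers follow the pattern visible in ocfs2_lock_create()
+	 * and ocfs2_cluster_lock():
+	 *
+	 *	gen = lockres_set_pending(lockres);
+	 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
+	 *	ret = ocfs2_dlm_lock(...);
+	 *	lockres_clear_pending(lockres, gen, osb);
+	 *
+	 * so if this ast already cleared PENDING and bumped
+	 * l_pending_gen, the caller's later clear is a harmless no-op.)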
+ */ + __lockres_clear_pending(lockres, lockres->l_pending_gen, osb); + + wake_up(&lockres->l_event); + spin_unlock_irqrestore(&lockres->l_lock, flags); +} + +static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres, + int convert) +{ + unsigned long flags; + + mlog_entry_void(); + spin_lock_irqsave(&lockres->l_lock, flags); + lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); + if (convert) + lockres->l_action = OCFS2_AST_INVALID; + else + lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; + spin_unlock_irqrestore(&lockres->l_lock, flags); + + wake_up(&lockres->l_event); + mlog_exit_void(); +} + +/* Note: If we detect another process working on the lock (i.e., + * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller + * to do the right thing in that case. + */ +static int ocfs2_lock_create(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int level, + u32 dlm_flags) +{ + int ret = 0; + unsigned long flags; + unsigned int gen; + + mlog_entry_void(); + + mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level, + dlm_flags); + + spin_lock_irqsave(&lockres->l_lock, flags); + if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) || + (lockres->l_flags & OCFS2_LOCK_BUSY)) { + spin_unlock_irqrestore(&lockres->l_lock, flags); + goto bail; + } + + lockres->l_action = OCFS2_AST_ATTACH; + lockres->l_requested = level; + lockres_or_flags(lockres, OCFS2_LOCK_BUSY); + gen = lockres_set_pending(lockres); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + ret = ocfs2_dlm_lock(osb->cconn, + level, + &lockres->l_lksb, + dlm_flags, + lockres->l_name, + OCFS2_LOCK_ID_MAX_LEN - 1, + lockres); + lockres_clear_pending(lockres, gen, osb); + if (ret) { + ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); + ocfs2_recover_from_dlm_error(lockres, 1); + } + + mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name); + +bail: + mlog_exit(ret); + return ret; +} + +static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres, + int flag) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&lockres->l_lock, flags); + ret = lockres->l_flags & flag; + spin_unlock_irqrestore(&lockres->l_lock, flags); + + return ret; +} + +static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres) + +{ + wait_event(lockres->l_event, + !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY)); +} + +static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres) + +{ + wait_event(lockres->l_event, + !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING)); +} + +/* predict what lock level we'll be dropping down to on behalf + * of another node, and return true if the currently wanted + * level will be compatible with it. 
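+ *
+ * (Example: if l_blocking is DLM_LOCK_EX, only a wanted level of
+ * DLM_LOCK_NL may proceed; if l_blocking is DLM_LOCK_PR, wanted
+ * levels of DLM_LOCK_PR and DLM_LOCK_NL may both continue.)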
*/ +static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres, + int wanted) +{ + BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); + + return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking); +} + +static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw) +{ + INIT_LIST_HEAD(&mw->mw_item); + init_completion(&mw->mw_complete); + ocfs2_init_start_time(mw); +} + +static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw) +{ + wait_for_completion(&mw->mw_complete); + /* Re-arm the completion in case we want to wait on it again */ + INIT_COMPLETION(mw->mw_complete); + return mw->mw_status; +} + +static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres, + struct ocfs2_mask_waiter *mw, + unsigned long mask, + unsigned long goal) +{ + BUG_ON(!list_empty(&mw->mw_item)); + + assert_spin_locked(&lockres->l_lock); + + list_add_tail(&mw->mw_item, &lockres->l_mask_waiters); + mw->mw_mask = mask; + mw->mw_goal = goal; +} + +/* returns 0 if the mw that was removed was already satisfied, -EBUSY + * if the mask still hadn't reached its goal */ +static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres, + struct ocfs2_mask_waiter *mw) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&lockres->l_lock, flags); + if (!list_empty(&mw->mw_item)) { + if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal) + ret = -EBUSY; + + list_del_init(&mw->mw_item); + init_completion(&mw->mw_complete); + } + spin_unlock_irqrestore(&lockres->l_lock, flags); + + return ret; + +} + +static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw, + struct ocfs2_lock_res *lockres) +{ + int ret; + + ret = wait_for_completion_interruptible(&mw->mw_complete); + if (ret) + lockres_remove_mask_waiter(lockres, mw); + else + ret = mw->mw_status; + /* Re-arm the completion in case we want to wait on it again */ + INIT_COMPLETION(mw->mw_complete); + return ret; +} + +static int ocfs2_cluster_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int level, + u32 lkm_flags, + int arg_flags) +{ + struct ocfs2_mask_waiter mw; + int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR); + int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */ + unsigned long flags; + unsigned int gen; + int noqueue_attempted = 0; + + mlog_entry_void(); + + ocfs2_init_mask_waiter(&mw); + + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) + lkm_flags |= DLM_LKF_VALBLK; + +again: + wait = 0; + + if (catch_signals && signal_pending(current)) { + ret = -ERESTARTSYS; + goto out; + } + + spin_lock_irqsave(&lockres->l_lock, flags); + + mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, + "Cluster lock called on freeing lockres %s! flags " + "0x%lx\n", lockres->l_name, lockres->l_flags); + + /* We only compare against the currently granted level + * here. If the lock is blocked waiting on a downconvert, + * we'll get caught below. */ + if (lockres->l_flags & OCFS2_LOCK_BUSY && + level > lockres->l_level) { + /* is someone sitting in dlm_lock? If so, wait on + * them. 
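+	 * Waiting here means queueing a mask waiter for OCFS2_LOCK_BUSY
+	 * to clear; once l_lock is dropped we block in
+	 * ocfs2_wait_for_mask() until lockres_set_flags() completes us.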
+	 */
+		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+		wait = 1;
+		goto unlock;
+	}
+
+	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
+	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
+		/* the lock is currently blocked on behalf of
+		 * another node */
+		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
+		wait = 1;
+		goto unlock;
+	}
+
+	if (level > lockres->l_level) {
+		if (noqueue_attempted > 0) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+		if (lkm_flags & DLM_LKF_NOQUEUE)
+			noqueue_attempted = 1;
+
+		if (lockres->l_action != OCFS2_AST_INVALID)
+			mlog(ML_ERROR, "lockres %s has action %u pending\n",
+			     lockres->l_name, lockres->l_action);
+
+		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+			lockres->l_action = OCFS2_AST_ATTACH;
+			lkm_flags &= ~DLM_LKF_CONVERT;
+		} else {
+			lockres->l_action = OCFS2_AST_CONVERT;
+			lkm_flags |= DLM_LKF_CONVERT;
+		}
+
+		lockres->l_requested = level;
+		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+		gen = lockres_set_pending(lockres);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+		BUG_ON(level == DLM_LOCK_IV);
+		BUG_ON(level == DLM_LOCK_NL);
+
+		mlog(0, "lock %s, convert from %d to level = %d\n",
+		     lockres->l_name, lockres->l_level, level);
+
+		/* call dlm_lock to upgrade lock now */
+		ret = ocfs2_dlm_lock(osb->cconn,
+				     level,
+				     &lockres->l_lksb,
+				     lkm_flags,
+				     lockres->l_name,
+				     OCFS2_LOCK_ID_MAX_LEN - 1,
+				     lockres);
+		lockres_clear_pending(lockres, gen, osb);
+		if (ret) {
+			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
+			    (ret != -EAGAIN)) {
+				ocfs2_log_dlm_error("ocfs2_dlm_lock",
+						    ret, lockres);
+			}
+			ocfs2_recover_from_dlm_error(lockres, 1);
+			goto out;
+		}
+
+		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
+		     lockres->l_name);
+
+		/* At this point we've gone inside the dlm and need to
+		 * complete our work regardless. */
+		catch_signals = 0;
+
+		/* wait for busy to clear and carry on */
+		goto again;
+	}
+
+	/* Ok, if we get here then we're good to go. */
+	ocfs2_inc_holders(lockres, level);
+
+	ret = 0;
+unlock:
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+out:
+	/*
+	 * This is helping work around a lock inversion between the page lock
+	 * and dlm locks. One path holds the page lock while calling aops
+	 * which block acquiring dlm locks. The downconvert thread holds dlm
+	 * locks while acquiring page locks while downconverting data locks.
+	 * This block is helping an aop path notice the inversion and back
+	 * off to unlock its page lock before trying the dlm lock again.
+	 */
+	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
+	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
+		wait = 0;
+		if (lockres_remove_mask_waiter(lockres, &mw))
+			ret = -EAGAIN;
+		else
+			goto again;
+	}
+	if (wait) {
+		ret = ocfs2_wait_for_mask(&mw);
+		if (ret == 0)
+			goto again;
+		mlog_errno(ret);
+	}
+	ocfs2_update_lock_stats(lockres, level, &mw, ret);
+
+	mlog_exit(ret);
+	return ret;
+}
+
+static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
+				 struct ocfs2_lock_res *lockres,
+				 int level)
+{
+	unsigned long flags;
+
+	mlog_entry_void();
+	spin_lock_irqsave(&lockres->l_lock, flags);
+	ocfs2_dec_holders(lockres, level);
+	ocfs2_downconvert_on_unlock(osb, lockres);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+	mlog_exit_void();
+}
+
+static int ocfs2_create_new_lock(struct ocfs2_super *osb,
+				 struct ocfs2_lock_res *lockres,
+				 int ex,
+				 int local)
+{
+	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+	unsigned long flags;
+	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
+
+	spin_lock_irqsave(&lockres->l_lock, flags);
+	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
+	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
+}
+
+/* Grants us an EX lock on the data and metadata resources, skipping
+ * the normal cluster directory lookup. Use this ONLY on newly created
+ * inodes which other nodes can't possibly see, and which haven't been
+ * hashed in the inode hash yet. This can give us a good performance
+ * increase as it'll skip the network broadcast normally associated
+ * with creating a new lock resource. */
+int ocfs2_create_new_inode_locks(struct inode *inode)
+{
+	int ret;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	BUG_ON(!inode);
+	BUG_ON(!ocfs2_inode_is_new(inode));
+
+	mlog_entry_void();
+
+	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+	/* Note that we don't increment any of the holder counts, nor
+	 * do we add anything to a journal handle. Since this is
+	 * supposed to be a new inode which the cluster doesn't know
+	 * about yet, there is no need to. As far as the LVB handling
+	 * is concerned, this is basically like acquiring an EX lock
+	 * on a resource which has an invalid one -- we'll set it
+	 * valid when we release the EX. */
+
+	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
+	if (ret) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
+	/*
+	 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
+	 * don't use a generation in their lock names.
+	 */
+	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
+	if (ret) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
+	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
+	if (ret) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
+bail:
+	mlog_exit(ret);
+	return ret;
+}
+
+int ocfs2_rw_lock(struct inode *inode, int write)
+{
+	int status, level;
+	struct ocfs2_lock_res *lockres;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	BUG_ON(!inode);
+
+	mlog_entry_void();
+
+	mlog(0, "inode %llu take %s RW lock\n",
+	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
+	     write ? "EXMODE" : "PRMODE");
+
+	if (ocfs2_mount_local(osb))
+		return 0;
+
+	lockres = &OCFS2_I(inode)->ip_rw_lockres;
+
+	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
+				    0);
+	if (status < 0)
+		mlog_errno(status);
+
+	mlog_exit(status);
+	return status;
+}
+
+void ocfs2_rw_unlock(struct inode *inode, int write)
+{
+	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	mlog_entry_void();
+
+	mlog(0, "inode %llu drop %s RW lock\n",
+	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
+	     write ? "EXMODE" : "PRMODE");
+
+	if (!ocfs2_mount_local(osb))
+		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+
+	mlog_exit_void();
+}
+
+/*
+ * ocfs2_open_lock always gets a PR mode lock.
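+ *
+ * (Broadly, a node holds this PR lock while it has the inode cached;
+ * ocfs2_try_open_lock() below then uses an EX request with
+ * DLM_LKF_NOQUEUE to test whether any other node still has the inode
+ * open -- an -EAGAIN there means "still in use somewhere".)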
+ */
+int ocfs2_open_lock(struct inode *inode)
+{
+	int status = 0;
+	struct ocfs2_lock_res *lockres;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	BUG_ON(!inode);
+
+	mlog_entry_void();
+
+	mlog(0, "inode %llu take PRMODE open lock\n",
+	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+	if (ocfs2_mount_local(osb))
+		goto out;
+
+	lockres = &OCFS2_I(inode)->ip_open_lockres;
+
+	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
+				    DLM_LOCK_PR, 0, 0);
+	if (status < 0)
+		mlog_errno(status);
+
+out:
+	mlog_exit(status);
+	return status;
+}
+
+int ocfs2_try_open_lock(struct inode *inode, int write)
+{
+	int status = 0, level;
+	struct ocfs2_lock_res *lockres;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	BUG_ON(!inode);
+
+	mlog_entry_void();
+
+	mlog(0, "inode %llu try to take %s open lock\n",
+	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
+	     write ? "EXMODE" : "PRMODE");
+
+	if (ocfs2_mount_local(osb))
+		goto out;
+
+	lockres = &OCFS2_I(inode)->ip_open_lockres;
+
+	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+	/*
+	 * The file system may already be holding a PRMODE/EXMODE open lock.
+	 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
+	 * other nodes and the -EAGAIN will indicate to the caller that
+	 * this inode is still in use.
+	 */
+	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
+				    level, DLM_LKF_NOQUEUE, 0);
+
+out:
+	mlog_exit(status);
+	return status;
+}
+
+/*
+ * ocfs2_open_unlock unlocks PR and EX mode open locks.
+ */
+void ocfs2_open_unlock(struct inode *inode)
+{
+	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	mlog_entry_void();
+
+	mlog(0, "inode %llu drop open lock\n",
+	     (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+	if (ocfs2_mount_local(osb))
+		goto out;
+
+	if (lockres->l_ro_holders)
+		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
+				     DLM_LOCK_PR);
+	if (lockres->l_ex_holders)
+		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
+				     DLM_LOCK_EX);
+
+out:
+	mlog_exit_void();
+}
+
+static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
+				     int level)
+{
+	int ret;
+	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
+	unsigned long flags;
+	struct ocfs2_mask_waiter mw;
+
+	ocfs2_init_mask_waiter(&mw);
+
+retry_cancel:
+	spin_lock_irqsave(&lockres->l_lock, flags);
+	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
+		ret = ocfs2_prepare_cancel_convert(osb, lockres);
+		if (ret) {
+			spin_unlock_irqrestore(&lockres->l_lock, flags);
+			ret = ocfs2_cancel_convert(osb, lockres);
+			if (ret < 0) {
+				mlog_errno(ret);
+				goto out;
+			}
+			goto retry_cancel;
+		}
+		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+		ocfs2_wait_for_mask(&mw);
+		goto retry_cancel;
+	}
+
+	ret = -ERESTARTSYS;
+	/*
+	 * We may still have gotten the lock, in which case there's no
+	 * point to restarting the syscall.
+	 */
+	if (lockres->l_level == level)
+		ret = 0;
+
+	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
+	     lockres->l_flags, lockres->l_level, lockres->l_action);
+
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+out:
+	return ret;
+}
+
+/*
+ * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
+ * flock() calls. The locking approach this requires is sufficiently
+ * different from all other cluster lock types that we implement a
+ * separate path to the "low-level" dlm calls. In particular:
+ *
+ * - No optimization of lock levels is done - we take exactly
+ *   what's been requested.
+ *
+ * - No lock caching is employed. We immediately downconvert to
+ *   no-lock at unlock time. (This also means flock locks never go on
+ *   the blocking list.)
+ *
+ * - Since userspace can trivially deadlock itself with flock, we make
+ *   sure to allow cancellation of a misbehaving application's flock()
+ *   request.
+ *
+ * - Access to any flock lockres doesn't require concurrency, so we
+ *   can simplify the code by requiring the caller to guarantee
+ *   serialization of dlmglue flock calls.
+ */
+int ocfs2_file_lock(struct file *file, int ex, int trylock)
+{
+	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
+	unsigned long flags;
+	struct ocfs2_file_private *fp = file->private_data;
+	struct ocfs2_lock_res *lockres = &fp->fp_flock;
+	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
+	struct ocfs2_mask_waiter mw;
+
+	ocfs2_init_mask_waiter(&mw);
+
+	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
+	    (lockres->l_level > DLM_LOCK_NL)) {
+		mlog(ML_ERROR,
+		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
+		     "level: %u\n", lockres->l_name, lockres->l_flags,
+		     lockres->l_level);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&lockres->l_lock, flags);
+	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+		/*
+		 * Get the lock at NLMODE to start - that way we
+		 * can cancel the upconvert request if need be.
+		 */
+		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		ret = ocfs2_wait_for_mask(&mw);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+		spin_lock_irqsave(&lockres->l_lock, flags);
+	}
+
+	lockres->l_action = OCFS2_AST_CONVERT;
+	lkm_flags |= DLM_LKF_CONVERT;
+	lockres->l_requested = level;
+	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+
+	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+	spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
+			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
+			     lockres);
+	if (ret) {
+		if (!trylock || (ret != -EAGAIN)) {
+			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
+			ret = -EINVAL;
+		}
+
+		ocfs2_recover_from_dlm_error(lockres, 1);
+		lockres_remove_mask_waiter(lockres, &mw);
+		goto out;
+	}
+
+	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
+	if (ret == -ERESTARTSYS) {
+		/*
+		 * Userspace can cause deadlock itself with
+		 * flock(). Current behavior locally is to allow the
+		 * deadlock, but abort the system call if a signal is
+		 * received. We follow this example, otherwise a
+		 * poorly written program could sit in the kernel until
+		 * reboot.
+		 *
+		 * Handling this is a bit more complicated for Ocfs2
+		 * though. We can't exit this function with an
+		 * outstanding lock request, so a cancel convert is
+		 * required. We intentionally overwrite 'ret' - if the
+		 * cancel fails and the lock was granted, it's easier
+		 * to just bubble success back up to the user.
+ */ + ret = ocfs2_flock_handle_signal(lockres, level); + } else if (!ret && (level > lockres->l_level)) { + /* Trylock failed asynchronously */ + BUG_ON(!trylock); + ret = -EAGAIN; + } + +out: + + mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n", + lockres->l_name, ex, trylock, ret); + return ret; +} + +void ocfs2_file_unlock(struct file *file) +{ + int ret; + unsigned int gen; + unsigned long flags; + struct ocfs2_file_private *fp = file->private_data; + struct ocfs2_lock_res *lockres = &fp->fp_flock; + struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb); + struct ocfs2_mask_waiter mw; + + ocfs2_init_mask_waiter(&mw); + + if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) + return; + + if (lockres->l_level == DLM_LOCK_NL) + return; + + mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n", + lockres->l_name, lockres->l_flags, lockres->l_level, + lockres->l_action); + + spin_lock_irqsave(&lockres->l_lock, flags); + /* + * Fake a blocking ast for the downconvert code. + */ + lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); + lockres->l_blocking = DLM_LOCK_EX; + + gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL); + lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen); + if (ret) { + mlog_errno(ret); + return; + } + + ret = ocfs2_wait_for_mask(&mw); + if (ret) + mlog_errno(ret); +} + +static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + int kick = 0; + + mlog_entry_void(); + + /* If we know that another node is waiting on our lock, kick + * the downconvert thread pre-emptively when we reach a release + * condition. */ + if (lockres->l_flags & OCFS2_LOCK_BLOCKED) { + switch (lockres->l_blocking) { + case DLM_LOCK_EX: + if (!lockres->l_ex_holders && !lockres->l_ro_holders) + kick = 1; + break; + case DLM_LOCK_PR: + if (!lockres->l_ex_holders) + kick = 1; + break; + default: + BUG(); + } + } + + if (kick) + ocfs2_wake_downconvert_thread(osb); + + mlog_exit_void(); +} + +#define OCFS2_SEC_BITS 34 +#define OCFS2_SEC_SHIFT (64 - OCFS2_SEC_BITS) +#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1) + +/* LVB only has room for 64 bits of time here so we pack it for now: + * the seconds value takes the high OCFS2_SEC_BITS bits and the + * nanoseconds value the low OCFS2_SEC_SHIFT bits. */ +static u64 ocfs2_pack_timespec(struct timespec *spec) +{ + u64 res; + u64 sec = spec->tv_sec; + u32 nsec = spec->tv_nsec; + + res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK); + + return res; +} + +/* Call this with the lockres locked. I am reasonably sure we don't + * need ip_lock in this function as anyone who would be changing those + * values is supposed to be blocked in ocfs2_inode_lock right now. */ +static void __ocfs2_stuff_meta_lvb(struct inode *inode) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; + struct ocfs2_meta_lvb *lvb; + + mlog_entry_void(); + + lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + + /* + * Invalidate the LVB of a deleted inode - this way other + * nodes are forced to go to disk and discover the new inode + * status. 
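+ * + * A reader on another node will then fail the version check in + * ocfs2_meta_lvb_is_trustable() below and fall back to the dinode + * on disk, roughly (a sketch of the consumer side, from + * ocfs2_inode_lock_update()): + * + *	if (!ocfs2_meta_lvb_is_trustable(inode, lockres)) + *		status = ocfs2_read_block(inode, oi->ip_blkno, bh);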
+ */ + if (oi->ip_flags & OCFS2_INODE_DELETED) { + lvb->lvb_version = 0; + goto out; + } + + lvb->lvb_version = OCFS2_LVB_VERSION; + lvb->lvb_isize = cpu_to_be64(i_size_read(inode)); + lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters); + lvb->lvb_iuid = cpu_to_be32(inode->i_uid); + lvb->lvb_igid = cpu_to_be32(inode->i_gid); + lvb->lvb_imode = cpu_to_be16(inode->i_mode); + lvb->lvb_inlink = cpu_to_be16(inode->i_nlink); + lvb->lvb_iatime_packed = + cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime)); + lvb->lvb_ictime_packed = + cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime)); + lvb->lvb_imtime_packed = + cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime)); + lvb->lvb_iattr = cpu_to_be32(oi->ip_attr); + lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features); + lvb->lvb_igeneration = cpu_to_be32(inode->i_generation); + +out: + mlog_meta_lvb(0, lockres); + + mlog_exit_void(); +} + +static void ocfs2_unpack_timespec(struct timespec *spec, + u64 packed_time) +{ + spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT; + spec->tv_nsec = packed_time & OCFS2_NSEC_MASK; +} + +static void ocfs2_refresh_inode_from_lvb(struct inode *inode) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; + struct ocfs2_meta_lvb *lvb; + + mlog_entry_void(); + + mlog_meta_lvb(0, lockres); + + lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + + /* We're safe here without the lockres lock... */ + spin_lock(&oi->ip_lock); + oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters); + i_size_write(inode, be64_to_cpu(lvb->lvb_isize)); + + oi->ip_attr = be32_to_cpu(lvb->lvb_iattr); + oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures); + ocfs2_set_inode_flags(inode); + + /* fast-symlinks are a special case */ + if (S_ISLNK(inode->i_mode) && !oi->ip_clusters) + inode->i_blocks = 0; + else + inode->i_blocks = ocfs2_inode_sector_count(inode); + + inode->i_uid = be32_to_cpu(lvb->lvb_iuid); + inode->i_gid = be32_to_cpu(lvb->lvb_igid); + inode->i_mode = be16_to_cpu(lvb->lvb_imode); + inode->i_nlink = be16_to_cpu(lvb->lvb_inlink); + ocfs2_unpack_timespec(&inode->i_atime, + be64_to_cpu(lvb->lvb_iatime_packed)); + ocfs2_unpack_timespec(&inode->i_mtime, + be64_to_cpu(lvb->lvb_imtime_packed)); + ocfs2_unpack_timespec(&inode->i_ctime, + be64_to_cpu(lvb->lvb_ictime_packed)); + spin_unlock(&oi->ip_lock); + + mlog_exit_void(); +} + +static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, + struct ocfs2_lock_res *lockres) +{ + struct ocfs2_meta_lvb *lvb = + (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb); + + if (lvb->lvb_version == OCFS2_LVB_VERSION + && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation) + return 1; + return 0; +} + +/* Determine whether a lock resource needs to be refreshed, and + * arbitrate who gets to refresh it. + * + * 0 means no refresh needed. + * + * > 0 means you need to refresh this and you MUST call + * ocfs2_complete_lock_res_refresh afterwards. */ +static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres) +{ + unsigned long flags; + int status = 0; + + mlog_entry_void(); + +refresh_check: + spin_lock_irqsave(&lockres->l_lock, flags); + if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) { + spin_unlock_irqrestore(&lockres->l_lock, flags); + goto bail; + } + + if (lockres->l_flags & OCFS2_LOCK_REFRESHING) { + spin_unlock_irqrestore(&lockres->l_lock, flags); + + ocfs2_wait_on_refreshing_lock(lockres); + goto refresh_check; + } + + /* Ok, I'll be the one to refresh this lock. 
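+ * The caller pattern this arbitration expects (used by both + * ocfs2_inode_lock_update() and ocfs2_super_lock()) is roughly: + * + *	if (ocfs2_should_refresh_lock_res(lockres)) { + *		status = ...re-read state from the LVB or disk...; + *		ocfs2_complete_lock_res_refresh(lockres, status); + *	}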
*/ + lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + status = 1; +bail: + mlog_exit(status); + return status; +} + +/* If status is non-zero, I'll mark it as not being in refresh + * anymore, but I won't clear the needs refresh flag. */ +static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres, + int status) +{ + unsigned long flags; + mlog_entry_void(); + + spin_lock_irqsave(&lockres->l_lock, flags); + lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING); + if (!status) + lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + wake_up(&lockres->l_event); + + mlog_exit_void(); +} + +/* may or may not return a bh if it went to disk. */ +static int ocfs2_inode_lock_update(struct inode *inode, + struct buffer_head **bh) +{ + int status = 0; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; + struct ocfs2_dinode *fe; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + mlog_entry_void(); + + if (ocfs2_mount_local(osb)) + goto bail; + + spin_lock(&oi->ip_lock); + if (oi->ip_flags & OCFS2_INODE_DELETED) { + mlog(0, "Orphaned inode %llu was deleted while we " + "were waiting on a lock. ip_flags = 0x%x\n", + (unsigned long long)oi->ip_blkno, oi->ip_flags); + spin_unlock(&oi->ip_lock); + status = -ENOENT; + goto bail; + } + spin_unlock(&oi->ip_lock); + + if (!ocfs2_should_refresh_lock_res(lockres)) + goto bail; + + /* This will discard any caching information we might have had + * for the inode metadata. */ + ocfs2_metadata_cache_purge(inode); + + ocfs2_extent_map_trunc(inode, 0); + + if (ocfs2_meta_lvb_is_trustable(inode, lockres)) { + mlog(0, "Trusting LVB on inode %llu\n", + (unsigned long long)oi->ip_blkno); + ocfs2_refresh_inode_from_lvb(inode); + } else { + /* Boo, we have to go to disk. */ + /* read bh, cast, ocfs2_refresh_inode */ + status = ocfs2_read_block(inode, oi->ip_blkno, bh); + if (status < 0) { + mlog_errno(status); + goto bail_refresh; + } + fe = (struct ocfs2_dinode *) (*bh)->b_data; + + /* This is a good chance to make sure we're not + * locking an invalid object. + * + * We bug on a stale inode here because we checked + * above whether it was wiped from disk. The wiping + * node provides a guarantee that we receive that + * message and can mark the inode before dropping any + * locks associated with it. */ + if (!OCFS2_IS_VALID_DINODE(fe)) { + OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe); + status = -EIO; + goto bail_refresh; + } + mlog_bug_on_msg(inode->i_generation != + le32_to_cpu(fe->i_generation), + "Invalid dinode %llu disk generation: %u " + "inode->i_generation: %u\n", + (unsigned long long)oi->ip_blkno, + le32_to_cpu(fe->i_generation), + inode->i_generation); + mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) || + !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)), + "Stale dinode %llu dtime: %llu flags: 0x%x\n", + (unsigned long long)oi->ip_blkno, + (unsigned long long)le64_to_cpu(fe->i_dtime), + le32_to_cpu(fe->i_flags)); + + ocfs2_refresh_inode(inode, fe); + ocfs2_track_lock_refresh(lockres); + } + + status = 0; +bail_refresh: + ocfs2_complete_lock_res_refresh(lockres, status); +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_assign_bh(struct inode *inode, + struct buffer_head **ret_bh, + struct buffer_head *passed_bh) +{ + int status; + + if (passed_bh) { + /* Ok, the update went to disk for us, use the + * returned bh. 
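+ * The get_bh() below takes an extra reference on behalf of the + * caller; ocfs2_inode_lock_full() will still brelse() its own + * local_bh pointer afterwards.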
*/ + *ret_bh = passed_bh; + get_bh(*ret_bh); + + return 0; + } + + status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, ret_bh); + if (status < 0) + mlog_errno(status); + + return status; +} + +/* + * returns < 0 error if the callback will never be called, otherwise + * the result of the lock will be communicated via the callback. + */ +int ocfs2_inode_lock_full(struct inode *inode, + struct buffer_head **ret_bh, + int ex, + int arg_flags) +{ + int status, level, acquired; + u32 dlm_flags; + struct ocfs2_lock_res *lockres = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *local_bh = NULL; + + BUG_ON(!inode); + + mlog_entry_void(); + + mlog(0, "inode %llu, take %s META lock\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + ex ? "EXMODE" : "PRMODE"); + + status = 0; + acquired = 0; + /* We'll allow faking a readonly metadata lock for + * rodevices. */ + if (ocfs2_is_hard_readonly(osb)) { + if (ex) + status = -EROFS; + goto bail; + } + + if (ocfs2_mount_local(osb)) + goto local; + + if (!(arg_flags & OCFS2_META_LOCK_RECOVERY)) + ocfs2_wait_for_recovery(osb); + + lockres = &OCFS2_I(inode)->ip_inode_lockres; + level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + dlm_flags = 0; + if (arg_flags & OCFS2_META_LOCK_NOQUEUE) + dlm_flags |= DLM_LKF_NOQUEUE; + + status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags); + if (status < 0) { + if (status != -EAGAIN && status != -EIOCBRETRY) + mlog_errno(status); + goto bail; + } + + /* Notify the error cleanup path to drop the cluster lock. */ + acquired = 1; + + /* We wait twice because a node may have died while we were in + * the lower dlm layers. The second time though, we've + * committed to owning this lock so we don't allow signals to + * abort the operation. */ + if (!(arg_flags & OCFS2_META_LOCK_RECOVERY)) + ocfs2_wait_for_recovery(osb); + +local: + /* + * We only see this flag if we're being called from + * ocfs2_read_locked_inode(). It means we're locking an inode + * which hasn't been populated yet, so clear the refresh flag + * and let the caller handle it. + */ + if (inode->i_state & I_NEW) { + status = 0; + if (lockres) + ocfs2_complete_lock_res_refresh(lockres, 0); + goto bail; + } + + /* This is fun. The caller may want a bh back, or it may + * not. ocfs2_inode_lock_update definitely wants one in, but + * may or may not read one, depending on what's in the + * LVB. The result of all of this is that we've *only* gone to + * disk if we have to, so the complexity is worthwhile. */ + status = ocfs2_inode_lock_update(inode, &local_bh); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto bail; + } + + if (ret_bh) { + status = ocfs2_assign_bh(inode, ret_bh, local_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + +bail: + if (status < 0) { + if (ret_bh && (*ret_bh)) { + brelse(*ret_bh); + *ret_bh = NULL; + } + if (acquired) + ocfs2_inode_unlock(inode, ex); + } + + if (local_bh) + brelse(local_bh); + + mlog_exit(status); + return status; +} + +/* + * This is working around a lock inversion between tasks acquiring DLM + * locks while holding a page lock and the downconvert thread which + * blocks dlm lock acquisition while acquiring page locks. + * + * ** These _with_page variants are only intended to be called from aop + * methods that hold page locks and return a very specific *positive* error + * code that aop methods pass up to the VFS -- test for errors with != 0. 
** + * + * The DLM is called such that it returns -EAGAIN if it would have + * blocked waiting for the downconvert thread. In that case we unlock + * our page so the downconvert thread can make progress. Once we've + * done this we have to return AOP_TRUNCATED_PAGE so the aop method + * that called us can bubble that back up into the VFS, which will then + * immediately retry the aop call. + * + * We do a blocking lock and immediate unlock before returning, though, so that + * the lock has a great chance of being cached on this node by the time the VFS + * calls back to retry the aop. This has the potential to livelock as nodes + * ping locks back and forth, but that's a risk we're willing to take in order + * to avoid the lock inversion. + */ +int ocfs2_inode_lock_with_page(struct inode *inode, + struct buffer_head **ret_bh, + int ex, + struct page *page) +{ + int ret; + + ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); + if (ret == -EAGAIN) { + unlock_page(page); + if (ocfs2_inode_lock(inode, ret_bh, ex) == 0) + ocfs2_inode_unlock(inode, ex); + ret = AOP_TRUNCATED_PAGE; + } + + return ret; +} + +int ocfs2_inode_lock_atime(struct inode *inode, + struct vfsmount *vfsmnt, + int *level) +{ + int ret; + + mlog_entry_void(); + ret = ocfs2_inode_lock(inode, NULL, 0); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + /* + * If we should update atime, we will take an EX lock; + * otherwise we just take a PR lock. + */ + if (ocfs2_should_update_atime(inode, vfsmnt)) { + struct buffer_head *bh = NULL; + + ocfs2_inode_unlock(inode, 0); + ret = ocfs2_inode_lock(inode, &bh, 1); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + *level = 1; + if (ocfs2_should_update_atime(inode, vfsmnt)) + ocfs2_update_inode_atime(inode, bh); + if (bh) + brelse(bh); + } else + *level = 0; + + mlog_exit(ret); + return ret; +} + +void ocfs2_inode_unlock(struct inode *inode, + int ex) +{ + int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + mlog_entry_void(); + + mlog(0, "inode %llu drop %s META lock\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + ex ? "EXMODE" : "PRMODE"); + + if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) && + !ocfs2_mount_local(osb)) + ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); + + mlog_exit_void(); +} + +int ocfs2_super_lock(struct ocfs2_super *osb, + int ex) +{ + int status = 0; + int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; + + mlog_entry_void(); + + if (ocfs2_is_hard_readonly(osb)) + return -EROFS; + + if (ocfs2_mount_local(osb)) + goto bail; + + status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + /* The super block lock path is really in the best position to + * know when resources covered by the lock need to be + * refreshed, so we do it here. Of course, making sense of + * everything is up to the caller :) */ + status = ocfs2_should_refresh_lock_res(lockres); + if (status < 0) { + mlog_errno(status); + goto bail; + } + if (status) { + status = ocfs2_refresh_slot_info(osb); + + ocfs2_complete_lock_res_refresh(lockres, status); + + if (status < 0) + mlog_errno(status); + ocfs2_track_lock_refresh(lockres); + } +bail: + mlog_exit(status); + return status; +} + +void ocfs2_super_unlock(struct ocfs2_super *osb, + int ex) +{ + int level = ex ? 
DLM_LOCK_EX : DLM_LOCK_PR; + struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; + + if (!ocfs2_mount_local(osb)) + ocfs2_cluster_unlock(osb, lockres, level); +} + +int ocfs2_rename_lock(struct ocfs2_super *osb) +{ + int status; + struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres; + + if (ocfs2_is_hard_readonly(osb)) + return -EROFS; + + if (ocfs2_mount_local(osb)) + return 0; + + status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0); + if (status < 0) + mlog_errno(status); + + return status; +} + +void ocfs2_rename_unlock(struct ocfs2_super *osb) +{ + struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres; + + if (!ocfs2_mount_local(osb)) + ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX); +} + +int ocfs2_dentry_lock(struct dentry *dentry, int ex) +{ + int ret; + int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); + + BUG_ON(!dl); + + if (ocfs2_is_hard_readonly(osb)) + return -EROFS; + + if (ocfs2_mount_local(osb)) + return 0; + + ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0); + if (ret < 0) + mlog_errno(ret); + + return ret; +} + +void ocfs2_dentry_unlock(struct dentry *dentry, int ex) +{ + int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + struct ocfs2_dentry_lock *dl = dentry->d_fsdata; + struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); + + if (!ocfs2_mount_local(osb)) + ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); +} + +/* Reference counting of the dlm debug structure. We want this because + * open references on the debug inodes can live on after an unmount, so + * we can't rely on the ocfs2_super to always exist. */ +static void ocfs2_dlm_debug_free(struct kref *kref) +{ + struct ocfs2_dlm_debug *dlm_debug; + + dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt); + + kfree(dlm_debug); +} + +void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug) +{ + if (dlm_debug) + kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free); +} + +static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug) +{ + kref_get(&debug->d_refcnt); +} + +struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void) +{ + struct ocfs2_dlm_debug *dlm_debug; + + dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL); + if (!dlm_debug) { + mlog_errno(-ENOMEM); + goto out; + } + + kref_init(&dlm_debug->d_refcnt); + INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking); + dlm_debug->d_locking_state = NULL; +out: + return dlm_debug; +} + +/* Access to this is arbitrated for us via seq_file->sem. */ +struct ocfs2_dlm_seq_priv { + struct ocfs2_dlm_debug *p_dlm_debug; + struct ocfs2_lock_res p_iter_res; + struct ocfs2_lock_res p_tmp_res; +}; + +static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start, + struct ocfs2_dlm_seq_priv *priv) +{ + struct ocfs2_lock_res *iter, *ret = NULL; + struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug; + + assert_spin_locked(&ocfs2_dlm_tracking_lock); + + list_for_each_entry(iter, &start->l_debug_list, l_debug_list) { + /* discover the head of the list */ + if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) { + mlog(0, "End of list found, %p\n", ret); + break; + } + + /* We track our "dummy" iteration lockres' by a NULL + * l_ops field. 
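+ * (The dummies are the p_iter_res structures kzalloc()'d in + * ocfs2_dlm_debug_open() below; they never get an l_ops, so this + * test can't skip a real lockres.)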
*/ + if (iter->l_ops != NULL) { + ret = iter; + break; + } + } + + return ret; +} + +static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos) +{ + struct ocfs2_dlm_seq_priv *priv = m->private; + struct ocfs2_lock_res *iter; + + spin_lock(&ocfs2_dlm_tracking_lock); + iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv); + if (iter) { + /* Since lockres' have the lifetime of their container + * (which can be inodes, ocfs2_supers, etc) we want to + * copy this out to a temporary lockres while still + * under the spinlock. Obviously after this we can't + * trust any pointers on the copy returned, but that's + * ok as the information we want isn't typically held + * in them. */ + priv->p_tmp_res = *iter; + iter = &priv->p_tmp_res; + } + spin_unlock(&ocfs2_dlm_tracking_lock); + + return iter; +} + +static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v) +{ +} + +static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct ocfs2_dlm_seq_priv *priv = m->private; + struct ocfs2_lock_res *iter = v; + struct ocfs2_lock_res *dummy = &priv->p_iter_res; + + spin_lock(&ocfs2_dlm_tracking_lock); + iter = ocfs2_dlm_next_res(iter, priv); + list_del_init(&dummy->l_debug_list); + if (iter) { + list_add(&dummy->l_debug_list, &iter->l_debug_list); + priv->p_tmp_res = *iter; + iter = &priv->p_tmp_res; + } + spin_unlock(&ocfs2_dlm_tracking_lock); + + return iter; +} + +/* So that debugfs.ocfs2 can determine which format is being used */ +#define OCFS2_DLM_DEBUG_STR_VERSION 2 +static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) +{ + int i; + char *lvb; + struct ocfs2_lock_res *lockres = v; + + if (!lockres) + return -EINVAL; + + seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION); + + if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY) + seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1, + lockres->l_name, + (unsigned int)ocfs2_get_dentry_lock_ino(lockres)); + else + seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name); + + seq_printf(m, "%d\t" + "0x%lx\t" + "0x%x\t" + "0x%x\t" + "%u\t" + "%u\t" + "%d\t" + "%d\t", + lockres->l_level, + lockres->l_flags, + lockres->l_action, + lockres->l_unlock_action, + lockres->l_ro_holders, + lockres->l_ex_holders, + lockres->l_requested, + lockres->l_blocking); + + /* Dump the raw LVB */ + lvb = ocfs2_dlm_lvb(&lockres->l_lksb); + for(i = 0; i < DLM_LVB_LEN; i++) + seq_printf(m, "0x%x\t", lvb[i]); + +#ifdef CONFIG_OCFS2_FS_STATS +# define lock_num_prmode(_l) (_l)->l_lock_num_prmode +# define lock_num_exmode(_l) (_l)->l_lock_num_exmode +# define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed +# define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed +# define lock_total_prmode(_l) (_l)->l_lock_total_prmode +# define lock_total_exmode(_l) (_l)->l_lock_total_exmode +# define lock_max_prmode(_l) (_l)->l_lock_max_prmode +# define lock_max_exmode(_l) (_l)->l_lock_max_exmode +# define lock_refresh(_l) (_l)->l_lock_refresh +#else +# define lock_num_prmode(_l) (0ULL) +# define lock_num_exmode(_l) (0ULL) +# define lock_num_prmode_failed(_l) (0) +# define lock_num_exmode_failed(_l) (0) +# define lock_total_prmode(_l) (0ULL) +# define lock_total_exmode(_l) (0ULL) +# define lock_max_prmode(_l) (0) +# define lock_max_exmode(_l) (0) +# define lock_refresh(_l) (0) +#endif + /* The following seq_print was added in version 2 of this output */ + seq_printf(m, "%llu\t" + "%llu\t" + "%u\t" + "%u\t" + "%llu\t" + "%llu\t" + "%u\t" + "%u\t" + "%u\t", + lock_num_prmode(lockres), + lock_num_exmode(lockres), + 
lock_num_prmode_failed(lockres), + lock_num_exmode_failed(lockres), + lock_total_prmode(lockres), + lock_total_exmode(lockres), + lock_max_prmode(lockres), + lock_max_exmode(lockres), + lock_refresh(lockres)); + + /* End the line */ + seq_printf(m, "\n"); + return 0; +} + +static const struct seq_operations ocfs2_dlm_seq_ops = { + .start = ocfs2_dlm_seq_start, + .stop = ocfs2_dlm_seq_stop, + .next = ocfs2_dlm_seq_next, + .show = ocfs2_dlm_seq_show, +}; + +static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = (struct seq_file *) file->private_data; + struct ocfs2_dlm_seq_priv *priv = seq->private; + struct ocfs2_lock_res *res = &priv->p_iter_res; + + ocfs2_remove_lockres_tracking(res); + ocfs2_put_dlm_debug(priv->p_dlm_debug); + return seq_release_private(inode, file); +} + +static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file) +{ + int ret; + struct ocfs2_dlm_seq_priv *priv; + struct seq_file *seq; + struct ocfs2_super *osb; + + priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + osb = inode->i_private; + ocfs2_get_dlm_debug(osb->osb_dlm_debug); + priv->p_dlm_debug = osb->osb_dlm_debug; + INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list); + + ret = seq_open(file, &ocfs2_dlm_seq_ops); + if (ret) { + kfree(priv); + mlog_errno(ret); + goto out; + } + + seq = (struct seq_file *) file->private_data; + seq->private = priv; + + ocfs2_add_lockres_tracking(&priv->p_iter_res, + priv->p_dlm_debug); + +out: + return ret; +} + +static const struct file_operations ocfs2_dlm_debug_fops = { + .open = ocfs2_dlm_debug_open, + .release = ocfs2_dlm_debug_release, + .read = seq_read, + .llseek = seq_lseek, +}; + +static int ocfs2_dlm_init_debug(struct ocfs2_super *osb) +{ + int ret = 0; + struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug; + + dlm_debug->d_locking_state = debugfs_create_file("locking_state", + S_IFREG|S_IRUSR, + osb->osb_debug_root, + osb, + &ocfs2_dlm_debug_fops); + if (!dlm_debug->d_locking_state) { + ret = -EINVAL; + mlog(ML_ERROR, + "Unable to create locking state debugfs file.\n"); + goto out; + } + + ocfs2_get_dlm_debug(dlm_debug); +out: + return ret; +} + +static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb) +{ + struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug; + + if (dlm_debug) { + debugfs_remove(dlm_debug->d_locking_state); + ocfs2_put_dlm_debug(dlm_debug); + } +} + +int ocfs2_dlm_init(struct ocfs2_super *osb) +{ + int status = 0; + struct ocfs2_cluster_connection *conn = NULL; + + mlog_entry_void(); + + if (ocfs2_mount_local(osb)) { + osb->node_num = 0; + goto local; + } + + status = ocfs2_dlm_init_debug(osb); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + /* launch downconvert thread */ + osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc"); + if (IS_ERR(osb->dc_task)) { + status = PTR_ERR(osb->dc_task); + osb->dc_task = NULL; + mlog_errno(status); + goto bail; + } + + /* for now, uuid == domain */ + status = ocfs2_cluster_connect(osb->osb_cluster_stack, + osb->uuid_str, + strlen(osb->uuid_str), + ocfs2_do_node_down, osb, + &conn); + if (status) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_cluster_this_node(&osb->node_num); + if (status < 0) { + mlog_errno(status); + mlog(ML_ERROR, + "could not find this host's node number\n"); + ocfs2_cluster_disconnect(conn, 0); + goto bail; + } + +local: + ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb); + 
ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb); + + osb->cconn = conn; + + status = 0; +bail: + if (status < 0) { + ocfs2_dlm_shutdown_debug(osb); + if (osb->dc_task) + kthread_stop(osb->dc_task); + } + + mlog_exit(status); + return status; +} + +void ocfs2_dlm_shutdown(struct ocfs2_super *osb, + int hangup_pending) +{ + mlog_entry_void(); + + ocfs2_drop_osb_locks(osb); + + /* + * Now that we have dropped all locks and ocfs2_dismount_volume() + * has disabled recovery, the DLM won't be talking to us. It's + * safe to tear things down before disconnecting the cluster. + */ + + if (osb->dc_task) { + kthread_stop(osb->dc_task); + osb->dc_task = NULL; + } + + ocfs2_lock_res_free(&osb->osb_super_lockres); + ocfs2_lock_res_free(&osb->osb_rename_lockres); + + ocfs2_cluster_disconnect(osb->cconn, hangup_pending); + osb->cconn = NULL; + + ocfs2_dlm_shutdown_debug(osb); + + mlog_exit_void(); +} + +static void ocfs2_unlock_ast(void *opaque, int error) +{ + struct ocfs2_lock_res *lockres = opaque; + unsigned long flags; + + mlog_entry_void(); + + mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name, + lockres->l_unlock_action); + + spin_lock_irqsave(&lockres->l_lock, flags); + if (error) { + mlog(ML_ERROR, "Dlm passes error %d for lock %s, " + "unlock_action %d\n", error, lockres->l_name, + lockres->l_unlock_action); + spin_unlock_irqrestore(&lockres->l_lock, flags); + return; + } + + switch(lockres->l_unlock_action) { + case OCFS2_UNLOCK_CANCEL_CONVERT: + mlog(0, "Cancel convert success for %s\n", lockres->l_name); + lockres->l_action = OCFS2_AST_INVALID; + break; + case OCFS2_UNLOCK_DROP_LOCK: + lockres->l_level = DLM_LOCK_IV; + break; + default: + BUG(); + } + + lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); + lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; + wake_up(&lockres->l_event); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + mlog_exit_void(); +} + +static int ocfs2_drop_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + int ret; + unsigned long flags; + u32 lkm_flags = 0; + + /* We didn't get anywhere near actually using this lockres. */ + if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) + goto out; + + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) + lkm_flags |= DLM_LKF_VALBLK; + + spin_lock_irqsave(&lockres->l_lock, flags); + + mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING), + "lockres %s, flags 0x%lx\n", + lockres->l_name, lockres->l_flags); + + while (lockres->l_flags & OCFS2_LOCK_BUSY) { + mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = " + "%u, unlock_action = %u\n", + lockres->l_name, lockres->l_flags, lockres->l_action, + lockres->l_unlock_action); + + spin_unlock_irqrestore(&lockres->l_lock, flags); + + /* XXX: Today we just wait on any busy + * locks... Perhaps we need to cancel converts in the + * future? 
*/ + ocfs2_wait_on_busy_lock(lockres); + + spin_lock_irqsave(&lockres->l_lock, flags); + } + + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) { + if (lockres->l_flags & OCFS2_LOCK_ATTACHED && + lockres->l_level == DLM_LOCK_EX && + !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) + lockres->l_ops->set_lvb(lockres); + } + + if (lockres->l_flags & OCFS2_LOCK_BUSY) + mlog(ML_ERROR, "destroying busy lock: \"%s\"\n", + lockres->l_name); + if (lockres->l_flags & OCFS2_LOCK_BLOCKED) + mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name); + + if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) { + spin_unlock_irqrestore(&lockres->l_lock, flags); + goto out; + } + + lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED); + + /* make sure we never get here while waiting for an ast to + * fire. */ + BUG_ON(lockres->l_action != OCFS2_AST_INVALID); + + /* is this necessary? */ + lockres_or_flags(lockres, OCFS2_LOCK_BUSY); + lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK; + spin_unlock_irqrestore(&lockres->l_lock, flags); + + mlog(0, "lock %s\n", lockres->l_name); + + ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags, + lockres); + if (ret) { + ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres); + mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags); + ocfs2_dlm_dump_lksb(&lockres->l_lksb); + BUG(); + } + mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n", + lockres->l_name); + + ocfs2_wait_on_busy_lock(lockres); +out: + mlog_exit(0); + return 0; +} + +/* Mark the lockres as being dropped. It will no longer be + * queued if blocking, but we still may have to wait on it + * being dequeued from the downconvert thread before we can consider + * it safe to drop. + * + * You can *not* attempt to call cluster_lock on this lockres anymore. */ +void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) +{ + int status; + struct ocfs2_mask_waiter mw; + unsigned long flags; + + ocfs2_init_mask_waiter(&mw); + + spin_lock_irqsave(&lockres->l_lock, flags); + lockres->l_flags |= OCFS2_LOCK_FREEING; + while (lockres->l_flags & OCFS2_LOCK_QUEUED) { + lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + mlog(0, "Waiting on lockres %s\n", lockres->l_name); + + status = ocfs2_wait_for_mask(&mw); + if (status) + mlog_errno(status); + + spin_lock_irqsave(&lockres->l_lock, flags); + } + spin_unlock_irqrestore(&lockres->l_lock, flags); +} + +void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + int ret; + + ocfs2_mark_lockres_freeing(lockres); + ret = ocfs2_drop_lock(osb, lockres); + if (ret) + mlog_errno(ret); +} + +static void ocfs2_drop_osb_locks(struct ocfs2_super *osb) +{ + ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres); + ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres); +} + +int ocfs2_drop_inode_locks(struct inode *inode) +{ + int status, err; + + mlog_entry_void(); + + /* No need to call ocfs2_mark_lockres_freeing here - + * ocfs2_clear_inode has done it for us. 
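+ * + * Note the drop order below: the open lock first, then the inode + * (meta) lock, then the rw lock. Each drop is attempted even if an + * earlier one failed, and the first error seen is what gets + * returned.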
*/ + + err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), + &OCFS2_I(inode)->ip_open_lockres); + if (err < 0) + mlog_errno(err); + + status = err; + + err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), + &OCFS2_I(inode)->ip_inode_lockres); + if (err < 0) + mlog_errno(err); + if (err < 0 && !status) + status = err; + + err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), + &OCFS2_I(inode)->ip_rw_lockres); + if (err < 0) + mlog_errno(err); + if (err < 0 && !status) + status = err; + + mlog_exit(status); + return status; +} + +static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres, + int new_level) +{ + assert_spin_locked(&lockres->l_lock); + + BUG_ON(lockres->l_blocking <= DLM_LOCK_NL); + + if (lockres->l_level <= new_level) { + mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n", + lockres->l_level, new_level); + BUG(); + } + + mlog(0, "lock %s, new_level = %d, l_blocking = %d\n", + lockres->l_name, new_level, lockres->l_blocking); + + lockres->l_action = OCFS2_AST_DOWNCONVERT; + lockres->l_requested = new_level; + lockres_or_flags(lockres, OCFS2_LOCK_BUSY); + return lockres_set_pending(lockres); +} + +static int ocfs2_downconvert_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + int new_level, + int lvb, + unsigned int generation) +{ + int ret; + u32 dlm_flags = DLM_LKF_CONVERT; + + mlog_entry_void(); + + if (lvb) + dlm_flags |= DLM_LKF_VALBLK; + + ret = ocfs2_dlm_lock(osb->cconn, + new_level, + &lockres->l_lksb, + dlm_flags, + lockres->l_name, + OCFS2_LOCK_ID_MAX_LEN - 1, + lockres); + lockres_clear_pending(lockres, generation, osb); + if (ret) { + ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); + ocfs2_recover_from_dlm_error(lockres, 1); + goto bail; + } + + ret = 0; +bail: + mlog_exit(ret); + return ret; +} + +/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */ +static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + assert_spin_locked(&lockres->l_lock); + + mlog_entry_void(); + mlog(0, "lock %s\n", lockres->l_name); + + if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { + /* If we're already trying to cancel a lock conversion + * then just drop the spinlock and allow the caller to + * requeue this lock. */ + + mlog(0, "Lockres %s, skip convert\n", lockres->l_name); + return 0; + } + + /* were we in a convert when we got the bast fire? */ + BUG_ON(lockres->l_action != OCFS2_AST_CONVERT && + lockres->l_action != OCFS2_AST_DOWNCONVERT); + /* set things up for the unlockast to know to just + * clear out the ast_action and unset busy, etc. 
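+ * (See the OCFS2_UNLOCK_CANCEL_CONVERT case in ocfs2_unlock_ast() + * above: it resets l_action to OCFS2_AST_INVALID and clears + * OCFS2_LOCK_BUSY.)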
*/ + lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT; + + mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY), + "lock %s, invalid flags: 0x%lx\n", + lockres->l_name, lockres->l_flags); + + return 1; +} + +static int ocfs2_cancel_convert(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + int ret; + + mlog_entry_void(); + mlog(0, "lock %s\n", lockres->l_name); + + ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, + DLM_LKF_CANCEL, lockres); + if (ret) { + ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres); + ocfs2_recover_from_dlm_error(lockres, 0); + } + + mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name); + + mlog_exit(ret); + return ret; +} + +static int ocfs2_unblock_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres, + struct ocfs2_unblock_ctl *ctl) +{ + unsigned long flags; + int blocking; + int new_level; + int ret = 0; + int set_lvb = 0; + unsigned int gen; + + mlog_entry_void(); + + spin_lock_irqsave(&lockres->l_lock, flags); + + BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED)); + +recheck: + if (lockres->l_flags & OCFS2_LOCK_BUSY) { + /* XXX + * This is a *big* race. The OCFS2_LOCK_PENDING flag + * exists entirely for one reason - another thread has set + * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock(). + * + * If we do ocfs2_cancel_convert() before the other thread + * calls dlm_lock(), our cancel will do nothing. We will + * get no ast, and we will have no way of knowing the + * cancel failed. Meanwhile, the other thread will call + * into dlm_lock() and wait...forever. + * + * Why forever? Because another node has asked for the + * lock first; that's why we're here in unblock_lock(). + * + * The solution is OCFS2_LOCK_PENDING. When PENDING is + * set, we just requeue the unblock. Only when the other + * thread has called dlm_lock() and cleared PENDING will + * we then cancel their request. + * + * All callers of dlm_lock() must set OCFS2_LOCK_PENDING + * at the same time they set OCFS2_LOCK_BUSY. They must + * clear OCFS2_LOCK_PENDING after dlm_lock() returns. + */ + if (lockres->l_flags & OCFS2_LOCK_PENDING) + goto leave_requeue; + + ctl->requeue = 1; + ret = ocfs2_prepare_cancel_convert(osb, lockres); + spin_unlock_irqrestore(&lockres->l_lock, flags); + if (ret) { + ret = ocfs2_cancel_convert(osb, lockres); + if (ret < 0) + mlog_errno(ret); + } + goto leave; + } + + /* if we're blocking an exclusive and we have *any* holders, + * then requeue. */ + if ((lockres->l_blocking == DLM_LOCK_EX) + && (lockres->l_ex_holders || lockres->l_ro_holders)) + goto leave_requeue; + + /* If it's a PR we're blocking, then only + * requeue if we've got any EX holders */ + if (lockres->l_blocking == DLM_LOCK_PR && + lockres->l_ex_holders) + goto leave_requeue; + + /* + * Can we get a lock in this state if the holder counts are + * zero? The meta data unblock code used to check this. + */ + if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH) + && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) + goto leave_requeue; + + new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking); + + if (lockres->l_ops->check_downconvert + && !lockres->l_ops->check_downconvert(lockres, new_level)) + goto leave_requeue; + + /* If we get here, then we know that there are no more + * incompatible holders (and anyone asking for an incompatible + * lock is blocked). We can now downconvert the lock */ + if (!lockres->l_ops->downconvert_worker) + goto downconvert; + + /* Some lockres types want to do a bit of work before + * downconverting a lock. 
Allow that here. The worker function + * may sleep, so we save off a copy of what we're blocking as + * it may change while we're not holding the spin lock. */ + blocking = lockres->l_blocking; + spin_unlock_irqrestore(&lockres->l_lock, flags); + + ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); + + if (ctl->unblock_action == UNBLOCK_STOP_POST) + goto leave; + + spin_lock_irqsave(&lockres->l_lock, flags); + if (blocking != lockres->l_blocking) { + /* If this changed underneath us, then we can't drop + * it just yet. */ + goto recheck; + } + +downconvert: + ctl->requeue = 0; + + if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) { + if (lockres->l_level == DLM_LOCK_EX) + set_lvb = 1; + + /* + * We only set the lvb if the lock has been fully + * refreshed - otherwise we risk setting stale + * data. If the refresh hasn't happened, there's no + * need to actually clear out the lvb here as its + * value is still valid. + */ + if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) + lockres->l_ops->set_lvb(lockres); + } + + gen = ocfs2_prepare_downconvert(lockres, new_level); + spin_unlock_irqrestore(&lockres->l_lock, flags); + ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb, + gen); + +leave: + mlog_exit(ret); + return ret; + +leave_requeue: + spin_unlock_irqrestore(&lockres->l_lock, flags); + ctl->requeue = 1; + + mlog_exit(0); + return 0; +} + +static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, + int blocking) +{ + struct inode *inode; + struct address_space *mapping; + + inode = ocfs2_lock_res_inode(lockres); + mapping = inode->i_mapping; + + if (!S_ISREG(inode->i_mode)) + goto out; + + /* + * We need this before the filemap_fdatawrite() so that it can + * transfer the dirty bit from the PTE to the + * page. Unfortunately this means that even for EX->PR + * downconverts, we'll lose our mappings and have to build + * them up again. + */ + unmap_mapping_range(mapping, 0, 0, 0); + + if (filemap_fdatawrite(mapping)) { + mlog(ML_ERROR, "Could not sync inode %llu for downconvert!", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + } + sync_mapping_buffers(mapping); + if (blocking == DLM_LOCK_EX) { + truncate_inode_pages(mapping, 0); + } else { + /* We only need to wait on the I/O if we're not also + * truncating pages because truncate_inode_pages waits + * for us above. We don't truncate pages if we're + * blocking anything < EXMODE because we want to keep + * them around in that case. */ + filemap_fdatawait(mapping); + } + +out: + return UNBLOCK_CONTINUE; +} + +static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, + int new_level) +{ + struct inode *inode = ocfs2_lock_res_inode(lockres); + int checkpointed = ocfs2_inode_fully_checkpointed(inode); + + BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR); + BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed); + + if (checkpointed) + return 1; + + ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb)); + return 0; +} + +static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres) +{ + struct inode *inode = ocfs2_lock_res_inode(lockres); + + __ocfs2_stuff_meta_lvb(inode); +} + +/* + * Does the final reference drop on our dentry lock. Right now this + * happens in the downconvert thread, but we could choose to simplify the + * dlmglue API and push these off to the ocfs2_wq in the future. 
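+ * + * The reference dropped here is the dl_count reference taken via + * dl->dl_count++ in ocfs2_dentry_convert_worker() below, which + * pins the dentry lock across its d_delete() walk.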
+ */ +static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres); + ocfs2_dentry_lock_put(osb, dl); +} + +/* + * d_delete() matching dentries before the lock downconvert. + * + * At this point, any process waiting to destroy the + * dentry_lock due to last ref count is stopped by the + * OCFS2_LOCK_QUEUED flag. + * + * We have two potential problems: + * + * 1) If we do the last reference drop on our dentry_lock (via dput) + * we'll wind up in ocfs2_release_dentry_lock(), waiting on + * the downconvert to finish. Instead we take an elevated + * reference and push the drop until after we've completed our + * unblock processing. + * + * 2) There might be another process with a final reference, + * waiting on us to finish processing. If this is the case, we + * detect it and exit out - there are no more dentries anyway. + */ +static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, + int blocking) +{ + struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres); + struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode); + struct dentry *dentry; + unsigned long flags; + int extra_ref = 0; + + /* + * This node is blocking another node from getting a read + * lock. This happens when we've renamed within a + * directory. We've forced the other nodes to d_delete(), but + * we never actually dropped our lock because it's still + * valid. The downconvert code will retain a PR for this node, + * so there's no further work to do. + */ + if (blocking == DLM_LOCK_PR) + return UNBLOCK_CONTINUE; + + /* + * Mark this inode as potentially orphaned. The code in + * ocfs2_delete_inode() will figure out whether it actually + * needs to be freed or not. + */ + spin_lock(&oi->ip_lock); + oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; + spin_unlock(&oi->ip_lock); + + /* + * Yuck. We need to make sure however that the check of + * OCFS2_LOCK_FREEING and the extra reference are atomic with + * respect to a reference decrement or the setting of that + * flag. + */ + spin_lock_irqsave(&lockres->l_lock, flags); + spin_lock(&dentry_attach_lock); + if (!(lockres->l_flags & OCFS2_LOCK_FREEING) + && dl->dl_count) { + dl->dl_count++; + extra_ref = 1; + } + spin_unlock(&dentry_attach_lock); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + mlog(0, "extra_ref = %d\n", extra_ref); + + /* + * We have a process waiting on us in ocfs2_dentry_iput(), + * which means we can't have any more outstanding + * aliases. There's no need to do any more work. + */ + if (!extra_ref) + return UNBLOCK_CONTINUE; + + spin_lock(&dentry_attach_lock); + while (1) { + dentry = ocfs2_find_local_alias(dl->dl_inode, + dl->dl_parent_blkno, 1); + if (!dentry) + break; + spin_unlock(&dentry_attach_lock); + + mlog(0, "d_delete(%.*s);\n", dentry->d_name.len, + dentry->d_name.name); + + /* + * The following dcache calls may do an + * iput(). Normally we don't want that from the + * downconverting thread, but in this case it's ok + * because the requesting node already has an + * exclusive lock on the inode, so it can't be queued + * for a downconvert. + */ + d_delete(dentry); + dput(dentry); + + spin_lock(&dentry_attach_lock); + } + spin_unlock(&dentry_attach_lock); + + /* + * If we are the last holder of this dentry lock, there is no + * reason to downconvert so skip straight to the unlock. + */ + if (dl->dl_count == 1) + return UNBLOCK_STOP_POST; + + return UNBLOCK_CONTINUE_POST; +} + +/* + * This is the filesystem locking protocol. 
It provides the lock handling + * hooks for the underlying DLM. It has a maximum version number. + * The version number allows interoperability with systems running at + * the same major number and an equal or smaller minor number. + * + * Whenever the filesystem does new things with locks (adds or removes a + * lock, orders them differently, does different things underneath a lock), + * the version must be changed. The protocol is negotiated when joining + * the dlm domain. A node may join the domain if its major version is + * identical to all other nodes and its minor version is greater than + * or equal to all other nodes. When its minor version is greater than + * the other nodes, it will run at the minor version specified by the + * other nodes. + * + * If a locking change is made that will not be compatible with older + * versions, the major number must be increased and the minor version set + * to zero. If a change merely adds a behavior that can be disabled when + * speaking to older versions, the minor version must be increased. If a + * change adds a fully backwards compatible change (eg, LVB changes that + * are just ignored by older versions), the version does not need to be + * updated. + */ +static struct ocfs2_locking_protocol lproto = { + .lp_max_version = { + .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, + .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, + }, + .lp_lock_ast = ocfs2_locking_ast, + .lp_blocking_ast = ocfs2_blocking_ast, + .lp_unlock_ast = ocfs2_unlock_ast, +}; + +void ocfs2_set_locking_protocol(void) +{ + ocfs2_stack_glue_set_locking_protocol(&lproto); +} + + +static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + int status; + struct ocfs2_unblock_ctl ctl = {0, 0,}; + unsigned long flags; + + /* Our reference to the lockres in this function can be + * considered valid until we remove the OCFS2_LOCK_QUEUED + * flag. */ + + mlog_entry_void(); + + BUG_ON(!lockres); + BUG_ON(!lockres->l_ops); + + mlog(0, "lockres %s blocked.\n", lockres->l_name); + + /* Detect whether a lock has been marked as going away while + * the downconvert thread was processing other things. A lock can + * still be marked with OCFS2_LOCK_FREEING after this check, + * but short circuiting here will still save us some + * performance. */ + spin_lock_irqsave(&lockres->l_lock, flags); + if (lockres->l_flags & OCFS2_LOCK_FREEING) + goto unqueue; + spin_unlock_irqrestore(&lockres->l_lock, flags); + + status = ocfs2_unblock_lock(osb, lockres, &ctl); + if (status < 0) + mlog_errno(status); + + spin_lock_irqsave(&lockres->l_lock, flags); +unqueue: + if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) { + lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED); + } else + ocfs2_schedule_blocked_lock(osb, lockres); + + mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name, + ctl.requeue ? "yes" : "no"); + spin_unlock_irqrestore(&lockres->l_lock, flags); + + if (ctl.unblock_action != UNBLOCK_CONTINUE + && lockres->l_ops->post_unlock) + lockres->l_ops->post_unlock(osb, lockres); + + mlog_exit_void(); +} + +static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres) +{ + mlog_entry_void(); + + assert_spin_locked(&lockres->l_lock); + + if (lockres->l_flags & OCFS2_LOCK_FREEING) { + /* Do not schedule a lock for downconvert when it's on + * the way to destruction - any nodes wanting access + * to the resource will get it soon. 
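+ * (This also matters to ocfs2_mark_lockres_freeing() above, which + * waits for OCFS2_LOCK_QUEUED to clear; declining to requeue here + * is what allows that wait to finish.)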
*/ + mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n", + lockres->l_name, lockres->l_flags); + return; + } + + lockres_or_flags(lockres, OCFS2_LOCK_QUEUED); + + spin_lock(&osb->dc_task_lock); + if (list_empty(&lockres->l_blocked_list)) { + list_add_tail(&lockres->l_blocked_list, + &osb->blocked_lock_list); + osb->blocked_lock_count++; + } + spin_unlock(&osb->dc_task_lock); + + mlog_exit_void(); +} + +static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) +{ + unsigned long processed; + struct ocfs2_lock_res *lockres; + + mlog_entry_void(); + + spin_lock(&osb->dc_task_lock); + /* grab this early so we know to try again if a state change and + * wake happens part-way through our work */ + osb->dc_work_sequence = osb->dc_wake_sequence; + + processed = osb->blocked_lock_count; + while (processed) { + BUG_ON(list_empty(&osb->blocked_lock_list)); + + lockres = list_entry(osb->blocked_lock_list.next, + struct ocfs2_lock_res, l_blocked_list); + list_del_init(&lockres->l_blocked_list); + osb->blocked_lock_count--; + spin_unlock(&osb->dc_task_lock); + + BUG_ON(!processed); + processed--; + + ocfs2_process_blocked_lock(osb, lockres); + + spin_lock(&osb->dc_task_lock); + } + spin_unlock(&osb->dc_task_lock); + + mlog_exit_void(); +} + +static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb) +{ + int empty = 0; + + spin_lock(&osb->dc_task_lock); + if (list_empty(&osb->blocked_lock_list)) + empty = 1; + + spin_unlock(&osb->dc_task_lock); + return empty; +} + +static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb) +{ + int should_wake = 0; + + spin_lock(&osb->dc_task_lock); + if (osb->dc_work_sequence != osb->dc_wake_sequence) + should_wake = 1; + spin_unlock(&osb->dc_task_lock); + + return should_wake; +} + +static int ocfs2_downconvert_thread(void *arg) +{ + int status = 0; + struct ocfs2_super *osb = arg; + + /* only quit once we've been asked to stop and there is no more + * work available */ + while (!(kthread_should_stop() && + ocfs2_downconvert_thread_lists_empty(osb))) { + + wait_event_interruptible(osb->dc_event, + ocfs2_downconvert_thread_should_wake(osb) || + kthread_should_stop()); + + mlog(0, "downconvert_thread: awoken\n"); + + ocfs2_downconvert_thread_do_work(osb); + } + + osb->dc_task = NULL; + return status; +} + +void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb) +{ + spin_lock(&osb->dc_task_lock); + /* make sure the voting thread gets a swipe at whatever changes + * the caller may have made to the voting state */ + osb->dc_wake_sequence++; + spin_unlock(&osb->dc_task_lock); + wake_up(&osb->dc_event); +} diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h new file mode 100644 index 0000000..2bb01f0 --- /dev/null +++ b/fs/ocfs2/dlmglue.h @@ -0,0 +1,119 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * dlmglue.h + * + * description here + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + + +#ifndef DLMGLUE_H +#define DLMGLUE_H + +#include "dcache.h" + +#define OCFS2_LVB_VERSION 5 + +struct ocfs2_meta_lvb { + __u8 lvb_version; + __u8 lvb_reserved0; + __be16 lvb_idynfeatures; + __be32 lvb_iclusters; + __be32 lvb_iuid; + __be32 lvb_igid; + __be64 lvb_iatime_packed; + __be64 lvb_ictime_packed; + __be64 lvb_imtime_packed; + __be64 lvb_isize; + __be16 lvb_imode; + __be16 lvb_inlink; + __be32 lvb_iattr; + __be32 lvb_igeneration; + __be32 lvb_reserved2; +}; + +/* ocfs2_inode_lock_full() 'arg_flags' flags */ +/* don't wait on recovery. */ +#define OCFS2_META_LOCK_RECOVERY (0x01) +/* Instruct the dlm not to queue ourselves on the other node. */ +#define OCFS2_META_LOCK_NOQUEUE (0x02) +/* don't block waiting for the downconvert thread, instead return -EAGAIN */ +#define OCFS2_LOCK_NONBLOCK (0x04) + +int ocfs2_dlm_init(struct ocfs2_super *osb); +void ocfs2_dlm_shutdown(struct ocfs2_super *osb, int hangup_pending); +void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res); +void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res, + enum ocfs2_lock_type type, + unsigned int generation, + struct inode *inode); +void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl, + u64 parent, struct inode *inode); +struct ocfs2_file_private; +void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres, + struct ocfs2_file_private *fp); +void ocfs2_lock_res_free(struct ocfs2_lock_res *res); +int ocfs2_create_new_inode_locks(struct inode *inode); +int ocfs2_drop_inode_locks(struct inode *inode); +int ocfs2_rw_lock(struct inode *inode, int write); +void ocfs2_rw_unlock(struct inode *inode, int write); +int ocfs2_open_lock(struct inode *inode); +int ocfs2_try_open_lock(struct inode *inode, int write); +void ocfs2_open_unlock(struct inode *inode); +int ocfs2_inode_lock_atime(struct inode *inode, + struct vfsmount *vfsmnt, + int *level); +int ocfs2_inode_lock_full(struct inode *inode, + struct buffer_head **ret_bh, + int ex, + int arg_flags); +int ocfs2_inode_lock_with_page(struct inode *inode, + struct buffer_head **ret_bh, + int ex, + struct page *page); +/* 99% of the time we don't want to supply any additional flags -- + * those are for very specific cases only. 
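+ * + * A sketch of typical usage (error handling elided; the EX level + * and the buffer_head are illustrative): + * + *	status = ocfs2_inode_lock(inode, &bh, 1); + *	if (status < 0) + *		goto out; + *	...operate on the dinode in bh... + *	ocfs2_inode_unlock(inode, 1); + *	brelse(bh);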
*/ +#define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full(i, b, e, 0) +void ocfs2_inode_unlock(struct inode *inode, + int ex); +int ocfs2_super_lock(struct ocfs2_super *osb, + int ex); +void ocfs2_super_unlock(struct ocfs2_super *osb, + int ex); +int ocfs2_rename_lock(struct ocfs2_super *osb); +void ocfs2_rename_unlock(struct ocfs2_super *osb); +int ocfs2_dentry_lock(struct dentry *dentry, int ex); +void ocfs2_dentry_unlock(struct dentry *dentry, int ex); +int ocfs2_file_lock(struct file *file, int ex, int trylock); +void ocfs2_file_unlock(struct file *file); + +void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres); +void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, + struct ocfs2_lock_res *lockres); + +/* for the downconvert thread */ +void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb); + +struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void); +void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug); + +/* To set the locking protocol on module initialization */ +void ocfs2_set_locking_protocol(void); +#endif /* DLMGLUE_H */ diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c new file mode 100644 index 0000000..2f27b33 --- /dev/null +++ b/fs/ocfs2/export.c @@ -0,0 +1,212 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * export.c + * + * Functions to facilitate NFS exporting + * + * Copyright (C) 2002, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/fs.h> +#include <linux/types.h> + +#define MLOG_MASK_PREFIX ML_EXPORT +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "dir.h" +#include "dlmglue.h" +#include "dcache.h" +#include "export.h" +#include "inode.h" + +#include "buffer_head_io.h" + +struct ocfs2_inode_handle +{ + u64 ih_blkno; + u32 ih_generation; +}; + +static struct dentry *ocfs2_get_dentry(struct super_block *sb, + struct ocfs2_inode_handle *handle) +{ + struct inode *inode; + struct dentry *result; + + mlog_entry("(0x%p, 0x%p)\n", sb, handle); + + if (handle->ih_blkno == 0) { + mlog_errno(-ESTALE); + return ERR_PTR(-ESTALE); + } + + inode = ocfs2_iget(OCFS2_SB(sb), handle->ih_blkno, 0, 0); + + if (IS_ERR(inode)) + return (void *)inode; + + if (handle->ih_generation != inode->i_generation) { + iput(inode); + return ERR_PTR(-ESTALE); + } + + result = d_obtain_alias(inode); + if (!IS_ERR(result)) + result->d_op = &ocfs2_dentry_ops; + + mlog_exit_ptr(result); + return result; +} + +static struct dentry *ocfs2_get_parent(struct dentry *child) +{ + int status; + u64 blkno; + struct dentry *parent; + struct inode *dir = child->d_inode; + + mlog_entry("(0x%p, '%.*s')\n", child, + child->d_name.len, child->d_name.name); + + mlog(0, "find parent of directory %llu\n", + (unsigned long long)OCFS2_I(dir)->ip_blkno); + + status = ocfs2_inode_lock(dir, NULL, 0); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + parent = ERR_PTR(status); + goto bail; + } + + status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno); + if (status < 0) { + parent = ERR_PTR(-ENOENT); + goto bail_unlock; + } + + parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); + if (!IS_ERR(parent)) + parent->d_op = &ocfs2_dentry_ops; + +bail_unlock: + ocfs2_inode_unlock(dir, 0); + +bail: + mlog_exit_ptr(parent); + + return parent; +} + +static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, + int connectable) +{ + struct inode *inode = dentry->d_inode; + int len = *max_len; + int type = 1; + u64 blkno; + u32 generation; + __le32 *fh = (__force __le32 *) fh_in; + + mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry, + dentry->d_name.len, dentry->d_name.name, + fh, len, connectable); + + if (len < 3 || (connectable && len < 6)) { + mlog(ML_ERROR, "fh buffer is too small for encoding\n"); + type = 255; + goto bail; + } + + blkno = OCFS2_I(inode)->ip_blkno; + generation = inode->i_generation; + + mlog(0, "Encoding fh: blkno: %llu, generation: %u\n", + (unsigned long long)blkno, generation); + + len = 3; + fh[0] = cpu_to_le32((u32)(blkno >> 32)); + fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff)); + fh[2] = cpu_to_le32(generation); + + if (connectable && !S_ISDIR(inode->i_mode)) { + struct inode *parent; + + spin_lock(&dentry->d_lock); + + parent = dentry->d_parent->d_inode; + blkno = OCFS2_I(parent)->ip_blkno; + generation = parent->i_generation; + + fh[3] = cpu_to_le32((u32)(blkno >> 32)); + fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff)); + fh[5] = cpu_to_le32(generation); + + spin_unlock(&dentry->d_lock); + + len = 6; + type = 2; + + mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", + (unsigned long long)blkno, generation); + } + + *max_len = len; + +bail: + mlog_exit(type); + return type; +} + +static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, int fh_type) +{ + struct ocfs2_inode_handle handle; + + if (fh_len < 3 || fh_type > 2) + return NULL; + + handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32; + handle.ih_blkno |= 
(u64)le32_to_cpu(fid->raw[1]); + handle.ih_generation = le32_to_cpu(fid->raw[2]); + return ocfs2_get_dentry(sb, &handle); +} + +static struct dentry *ocfs2_fh_to_parent(struct super_block *sb, + struct fid *fid, int fh_len, int fh_type) +{ + struct ocfs2_inode_handle parent; + + if (fh_type != 2 || fh_len < 6) + return NULL; + + parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32; + parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]); + parent.ih_generation = le32_to_cpu(fid->raw[5]); + return ocfs2_get_dentry(sb, &parent); +} + +const struct export_operations ocfs2_export_ops = { + .encode_fh = ocfs2_encode_fh, + .fh_to_dentry = ocfs2_fh_to_dentry, + .fh_to_parent = ocfs2_fh_to_parent, + .get_parent = ocfs2_get_parent, +}; diff --git a/fs/ocfs2/export.h b/fs/ocfs2/export.h new file mode 100644 index 0000000..41a7386 --- /dev/null +++ b/fs/ocfs2/export.h @@ -0,0 +1,33 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * export.h + * + * Function prototypes + * + * Copyright (C) 2002, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_EXPORT_H +#define OCFS2_EXPORT_H + +#include <linux/exportfs.h> + +extern const struct export_operations ocfs2_export_ops; + +#endif /* OCFS2_EXPORT_H */ diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c new file mode 100644 index 0000000..2baedac --- /dev/null +++ b/fs/ocfs2/extent_map.c @@ -0,0 +1,821 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * extent_map.c + * + * Block/Cluster mapping functions + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/fiemap.h> + +#define MLOG_MASK_PREFIX ML_EXTENT_MAP +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "inode.h" +#include "super.h" + +#include "buffer_head_io.h" + +/* + * The extent caching implementation is intentionally trivial. + * + * We only cache a small number of extents stored directly on the + * inode, so linear order operations are acceptable. 
If we ever want + * to increase the size of the extent map, then these algorithms must + * get smarter. + */ + +void ocfs2_extent_map_init(struct inode *inode) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + oi->ip_extent_map.em_num_items = 0; + INIT_LIST_HEAD(&oi->ip_extent_map.em_list); +} + +static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em, + unsigned int cpos, + struct ocfs2_extent_map_item **ret_emi) +{ + unsigned int range; + struct ocfs2_extent_map_item *emi; + + *ret_emi = NULL; + + list_for_each_entry(emi, &em->em_list, ei_list) { + range = emi->ei_cpos + emi->ei_clusters; + + if (cpos >= emi->ei_cpos && cpos < range) { + list_move(&emi->ei_list, &em->em_list); + + *ret_emi = emi; + break; + } + } +} + +static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos, + unsigned int *phys, unsigned int *len, + unsigned int *flags) +{ + unsigned int coff; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_extent_map_item *emi; + + spin_lock(&oi->ip_lock); + + __ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi); + if (emi) { + coff = cpos - emi->ei_cpos; + *phys = emi->ei_phys + coff; + if (len) + *len = emi->ei_clusters - coff; + if (flags) + *flags = emi->ei_flags; + } + + spin_unlock(&oi->ip_lock); + + if (emi == NULL) + return -ENOENT; + + return 0; +} + +/* + * Forget about all clusters equal to or greater than cpos. + */ +void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos) +{ + struct ocfs2_extent_map_item *emi, *n; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_extent_map *em = &oi->ip_extent_map; + LIST_HEAD(tmp_list); + unsigned int range; + + spin_lock(&oi->ip_lock); + list_for_each_entry_safe(emi, n, &em->em_list, ei_list) { + if (emi->ei_cpos >= cpos) { + /* Full truncate of this record. */ + list_move(&emi->ei_list, &tmp_list); + BUG_ON(em->em_num_items == 0); + em->em_num_items--; + continue; + } + + range = emi->ei_cpos + emi->ei_clusters; + if (range > cpos) { + /* Partial truncate */ + emi->ei_clusters = cpos - emi->ei_cpos; + } + } + spin_unlock(&oi->ip_lock); + + list_for_each_entry_safe(emi, n, &tmp_list, ei_list) { + list_del(&emi->ei_list); + kfree(emi); + } +} + +/* + * Is any part of emi2 contained within emi1 + */ +static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1, + struct ocfs2_extent_map_item *emi2) +{ + unsigned int range1, range2; + + /* + * Check if logical start of emi2 is inside emi1 + */ + range1 = emi1->ei_cpos + emi1->ei_clusters; + if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1) + return 1; + + /* + * Check if logical end of emi2 is inside emi1 + */ + range2 = emi2->ei_cpos + emi2->ei_clusters; + if (range2 > emi1->ei_cpos && range2 <= range1) + return 1; + + return 0; +} + +static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest, + struct ocfs2_extent_map_item *src) +{ + dest->ei_cpos = src->ei_cpos; + dest->ei_phys = src->ei_phys; + dest->ei_clusters = src->ei_clusters; + dest->ei_flags = src->ei_flags; +} + +/* + * Try to merge emi with ins. Returns 1 if merge succeeds, zero + * otherwise. 
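+ * + * Illustrative numbers (not from the source): an existing item with + * (ei_cpos 0, ei_phys 100, ei_clusters 4) and an insert of + * (ei_cpos 4, ei_phys 104, ei_clusters 4) carrying equal flags pass + * the contiguity check below, so the existing item simply grows to + * ei_clusters == 8.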
+ */ +static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, + struct ocfs2_extent_map_item *ins) +{ + /* + * Handle contiguousness + */ + if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) && + ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) && + ins->ei_flags == emi->ei_flags) { + emi->ei_clusters += ins->ei_clusters; + return 1; + } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && + (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos && + ins->ei_flags == emi->ei_flags) { + emi->ei_phys = ins->ei_phys; + emi->ei_cpos = ins->ei_cpos; + emi->ei_clusters += ins->ei_clusters; + return 1; + } + + /* + * Overlapping extents - this shouldn't happen unless we've + * split an extent to change its flags. That is exceedingly + * rare, so there's no sense in trying to optimize it yet. + */ + if (ocfs2_ei_is_contained(emi, ins) || + ocfs2_ei_is_contained(ins, emi)) { + ocfs2_copy_emi_fields(emi, ins); + return 1; + } + + /* No merge was possible. */ + return 0; +} + +/* + * In order to reduce complexity on the caller, this insert function + * is intentionally liberal in what it will accept. + * + * The only rule is that the truncate call *must* be used whenever + * records have been deleted. This avoids inserting overlapping + * records with different physical mappings. + */ +void ocfs2_extent_map_insert_rec(struct inode *inode, + struct ocfs2_extent_rec *rec) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_extent_map *em = &oi->ip_extent_map; + struct ocfs2_extent_map_item *emi, *new_emi = NULL; + struct ocfs2_extent_map_item ins; + + ins.ei_cpos = le32_to_cpu(rec->e_cpos); + ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb, + le64_to_cpu(rec->e_blkno)); + ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters); + ins.ei_flags = rec->e_flags; + +search: + spin_lock(&oi->ip_lock); + + list_for_each_entry(emi, &em->em_list, ei_list) { + if (ocfs2_try_to_merge_extent_map(emi, &ins)) { + list_move(&emi->ei_list, &em->em_list); + spin_unlock(&oi->ip_lock); + goto out; + } + } + + /* + * No item could be merged. + * + * Either allocate and add a new item, or overwrite the least recently + * inserted. 
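+ * + * With OCFS2_MAX_EXTENT_MAP_ITEMS at 3, for instance, a fourth + * non-mergeable record allocates nothing: it recycles the item at the + * tail of em_list, which the list_move() calls above keep as the + * least recently used entry.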
+ */ + + if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) { + if (new_emi == NULL) { + spin_unlock(&oi->ip_lock); + + new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS); + if (new_emi == NULL) + goto out; + + goto search; + } + + ocfs2_copy_emi_fields(new_emi, &ins); + list_add(&new_emi->ei_list, &em->em_list); + em->em_num_items++; + new_emi = NULL; + } else { + BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0); + emi = list_entry(em->em_list.prev, + struct ocfs2_extent_map_item, ei_list); + list_move(&emi->ei_list, &em->em_list); + ocfs2_copy_emi_fields(emi, &ins); + } + + spin_unlock(&oi->ip_lock); + +out: + if (new_emi) + kfree(new_emi); +} + +static int ocfs2_last_eb_is_empty(struct inode *inode, + struct ocfs2_dinode *di) +{ + int ret, next_free; + u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk); + struct buffer_head *eb_bh = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_list *el; + + ret = ocfs2_read_block(inode, last_eb_blk, &eb_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + + if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { + ret = -EROFS; + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); + goto out; + } + + if (el->l_tree_depth) { + ocfs2_error(inode->i_sb, + "Inode %lu has non zero tree depth in " + "leaf block %llu\n", inode->i_ino, + (unsigned long long)eb_bh->b_blocknr); + ret = -EROFS; + goto out; + } + + next_free = le16_to_cpu(el->l_next_free_rec); + + if (next_free == 0 || + (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) + ret = 1; + +out: + brelse(eb_bh); + return ret; +} + +/* + * Return the 1st index within el which contains an extent start + * larger than v_cluster. + */ +static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el, + u32 v_cluster) +{ + int i; + struct ocfs2_extent_rec *rec; + + for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { + rec = &el->l_recs[i]; + + if (v_cluster < le32_to_cpu(rec->e_cpos)) + break; + } + + return i; +} + +/* + * Figure out the size of a hole which starts at v_cluster within the given + * extent list. + * + * If there is no more allocation past v_cluster, we return the maximum + * cluster size minus v_cluster. + * + * If we have in-inode extents, then el points to the dinode list and + * eb_bh is NULL. Otherwise, eb_bh should point to the extent block + * containing el. + */ +static int ocfs2_figure_hole_clusters(struct inode *inode, + struct ocfs2_extent_list *el, + struct buffer_head *eb_bh, + u32 v_cluster, + u32 *num_clusters) +{ + int ret, i; + struct buffer_head *next_eb_bh = NULL; + struct ocfs2_extent_block *eb, *next_eb; + + i = ocfs2_search_for_hole_index(el, v_cluster); + + if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) { + eb = (struct ocfs2_extent_block *)eb_bh->b_data; + + /* + * Check the next leaf for any extents. + */ + + if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) + goto no_more_extents; + + ret = ocfs2_read_block(inode, + le64_to_cpu(eb->h_next_leaf_blk), + &next_eb_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data; + + if (!OCFS2_IS_VALID_EXTENT_BLOCK(next_eb)) { + ret = -EROFS; + OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, next_eb); + goto out; + } + + el = &next_eb->h_list; + + i = ocfs2_search_for_hole_index(el, v_cluster); + } + +no_more_extents: + if (i == le16_to_cpu(el->l_next_free_rec)) { + /* + * We're at the end of our existing allocation. 
Just + * return the maximum number of clusters we could + * possibly allocate. + */ + *num_clusters = UINT_MAX - v_cluster; + } else { + *num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster; + } + + ret = 0; +out: + brelse(next_eb_bh); + return ret; +} + +static int ocfs2_get_clusters_nocache(struct inode *inode, + struct buffer_head *di_bh, + u32 v_cluster, unsigned int *hole_len, + struct ocfs2_extent_rec *ret_rec, + unsigned int *is_last) +{ + int i, ret, tree_height, len; + struct ocfs2_dinode *di; + struct ocfs2_extent_block *uninitialized_var(eb); + struct ocfs2_extent_list *el; + struct ocfs2_extent_rec *rec; + struct buffer_head *eb_bh = NULL; + + memset(ret_rec, 0, sizeof(*ret_rec)); + if (is_last) + *is_last = 0; + + di = (struct ocfs2_dinode *) di_bh->b_data; + el = &di->id2.i_list; + tree_height = le16_to_cpu(el->l_tree_depth); + + if (tree_height > 0) { + ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + + if (el->l_tree_depth) { + ocfs2_error(inode->i_sb, + "Inode %lu has non zero tree depth in " + "leaf block %llu\n", inode->i_ino, + (unsigned long long)eb_bh->b_blocknr); + ret = -EROFS; + goto out; + } + } + + i = ocfs2_search_extent_list(el, v_cluster); + if (i == -1) { + /* + * Holes can be larger than the maximum size of an + * extent, so we return their lengths in a separate + * field. + */ + if (hole_len) { + ret = ocfs2_figure_hole_clusters(inode, el, eb_bh, + v_cluster, &len); + if (ret) { + mlog_errno(ret); + goto out; + } + + *hole_len = len; + } + goto out_hole; + } + + rec = &el->l_recs[i]; + + BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos)); + + if (!rec->e_blkno) { + ocfs2_error(inode->i_sb, "Inode %lu has bad extent " + "record (%u, %u, 0)", inode->i_ino, + le32_to_cpu(rec->e_cpos), + ocfs2_rec_clusters(el, rec)); + ret = -EROFS; + goto out; + } + + *ret_rec = *rec; + + /* + * Checking for last extent is potentially expensive - we + * might have to look at the next leaf over to see if it's + * empty. + * + * The first two checks are to see whether the caller even + * cares for this information, and if the extent is at least + * the last in its list. 
+ * + * If those hold true, then the extent is last if any of the + * additional conditions hold true: + * - Extent list is in-inode + * - Extent list is right-most + * - Extent list is 2nd to rightmost, with empty right-most + */ + if (is_last) { + if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) { + if (tree_height == 0) + *is_last = 1; + else if (eb->h_blkno == di->i_last_eb_blk) + *is_last = 1; + else if (eb->h_next_leaf_blk == di->i_last_eb_blk) { + ret = ocfs2_last_eb_is_empty(inode, di); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + if (ret == 1) + *is_last = 1; + } + } + } + +out_hole: + ret = 0; +out: + brelse(eb_bh); + return ret; +} + +static void ocfs2_relative_extent_offsets(struct super_block *sb, + u32 v_cluster, + struct ocfs2_extent_rec *rec, + u32 *p_cluster, u32 *num_clusters) + +{ + u32 coff = v_cluster - le32_to_cpu(rec->e_cpos); + + *p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno)); + *p_cluster = *p_cluster + coff; + + if (num_clusters) + *num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff; +} + +int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, + u32 *p_cluster, u32 *num_clusters, + struct ocfs2_extent_list *el) +{ + int ret = 0, i; + struct buffer_head *eb_bh = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_rec *rec; + u32 coff; + + if (el->l_tree_depth) { + ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + + if (el->l_tree_depth) { + ocfs2_error(inode->i_sb, + "Inode %lu has non zero tree depth in " + "xattr leaf block %llu\n", inode->i_ino, + (unsigned long long)eb_bh->b_blocknr); + ret = -EROFS; + goto out; + } + } + + i = ocfs2_search_extent_list(el, v_cluster); + if (i == -1) { + ret = -EROFS; + mlog_errno(ret); + goto out; + } else { + rec = &el->l_recs[i]; + BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos)); + + if (!rec->e_blkno) { + ocfs2_error(inode->i_sb, "Inode %lu has bad extent " + "record (%u, %u, 0) in xattr", inode->i_ino, + le32_to_cpu(rec->e_cpos), + ocfs2_rec_clusters(el, rec)); + ret = -EROFS; + goto out; + } + coff = v_cluster - le32_to_cpu(rec->e_cpos); + *p_cluster = ocfs2_blocks_to_clusters(inode->i_sb, + le64_to_cpu(rec->e_blkno)); + *p_cluster = *p_cluster + coff; + if (num_clusters) + *num_clusters = ocfs2_rec_clusters(el, rec) - coff; + } +out: + if (eb_bh) + brelse(eb_bh); + return ret; +} + +int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, + u32 *p_cluster, u32 *num_clusters, + unsigned int *extent_flags) +{ + int ret; + unsigned int uninitialized_var(hole_len), flags = 0; + struct buffer_head *di_bh = NULL; + struct ocfs2_extent_rec rec; + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + ret = -ERANGE; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster, + num_clusters, extent_flags); + if (ret == 0) + goto out; + + ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len, + &rec, NULL); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (rec.e_blkno == 0ULL) { + /* + * A hole was found. Return some canned values that + * callers can key on. If asked for, num_clusters will + * be populated with the size of the hole. 
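+ * + * For example, a lookup that lands in a hole yields *p_cluster == 0 + * and, when the caller passed num_clusters, the hole length in + * clusters; a mapped extent instead goes through + * ocfs2_relative_extent_offsets() below and is cached in the extent + * map.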
+ */ + *p_cluster = 0; + if (num_clusters) { + *num_clusters = hole_len; + } + } else { + ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec, + p_cluster, num_clusters); + flags = rec.e_flags; + + ocfs2_extent_map_insert_rec(inode, &rec); + } + + if (extent_flags) + *extent_flags = flags; + +out: + brelse(di_bh); + return ret; +} + +/* + * This expects alloc_sem to be held. The allocation cannot change at + * all while the map is in the process of being updated. + */ +int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, + u64 *ret_count, unsigned int *extent_flags) +{ + int ret; + int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + u32 cpos, num_clusters, p_cluster; + u64 boff = 0; + + cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno); + + ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters, + extent_flags); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * p_cluster == 0 indicates a hole. + */ + if (p_cluster) { + boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); + boff += (v_blkno & (u64)(bpc - 1)); + } + + *p_blkno = boff; + + if (ret_count) { + *ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); + *ret_count -= v_blkno & (u64)(bpc - 1); + } + +out: + return ret; +} + +static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, + struct fiemap_extent_info *fieinfo, + u64 map_start) +{ + int ret; + unsigned int id_count; + struct ocfs2_dinode *di; + u64 phys; + u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + di = (struct ocfs2_dinode *)di_bh->b_data; + id_count = le16_to_cpu(di->id2.i_data.id_count); + + if (map_start < id_count) { + phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits; + phys += offsetof(struct ocfs2_dinode, id2.i_data.id_data); + + ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count, + flags); + if (ret < 0) + return ret; + } + + return 0; +} + +#define OCFS2_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) + +int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 map_start, u64 map_len) +{ + int ret, is_last; + u32 mapping_end, cpos; + unsigned int hole_size; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u64 len_bytes, phys_bytes, virt_bytes; + struct buffer_head *di_bh = NULL; + struct ocfs2_extent_rec rec; + + ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS); + if (ret) + return ret; + + ret = ocfs2_inode_lock(inode, &di_bh, 0); + if (ret) { + mlog_errno(ret); + goto out; + } + + down_read(&OCFS2_I(inode)->ip_alloc_sem); + + /* + * Handle inline-data separately. 
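+ * + * Inline data lives in the dinode block itself, so + * ocfs2_fiemap_inline() above reports at most one extent, flagged + * FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST, whose physical + * offset points at the id_data area of the inode's disk block.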
+ */ + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start); + goto out_unlock; + } + + cpos = map_start >> osb->s_clustersize_bits; + mapping_end = ocfs2_clusters_for_bytes(inode->i_sb, + map_start + map_len); + mapping_end -= cpos; + is_last = 0; + while (cpos < mapping_end && !is_last) { + u32 fe_flags; + + ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, + &hole_size, &rec, &is_last); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (rec.e_blkno == 0ULL) { + cpos += hole_size; + continue; + } + + fe_flags = 0; + if (rec.e_flags & OCFS2_EXT_UNWRITTEN) + fe_flags |= FIEMAP_EXTENT_UNWRITTEN; + if (is_last) + fe_flags |= FIEMAP_EXTENT_LAST; + len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits; + phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits; + virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits; + + ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes, + len_bytes, fe_flags); + if (ret) + break; + + cpos = le32_to_cpu(rec.e_cpos)+ le16_to_cpu(rec.e_leaf_clusters); + } + + if (ret > 0) + ret = 0; + +out_unlock: + brelse(di_bh); + + up_read(&OCFS2_I(inode)->ip_alloc_sem); + + ocfs2_inode_unlock(inode, 0); +out: + + return ret; +} diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h new file mode 100644 index 0000000..1c4aa8b --- /dev/null +++ b/fs/ocfs2/extent_map.h @@ -0,0 +1,60 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * extent_map.h + * + * In-memory file extent mappings for OCFS2. + * + * Copyright (C) 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef _EXTENT_MAP_H +#define _EXTENT_MAP_H + +struct ocfs2_extent_map_item { + unsigned int ei_cpos; + unsigned int ei_phys; + unsigned int ei_clusters; + unsigned int ei_flags; + + struct list_head ei_list; +}; + +#define OCFS2_MAX_EXTENT_MAP_ITEMS 3 +struct ocfs2_extent_map { + unsigned int em_num_items; + struct list_head em_list; +}; + +void ocfs2_extent_map_init(struct inode *inode); +void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cluster); +void ocfs2_extent_map_insert_rec(struct inode *inode, + struct ocfs2_extent_rec *rec); + +int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster, + u32 *num_clusters, unsigned int *extent_flags); +int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, + u64 *ret_count, unsigned int *extent_flags); + +int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 map_start, u64 map_len); + +int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, + u32 *p_cluster, u32 *num_clusters, + struct ocfs2_extent_list *el); + +#endif /* _EXTENT_MAP_H */ diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c new file mode 100644 index 0000000..e2570a3 --- /dev/null +++ b/fs/ocfs2/file.c @@ -0,0 +1,2180 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * file.c + * + * File open, close, extend, truncate + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/capability.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/pagemap.h> +#include <linux/uio.h> +#include <linux/sched.h> +#include <linux/splice.h> +#include <linux/mount.h> +#include <linux/writeback.h> +#include <linux/falloc.h> + +#define MLOG_MASK_PREFIX ML_INODE +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "aops.h" +#include "dir.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "file.h" +#include "sysfile.h" +#include "inode.h" +#include "ioctl.h" +#include "journal.h" +#include "locks.h" +#include "mmap.h" +#include "suballoc.h" +#include "super.h" +#include "xattr.h" + +#include "buffer_head_io.h" + +static int ocfs2_sync_inode(struct inode *inode) +{ + filemap_fdatawrite(inode->i_mapping); + return sync_mapping_buffers(inode->i_mapping); +} + +static int ocfs2_init_file_private(struct inode *inode, struct file *file) +{ + struct ocfs2_file_private *fp; + + fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL); + if (!fp) + return -ENOMEM; + + fp->fp_file = file; + mutex_init(&fp->fp_mutex); + ocfs2_file_lock_res_init(&fp->fp_flock, fp); + file->private_data = fp; + + return 0; +} + +static void ocfs2_free_file_private(struct inode *inode, struct file *file) +{ + struct ocfs2_file_private *fp = file->private_data; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (fp) { + ocfs2_simple_drop_lockres(osb, &fp->fp_flock); + ocfs2_lock_res_free(&fp->fp_flock); + kfree(fp); + file->private_data = NULL; + } +} + +static int ocfs2_file_open(struct inode *inode, struct file *file) +{ + int status; + int mode = file->f_flags; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, + file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); + + spin_lock(&oi->ip_lock); + + /* Check that the inode hasn't been wiped from disk by another + * node. If it hasn't then we're safe as long as we hold the + * spin lock until our increment of open count. */ + if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { + spin_unlock(&oi->ip_lock); + + status = -ENOENT; + goto leave; + } + + if (mode & O_DIRECT) + oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT; + + oi->ip_open_count++; + spin_unlock(&oi->ip_lock); + + status = ocfs2_init_file_private(inode, file); + if (status) { + /* + * We want to set open count back if we're failing the + * open. 
+ */ + spin_lock(&oi->ip_lock); + oi->ip_open_count--; + spin_unlock(&oi->ip_lock); + } + +leave: + mlog_exit(status); + return status; +} + +static int ocfs2_file_release(struct inode *inode, struct file *file) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, + file->f_path.dentry->d_name.len, + file->f_path.dentry->d_name.name); + + spin_lock(&oi->ip_lock); + if (!--oi->ip_open_count) + oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT; + spin_unlock(&oi->ip_lock); + + ocfs2_free_file_private(inode, file); + + mlog_exit(0); + + return 0; +} + +static int ocfs2_dir_open(struct inode *inode, struct file *file) +{ + return ocfs2_init_file_private(inode, file); +} + +static int ocfs2_dir_release(struct inode *inode, struct file *file) +{ + ocfs2_free_file_private(inode, file); + return 0; +} + +static int ocfs2_sync_file(struct file *file, + struct dentry *dentry, + int datasync) +{ + int err = 0; + journal_t *journal; + struct inode *inode = dentry->d_inode; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync, + dentry->d_name.len, dentry->d_name.name); + + err = ocfs2_sync_inode(dentry->d_inode); + if (err) + goto bail; + + journal = osb->journal->j_journal; + err = jbd2_journal_force_commit(journal); + +bail: + mlog_exit(err); + + return (err < 0) ? -EIO : 0; +} + +int ocfs2_should_update_atime(struct inode *inode, + struct vfsmount *vfsmnt) +{ + struct timespec now; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + return 0; + + if ((inode->i_flags & S_NOATIME) || + ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))) + return 0; + + /* + * We can be called with no vfsmnt structure - NFSD will + * sometimes do this. + * + * Note that our action here is different than touch_atime() - + * if we can't tell whether this is a noatime mount, then we + * don't know whether to trust the value of s_atime_quantum. + */ + if (vfsmnt == NULL) + return 0; + + if ((vfsmnt->mnt_flags & MNT_NOATIME) || + ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) + return 0; + + if (vfsmnt->mnt_flags & MNT_RELATIME) { + if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) || + (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0)) + return 1; + + return 0; + } + + now = CURRENT_TIME; + if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)) + return 0; + else + return 1; +} + +int ocfs2_update_inode_atime(struct inode *inode, + struct buffer_head *bh) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + handle_t *handle; + struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data; + + mlog_entry_void(); + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * Don't use ocfs2_mark_inode_dirty() here as we don't always + * have i_mutex to guard against concurrent changes to other + * inode fields. 
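+ * + * Also note that reaching this transaction at all is expected to be + * rate-limited by ocfs2_should_update_atime() above, which skips + * updates landing within s_atime_quantum seconds of the recorded + * atime.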
+ */ + inode->i_atime = CURRENT_TIME; + di->i_atime = cpu_to_le64(inode->i_atime.tv_sec); + di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec); + + ret = ocfs2_journal_dirty(handle, bh); + if (ret < 0) + mlog_errno(ret); + +out_commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + mlog_exit(ret); + return ret; +} + +static int ocfs2_set_inode_size(handle_t *handle, + struct inode *inode, + struct buffer_head *fe_bh, + u64 new_i_size) +{ + int status; + + mlog_entry_void(); + i_size_write(inode, new_i_size); + inode->i_blocks = ocfs2_inode_sector_count(inode); + inode->i_ctime = inode->i_mtime = CURRENT_TIME; + + status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_simple_size_update(struct inode *inode, + struct buffer_head *di_bh, + u64 new_i_size) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + handle_t *handle = NULL; + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_set_inode_size(handle, inode, di_bh, + new_i_size); + if (ret < 0) + mlog_errno(ret); + + ocfs2_commit_trans(osb, handle); +out: + return ret; +} + +static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, + struct inode *inode, + struct buffer_head *fe_bh, + u64 new_i_size) +{ + int status; + handle_t *handle; + struct ocfs2_dinode *di; + u64 cluster_bytes; + + mlog_entry_void(); + + /* TODO: This needs to actually orphan the inode in this + * transaction. */ + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out; + } + + status = ocfs2_journal_access(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + + /* + * Do this before setting i_size. 
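+ * + * Illustrative numbers: with 64K clusters, a truncate to 70K rounds + * cluster_bytes up to 128K, so bytes 70K-128K of the tail cluster get + * zeroed and a later extend cannot expose stale data.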
+ */ + cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size); + status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size, + cluster_bytes); + if (status) { + mlog_errno(status); + goto out_commit; + } + + i_size_write(inode, new_i_size); + inode->i_ctime = inode->i_mtime = CURRENT_TIME; + + di = (struct ocfs2_dinode *) fe_bh->b_data; + di->i_size = cpu_to_le64(new_i_size); + di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec); + di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); + + status = ocfs2_journal_dirty(handle, fe_bh); + if (status < 0) + mlog_errno(status); + +out_commit: + ocfs2_commit_trans(osb, handle); +out: + + mlog_exit(status); + return status; +} + +static int ocfs2_truncate_file(struct inode *inode, + struct buffer_head *di_bh, + u64 new_i_size) +{ + int status = 0; + struct ocfs2_dinode *fe = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_truncate_context *tc = NULL; + + mlog_entry("(inode = %llu, new_i_size = %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)new_i_size); + + fe = (struct ocfs2_dinode *) di_bh->b_data; + if (!OCFS2_IS_VALID_DINODE(fe)) { + OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe); + status = -EIO; + goto bail; + } + + mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), + "Inode %llu, inode i_size = %lld != di " + "i_size = %llu, i_flags = 0x%x\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + i_size_read(inode), + (unsigned long long)le64_to_cpu(fe->i_size), + le32_to_cpu(fe->i_flags)); + + if (new_i_size > le64_to_cpu(fe->i_size)) { + mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n", + (unsigned long long)le64_to_cpu(fe->i_size), + (unsigned long long)new_i_size); + status = -EINVAL; + mlog_errno(status); + goto bail; + } + + mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n", + (unsigned long long)le64_to_cpu(fe->i_blkno), + (unsigned long long)le64_to_cpu(fe->i_size), + (unsigned long long)new_i_size); + + /* let's handle the simple truncate cases before doing any more + * cluster locking. */ + if (new_i_size == le64_to_cpu(fe->i_size)) + goto bail; + + down_write(&OCFS2_I(inode)->ip_alloc_sem); + + /* + * The inode lock forced other nodes to sync and drop their + * pages, which (correctly) happens even if we have a truncate + * without allocation change - ocfs2 cluster sizes can be much + * greater than page size, so we have to truncate them + * anyway. + */ + unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1); + truncate_inode_pages(inode->i_mapping, new_i_size); + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + status = ocfs2_truncate_inline(inode, di_bh, new_i_size, + i_size_read(inode), 1); + if (status) + mlog_errno(status); + + goto bail_unlock_sem; + } + + /* alright, we're going to need to do a full blown alloc size + * change. Orphan the inode so that recovery can complete the + * truncate if necessary. This does the task of marking + * i_size. */ + status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size); + if (status < 0) { + mlog_errno(status); + goto bail_unlock_sem; + } + + status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc); + if (status < 0) { + mlog_errno(status); + goto bail_unlock_sem; + } + + status = ocfs2_commit_truncate(osb, inode, di_bh, tc); + if (status < 0) { + mlog_errno(status); + goto bail_unlock_sem; + } + + /* TODO: orphan dir cleanup here. 
*/ +bail_unlock_sem: + up_write(&OCFS2_I(inode)->ip_alloc_sem); + +bail: + + mlog_exit(status); + return status; +} + +/* + * extend file allocation only here. + * we'll update all the disk stuff, and oip->alloc_size + * + * expect stuff to be locked, a transaction started and enough data / + * metadata reservations in the contexts. + * + * Will return -EAGAIN, and a reason if a restart is needed. + * If passed in, *reason will always be set, even in error. + */ +int ocfs2_add_inode_data(struct ocfs2_super *osb, + struct inode *inode, + u32 *logical_offset, + u32 clusters_to_add, + int mark_unwritten, + struct buffer_head *fe_bh, + handle_t *handle, + struct ocfs2_alloc_context *data_ac, + struct ocfs2_alloc_context *meta_ac, + enum ocfs2_alloc_restarted *reason_ret) +{ + int ret; + struct ocfs2_extent_tree et; + + ocfs2_init_dinode_extent_tree(&et, inode, fe_bh); + ret = ocfs2_add_clusters_in_btree(osb, inode, logical_offset, + clusters_to_add, mark_unwritten, + &et, handle, + data_ac, meta_ac, reason_ret); + + return ret; +} + +static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start, + u32 clusters_to_add, int mark_unwritten) +{ + int status = 0; + int restart_func = 0; + int credits; + u32 prev_clusters; + struct buffer_head *bh = NULL; + struct ocfs2_dinode *fe = NULL; + handle_t *handle = NULL; + struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *meta_ac = NULL; + enum ocfs2_alloc_restarted why; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_extent_tree et; + + mlog_entry("(clusters_to_add = %u)\n", clusters_to_add); + + /* + * This function only exists for file systems which don't + * support holes. + */ + BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb)); + + status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + fe = (struct ocfs2_dinode *) bh->b_data; + if (!OCFS2_IS_VALID_DINODE(fe)) { + OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe); + status = -EIO; + goto leave; + } + +restart_all: + BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); + + mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, " + "clusters_to_add = %u\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters), + clusters_to_add); + ocfs2_init_dinode_extent_tree(&et, inode, bh); + status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, + &data_ac, &meta_ac); + if (status) { + mlog_errno(status); + goto leave; + } + + credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list, + clusters_to_add); + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto leave; + } + +restarted_transaction: + /* reserve a write to the file entry early on - that way if we + * run out of credits in the allocation path, we can still + * update i_size. 
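+ * + * If ocfs2_add_inode_data() below comes back with RESTART_TRANS, the + * handle is extended and we jump back here; RESTART_META instead + * restarts the whole function to reserve more metadata.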
*/ + status = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + prev_clusters = OCFS2_I(inode)->ip_clusters; + + status = ocfs2_add_inode_data(osb, + inode, + &logical_start, + clusters_to_add, + mark_unwritten, + bh, + handle, + data_ac, + meta_ac, + &why); + if ((status < 0) && (status != -EAGAIN)) { + if (status != -ENOSPC) + mlog_errno(status); + goto leave; + } + + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + spin_lock(&OCFS2_I(inode)->ip_lock); + clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters); + spin_unlock(&OCFS2_I(inode)->ip_lock); + + if (why != RESTART_NONE && clusters_to_add) { + if (why == RESTART_META) { + mlog(0, "restarting function.\n"); + restart_func = 1; + } else { + BUG_ON(why != RESTART_TRANS); + + mlog(0, "restarting transaction.\n"); + /* TODO: This can be more intelligent. */ + credits = ocfs2_calc_extend_credits(osb->sb, + &fe->id2.i_list, + clusters_to_add); + status = ocfs2_extend_trans(handle, credits); + if (status < 0) { + /* handle still has to be committed at + * this point. */ + status = -ENOMEM; + mlog_errno(status); + goto leave; + } + goto restarted_transaction; + } + } + + mlog(0, "fe: i_clusters = %u, i_size=%llu\n", + le32_to_cpu(fe->i_clusters), + (unsigned long long)le64_to_cpu(fe->i_size)); + mlog(0, "inode: ip_clusters=%u, i_size=%lld\n", + OCFS2_I(inode)->ip_clusters, (long long)i_size_read(inode)); + +leave: + if (handle) { + ocfs2_commit_trans(osb, handle); + handle = NULL; + } + if (data_ac) { + ocfs2_free_alloc_context(data_ac); + data_ac = NULL; + } + if (meta_ac) { + ocfs2_free_alloc_context(meta_ac); + meta_ac = NULL; + } + if ((!status) && restart_func) { + restart_func = 0; + goto restart_all; + } + brelse(bh); + bh = NULL; + + mlog_exit(status); + return status; +} + +/* Some parts of this taken from generic_cont_expand, which turned out + * to be too fragile to do exactly what we need without us having to + * worry about recursive locking in ->write_begin() and ->write_end(). */ +static int ocfs2_write_zero_page(struct inode *inode, + u64 size) +{ + struct address_space *mapping = inode->i_mapping; + struct page *page; + unsigned long index; + unsigned int offset; + handle_t *handle = NULL; + int ret; + + offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ + /* ugh. in prepare/commit_write, if from==to==start of block, we + ** skip the prepare. make sure we never send an offset for the start + ** of a block + */ + if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) { + offset++; + } + index = size >> PAGE_CACHE_SHIFT; + + page = grab_cache_page(mapping, index); + if (!page) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_prepare_write_nolock(inode, page, offset, offset); + if (ret < 0) { + mlog_errno(ret); + goto out_unlock; + } + + if (ocfs2_should_order_data(inode)) { + handle = ocfs2_start_walk_page_trans(inode, page, offset, + offset); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + handle = NULL; + goto out_unlock; + } + } + + /* must not update i_size! 
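+ * + * (With from == to == offset, block_commit_write() dirties the block + * containing the zeroed byte without touching i_size; the size change + * itself lands later, e.g. via ocfs2_simple_size_update() on the + * extend path.)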
*/ + ret = block_commit_write(page, offset, offset); + if (ret < 0) + mlog_errno(ret); + else + ret = 0; + + if (handle) + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out_unlock: + unlock_page(page); + page_cache_release(page); +out: + return ret; +} + +static int ocfs2_zero_extend(struct inode *inode, + u64 zero_to_size) +{ + int ret = 0; + u64 start_off; + struct super_block *sb = inode->i_sb; + + start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); + while (start_off < zero_to_size) { + ret = ocfs2_write_zero_page(inode, start_off); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + start_off += sb->s_blocksize; + + /* + * Very large extends have the potential to lock up + * the cpu for extended periods of time. + */ + cond_resched(); + } + +out: + return ret; +} + +int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to) +{ + int ret; + u32 clusters_to_add; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size); + if (clusters_to_add < oi->ip_clusters) + clusters_to_add = 0; + else + clusters_to_add -= oi->ip_clusters; + + if (clusters_to_add) { + ret = __ocfs2_extend_allocation(inode, oi->ip_clusters, + clusters_to_add, 0); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + /* + * Call this even if we don't add any clusters to the tree. We + * still need to zero the area between the old i_size and the + * new i_size. + */ + ret = ocfs2_zero_extend(inode, zero_to); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + +static int ocfs2_extend_file(struct inode *inode, + struct buffer_head *di_bh, + u64 new_i_size) +{ + int ret = 0; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + BUG_ON(!di_bh); + + /* setattr sometimes calls us like this. */ + if (new_i_size == 0) + goto out; + + if (i_size_read(inode) == new_i_size) + goto out; + BUG_ON(new_i_size < i_size_read(inode)); + + /* + * Fall through for converting inline data, even if the fs + * supports sparse files. + * + * The check for inline data here is legal - nobody can add + * the feature since we have i_mutex. We must check it again + * after acquiring ip_alloc_sem though, as paths like mmap + * might have raced us to converting the inode to extents. + */ + if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) + && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) + goto out_update_size; + + /* + * The alloc sem blocks people in read/write from reading our + * allocation until we're done changing it. We depend on + * i_mutex to block other extend/truncate calls while we're + * here. + */ + down_write(&oi->ip_alloc_sem); + + if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + /* + * We can optimize small extends by keeping the inode's + * inline data. 
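+ * + * For example, growing a few-hundred-byte inline file by another + * hundred bytes stays inline as long as + * ocfs2_size_fits_inline_data() says the new size still fits in the + * dinode's id_data area; only larger extends fall through to the + * conversion to extents below.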
+ */ + if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) { + up_write(&oi->ip_alloc_sem); + goto out_update_size; + } + + ret = ocfs2_convert_inline_data_to_extents(inode, di_bh); + if (ret) { + up_write(&oi->ip_alloc_sem); + + mlog_errno(ret); + goto out; + } + } + + if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) + ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size); + + up_write(&oi->ip_alloc_sem); + + if (ret < 0) { + mlog_errno(ret); + goto out; + } + +out_update_size: + ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + +int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) +{ + int status = 0, size_change; + struct inode *inode = dentry->d_inode; + struct super_block *sb = inode->i_sb; + struct ocfs2_super *osb = OCFS2_SB(sb); + struct buffer_head *bh = NULL; + handle_t *handle = NULL; + + mlog_entry("(0x%p, '%.*s')\n", dentry, + dentry->d_name.len, dentry->d_name.name); + + /* ensuring we don't even attempt to truncate a symlink */ + if (S_ISLNK(inode->i_mode)) + attr->ia_valid &= ~ATTR_SIZE; + + if (attr->ia_valid & ATTR_MODE) + mlog(0, "mode change: %d\n", attr->ia_mode); + if (attr->ia_valid & ATTR_UID) + mlog(0, "uid change: %d\n", attr->ia_uid); + if (attr->ia_valid & ATTR_GID) + mlog(0, "gid change: %d\n", attr->ia_gid); + if (attr->ia_valid & ATTR_SIZE) + mlog(0, "size change...\n"); + if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME)) + mlog(0, "time change...\n"); + +#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ + | ATTR_GID | ATTR_UID | ATTR_MODE) + if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) { + mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid); + return 0; + } + + status = inode_change_ok(inode, attr); + if (status) + return status; + + size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; + if (size_change) { + status = ocfs2_rw_lock(inode, 1); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + status = ocfs2_inode_lock(inode, &bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto bail_unlock_rw; + } + + if (size_change && attr->ia_size != i_size_read(inode)) { + if (attr->ia_size > sb->s_maxbytes) { + status = -EFBIG; + goto bail_unlock; + } + + if (i_size_read(inode) > attr->ia_size) { + if (ocfs2_should_order_data(inode)) { + status = ocfs2_begin_ordered_truncate(inode, + attr->ia_size); + if (status) + goto bail_unlock; + } + status = ocfs2_truncate_file(inode, bh, attr->ia_size); + } else + status = ocfs2_extend_file(inode, bh, attr->ia_size); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + status = -ENOSPC; + goto bail_unlock; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } + + /* + * This will intentionally not wind up calling vmtruncate(), + * since all the work for a size change has been done above. + * Otherwise, we could get into problems with truncate as + * ip_alloc_sem is used there to protect against i_size + * changes. 
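+ * + * By this point a size change has already made i_size equal to + * attr->ia_size, so inode_setattr() should find nothing left to + * truncate and simply copies the remaining attributes (mode, + * uid/gid, times) into the inode.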
+ */ + status = inode_setattr(inode, attr); + if (status < 0) { + mlog_errno(status); + goto bail_commit; + } + + status = ocfs2_mark_inode_dirty(handle, inode, bh); + if (status < 0) + mlog_errno(status); + +bail_commit: + ocfs2_commit_trans(osb, handle); +bail_unlock: + ocfs2_inode_unlock(inode, 1); +bail_unlock_rw: + if (size_change) + ocfs2_rw_unlock(inode, 1); +bail: + brelse(bh); + + mlog_exit(status); + return status; +} + +int ocfs2_getattr(struct vfsmount *mnt, + struct dentry *dentry, + struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + struct super_block *sb = dentry->d_inode->i_sb; + struct ocfs2_super *osb = sb->s_fs_info; + int err; + + mlog_entry_void(); + + err = ocfs2_inode_revalidate(dentry); + if (err) { + if (err != -ENOENT) + mlog_errno(err); + goto bail; + } + + generic_fillattr(inode, stat); + + /* We set the blksize from the cluster size for performance */ + stat->blksize = osb->s_clustersize; + +bail: + mlog_exit(err); + + return err; +} + +int ocfs2_permission(struct inode *inode, int mask) +{ + int ret; + + mlog_entry_void(); + + ret = ocfs2_inode_lock(inode, NULL, 0); + if (ret) { + if (ret != -ENOENT) + mlog_errno(ret); + goto out; + } + + ret = generic_permission(inode, mask, NULL); + + ocfs2_inode_unlock(inode, 0); +out: + mlog_exit(ret); + return ret; +} + +static int __ocfs2_write_remove_suid(struct inode *inode, + struct buffer_head *bh) +{ + int ret; + handle_t *handle; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_dinode *di; + + mlog_entry("(Inode %llu, mode 0%o)\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode); + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out_trans; + } + + inode->i_mode &= ~S_ISUID; + if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP)) + inode->i_mode &= ~S_ISGID; + + di = (struct ocfs2_dinode *) bh->b_data; + di->i_mode = cpu_to_le16(inode->i_mode); + + ret = ocfs2_journal_dirty(handle, bh); + if (ret < 0) + mlog_errno(ret); + +out_trans: + ocfs2_commit_trans(osb, handle); +out: + mlog_exit(ret); + return ret; +} + +/* + * Will look for holes and unwritten extents in the range starting at + * pos for count bytes (inclusive). 
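+ * + * Returns 1 as soon as a hole (phys_cpos == 0) or an unwritten + * extent is found, 0 when the whole range is allocated and written, + * and a negative errno on lookup failure.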
+ */ +static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos, + size_t count) +{ + int ret = 0; + unsigned int extent_flags; + u32 cpos, clusters, extent_len, phys_cpos; + struct super_block *sb = inode->i_sb; + + cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; + clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos; + + while (clusters) { + ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len, + &extent_flags); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) { + ret = 1; + break; + } + + if (extent_len > clusters) + extent_len = clusters; + + clusters -= extent_len; + cpos += extent_len; + } +out: + return ret; +} + +static int ocfs2_write_remove_suid(struct inode *inode) +{ + int ret; + struct buffer_head *bh = NULL; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + ret = ocfs2_read_block(inode, oi->ip_blkno, &bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = __ocfs2_write_remove_suid(inode, bh); +out: + brelse(bh); + return ret; +} + +/* + * Allocate enough extents to cover the region starting at byte offset + * start for len bytes. Existing extents are skipped, any extents + * added are marked as "unwritten". + */ +static int ocfs2_allocate_unwritten_extents(struct inode *inode, + u64 start, u64 len) +{ + int ret; + u32 cpos, phys_cpos, clusters, alloc_size; + u64 end = start + len; + struct buffer_head *di_bh = NULL; + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, + &di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Nothing to do if the requested reservation range + * fits within the inode. + */ + if (ocfs2_size_fits_inline_data(di_bh, end)) + goto out; + + ret = ocfs2_convert_inline_data_to_extents(inode, di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + /* + * We consider both start and len to be inclusive. + */ + cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; + clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len); + clusters -= cpos; + + while (clusters) { + ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, + &alloc_size, NULL); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Hole or existing extent len can be arbitrary, so + * cap it to our own allocation request. + */ + if (alloc_size > clusters) + alloc_size = clusters; + + if (phys_cpos) { + /* + * We already have an allocation at this + * region so we can safely skip it. 
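+ * + * E.g. (illustrative cluster numbers) asking for clusters 0-9 when + * 3-5 are already allocated extends only 0-2 and 6-9, over two + * passes of this loop.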
+ */ + goto next; + } + + ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + +next: + cpos += alloc_size; + clusters -= alloc_size; + } + + ret = 0; +out: + + brelse(di_bh); + return ret; +} + +static int __ocfs2_remove_inode_range(struct inode *inode, + struct buffer_head *di_bh, + u32 cpos, u32 phys_cpos, u32 len, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct inode *tl_inode = osb->osb_tl_inode; + handle_t *handle; + struct ocfs2_alloc_context *meta_ac = NULL; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_extent_tree et; + + ocfs2_init_dinode_extent_tree(&et, inode, di_bh); + + ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac); + if (ret) { + mlog_errno(ret); + return ret; + } + + mutex_lock(&tl_inode->i_mutex); + + if (ocfs2_truncate_log_needs_flush(osb)) { + ret = __ocfs2_flush_truncate_log(osb); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac, + dealloc); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + OCFS2_I(inode)->ip_clusters -= len; + di->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); + if (ret) + mlog_errno(ret); + +out_commit: + ocfs2_commit_trans(osb, handle); +out: + mutex_unlock(&tl_inode->i_mutex); + + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + + return ret; +} + +/* + * Truncate a byte range, avoiding pages within partial clusters. This + * preserves those pages for the zeroing code to write to. + */ +static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start, + u64 byte_len) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + loff_t start, end; + struct address_space *mapping = inode->i_mapping; + + start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start); + end = byte_start + byte_len; + end = end & ~(osb->s_clustersize - 1); + + if (start < end) { + unmap_mapping_range(mapping, start, end - start, 0); + truncate_inode_pages_range(mapping, start, end - 1); + } +} + +static int ocfs2_zero_partial_clusters(struct inode *inode, + u64 start, u64 len) +{ + int ret = 0; + u64 tmpend, end = start + len; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + unsigned int csize = osb->s_clustersize; + handle_t *handle; + + /* + * The "start" and "end" values are NOT necessarily part of + * the range whose allocation is being deleted. Rather, this + * is what the user passed in with the request. We must zero + * partial clusters here. There's no need to worry about + * physical allocation - the zeroing code knows to skip holes. + */ + mlog(0, "byte start: %llu, end: %llu\n", + (unsigned long long)start, (unsigned long long)end); + + /* + * If both edges are on a cluster boundary then there's no + * zeroing required as the region is part of the allocation to + * be truncated. 
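+ * + * Worked example with illustrative sizes: for 4K clusters, punching + * bytes 1000-9000 zeroes 1000-4095 in the first cluster and + * 8192-8999 in the last, while punching 4096-8191 is cluster-aligned + * and skips the zeroing entirely.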
+ */ + if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0) + goto out; + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + /* + * We want to get the byte offset of the end of the 1st cluster. + */ + tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1)); + if (tmpend > end) + tmpend = end; + + mlog(0, "1st range: start: %llu, tmpend: %llu\n", + (unsigned long long)start, (unsigned long long)tmpend); + + ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); + if (ret) + mlog_errno(ret); + + if (tmpend < end) { + /* + * This may make start and end equal, but the zeroing + * code will skip any work in that case so there's no + * need to catch it up here. + */ + start = end & ~(osb->s_clustersize - 1); + + mlog(0, "2nd range: start: %llu, end: %llu\n", + (unsigned long long)start, (unsigned long long)end); + + ret = ocfs2_zero_range_for_truncate(inode, handle, start, end); + if (ret) + mlog_errno(ret); + } + + ocfs2_commit_trans(osb, handle); +out: + return ret; +} + +static int ocfs2_remove_inode_range(struct inode *inode, + struct buffer_head *di_bh, u64 byte_start, + u64 byte_len) +{ + int ret = 0; + u32 trunc_start, trunc_len, cpos, phys_cpos, alloc_size; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_cached_dealloc_ctxt dealloc; + struct address_space *mapping = inode->i_mapping; + + ocfs2_init_dealloc_ctxt(&dealloc); + + if (byte_len == 0) + return 0; + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + ret = ocfs2_truncate_inline(inode, di_bh, byte_start, + byte_start + byte_len, 0); + if (ret) { + mlog_errno(ret); + goto out; + } + /* + * There's no need to get fancy with the page cache + * truncate of an inline-data inode. We're talking + * about less than a page here, which will be cached + * in the dinode buffer anyway. 
+ */ + unmap_mapping_range(mapping, 0, 0, 0); + truncate_inode_pages(mapping, 0); + goto out; + } + + trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start); + trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits; + if (trunc_len >= trunc_start) + trunc_len -= trunc_start; + else + trunc_len = 0; + + mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)byte_start, + (unsigned long long)byte_len, trunc_start, trunc_len); + + ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len); + if (ret) { + mlog_errno(ret); + goto out; + } + + cpos = trunc_start; + while (trunc_len) { + ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, + &alloc_size, NULL); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (alloc_size > trunc_len) + alloc_size = trunc_len; + + /* Only do work for non-holes */ + if (phys_cpos != 0) { + ret = __ocfs2_remove_inode_range(inode, di_bh, cpos, + phys_cpos, alloc_size, + &dealloc); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + cpos += alloc_size; + trunc_len -= alloc_size; + } + + ocfs2_truncate_cluster_pages(inode, byte_start, byte_len); + +out: + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &dealloc); + + return ret; +} + +/* + * Parts of this function taken from xfs_change_file_space() + */ +static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + loff_t f_pos, unsigned int cmd, + struct ocfs2_space_resv *sr, + int change_size) +{ + int ret; + s64 llen; + loff_t size; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *di_bh = NULL; + handle_t *handle; + unsigned long long max_off = inode->i_sb->s_maxbytes; + + if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + return -EROFS; + + mutex_lock(&inode->i_mutex); + + /* + * This prevents concurrent writes on other nodes + */ + ret = ocfs2_rw_lock(inode, 1); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret) { + mlog_errno(ret); + goto out_rw_unlock; + } + + if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) { + ret = -EPERM; + goto out_inode_unlock; + } + + switch (sr->l_whence) { + case 0: /*SEEK_SET*/ + break; + case 1: /*SEEK_CUR*/ + sr->l_start += f_pos; + break; + case 2: /*SEEK_END*/ + sr->l_start += i_size_read(inode); + break; + default: + ret = -EINVAL; + goto out_inode_unlock; + } + sr->l_whence = 0; + + llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len; + + if (sr->l_start < 0 + || sr->l_start > max_off + || (sr->l_start + llen) < 0 + || (sr->l_start + llen) > max_off) { + ret = -EINVAL; + goto out_inode_unlock; + } + size = sr->l_start + sr->l_len; + + if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) { + if (sr->l_len <= 0) { + ret = -EINVAL; + goto out_inode_unlock; + } + } + + if (file && should_remove_suid(file->f_path.dentry)) { + ret = __ocfs2_write_remove_suid(inode, di_bh); + if (ret) { + mlog_errno(ret); + goto out_inode_unlock; + } + } + + down_write(&OCFS2_I(inode)->ip_alloc_sem); + switch (cmd) { + case OCFS2_IOC_RESVSP: + case OCFS2_IOC_RESVSP64: + /* + * This takes unsigned offsets, but the signed ones we + * pass have been checked against overflow above. 
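+ * (A sketch of the userspace side, with fd, off and len as hypothetical values; the struct and ioctl names come from ocfs2_fs.h: struct ocfs2_space_resv sr = { .l_whence = 0, .l_start = off, .l_len = len }; ioctl(fd, OCFS2_IOC_RESVSP64, &sr); reserves the byte range as unwritten extents, while OCFS2_IOC_UNRESVSP64 removes it again.)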
+ */ + ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start, + sr->l_len); + break; + case OCFS2_IOC_UNRESVSP: + case OCFS2_IOC_UNRESVSP64: + ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start, + sr->l_len); + break; + default: + ret = -EINVAL; + } + up_write(&OCFS2_I(inode)->ip_alloc_sem); + if (ret) { + mlog_errno(ret); + goto out_inode_unlock; + } + + /* + * We update c/mtime for these changes. + */ + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out_inode_unlock; + } + + if (change_size && i_size_read(inode) < size) + i_size_write(inode, size); + + inode->i_ctime = inode->i_mtime = CURRENT_TIME; + ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); + if (ret < 0) + mlog_errno(ret); + + ocfs2_commit_trans(osb, handle); + +out_inode_unlock: + brelse(di_bh); + ocfs2_inode_unlock(inode, 1); +out_rw_unlock: + ocfs2_rw_unlock(inode, 1); + +out: + mutex_unlock(&inode->i_mutex); + return ret; +} + +int ocfs2_change_file_space(struct file *file, unsigned int cmd, + struct ocfs2_space_resv *sr) +{ + struct inode *inode = file->f_path.dentry->d_inode; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) && + !ocfs2_writes_unwritten_extents(osb)) + return -ENOTTY; + else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) && + !ocfs2_sparse_alloc(osb)) + return -ENOTTY; + + if (!S_ISREG(inode->i_mode)) + return -EINVAL; + + if (!(file->f_mode & FMODE_WRITE)) + return -EBADF; + + return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); +} + +static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, + loff_t len) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_space_resv sr; + int change_size = 1; + + if (!ocfs2_writes_unwritten_extents(osb)) + return -EOPNOTSUPP; + + if (S_ISDIR(inode->i_mode)) + return -ENODEV; + + if (mode & FALLOC_FL_KEEP_SIZE) + change_size = 0; + + sr.l_whence = 0; + sr.l_start = (s64)offset; + sr.l_len = (s64)len; + + return __ocfs2_change_file_space(NULL, inode, offset, + OCFS2_IOC_RESVSP64, &sr, change_size); +} + +static int ocfs2_prepare_inode_for_write(struct dentry *dentry, + loff_t *ppos, + size_t count, + int appending, + int *direct_io) +{ + int ret = 0, meta_level = 0; + struct inode *inode = dentry->d_inode; + loff_t saved_pos, end; + + /* + * We start with a read level meta lock and only jump to an ex + * if we need to make modifications here. + */ + for(;;) { + ret = ocfs2_inode_lock(inode, NULL, meta_level); + if (ret < 0) { + meta_level = -1; + mlog_errno(ret); + goto out; + } + + /* Clear suid / sgid if necessary. We do this here + * instead of later in the write path because + * remove_suid() calls ->setattr without any hint that + * we may have already done our cluster locking. Since + * ocfs2_setattr() *must* take cluster locks to + * proceed, this will lead us to recursively lock the + * inode. There's also the dinode i_size state which + * can be lost via setattr during extending writes (we + * set inode->i_size at the end of a write.) */ + if (should_remove_suid(dentry)) { + if (meta_level == 0) { + ocfs2_inode_unlock(inode, meta_level); + meta_level = 1; + continue; + } + + ret = ocfs2_write_remove_suid(inode); + if (ret < 0) { + mlog_errno(ret); + goto out_unlock; + } + } + + /* work on a copy of ppos until we're sure that we won't have + * to recalculate it due to relocking.
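+ * (For instance, an O_APPEND writer reloads saved_pos from i_size_read() on every pass through this loop; the surviving value is only copied back into *ppos once the loop exits.)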
*/ + if (appending) { + saved_pos = i_size_read(inode); + mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos); + } else { + saved_pos = *ppos; + } + + end = saved_pos + count; + + /* + * Skip the O_DIRECT checks if we don't need + * them. + */ + if (!direct_io || !(*direct_io)) + break; + + /* + * There's no sane way to do direct writes to an inode + * with inline data. + */ + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + *direct_io = 0; + break; + } + + /* + * Allowing concurrent direct writes means + * i_size changes wouldn't be synchronized, so + * one node could wind up truncating another + * node's writes. + */ + if (end > i_size_read(inode)) { + *direct_io = 0; + break; + } + + /* + * We don't fill holes during direct io, so + * check for them here. If any are found, the + * caller will have to retake some cluster + * locks and initiate the io as buffered. + */ + ret = ocfs2_check_range_for_holes(inode, saved_pos, count); + if (ret == 1) { + *direct_io = 0; + ret = 0; + } else if (ret < 0) + mlog_errno(ret); + break; + } + + if (appending) + *ppos = saved_pos; + +out_unlock: + ocfs2_inode_unlock(inode, meta_level); + +out: + return ret; +} + +static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, + const struct iovec *iov, + unsigned long nr_segs, + loff_t pos) +{ + int ret, direct_io, appending, rw_level, have_alloc_sem = 0; + int can_do_direct; + ssize_t written = 0; + size_t ocount; /* original count */ + size_t count; /* after file limit checks */ + loff_t old_size, *ppos = &iocb->ki_pos; + u32 old_clusters; + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_path.dentry->d_inode; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + mlog_entry("(0x%p, %u, '%.*s')\n", file, + (unsigned int)nr_segs, + file->f_path.dentry->d_name.len, + file->f_path.dentry->d_name.name); + + if (iocb->ki_left == 0) + return 0; + + vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + + appending = file->f_flags & O_APPEND ? 1 : 0; + direct_io = file->f_flags & O_DIRECT ? 1 : 0; + + mutex_lock(&inode->i_mutex); + +relock: + /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */ + if (direct_io) { + down_read(&inode->i_alloc_sem); + have_alloc_sem = 1; + } + + /* concurrent O_DIRECT writes are allowed */ + rw_level = !direct_io; + ret = ocfs2_rw_lock(inode, rw_level); + if (ret < 0) { + mlog_errno(ret); + goto out_sems; + } + + can_do_direct = direct_io; + ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos, + iocb->ki_left, appending, + &can_do_direct); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + /* + * We can't complete the direct I/O as requested, so fall back to + * buffered I/O. + */ + if (direct_io && !can_do_direct) { + ocfs2_rw_unlock(inode, rw_level); + up_read(&inode->i_alloc_sem); + + have_alloc_sem = 0; + rw_level = -1; + + direct_io = 0; + goto relock; + } + + /* + * To later detect whether a journal commit for sync writes is + * necessary, we sample i_size and cluster count here.
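+ * (The matching test sits in the O_SYNC branch below: a change in either old_size or old_clusters triggers jbd2_journal_force_commit().)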
+ */ + old_size = i_size_read(inode); + old_clusters = OCFS2_I(inode)->ip_clusters; + + /* communicate with ocfs2_dio_end_io */ + ocfs2_iocb_set_rw_locked(iocb, rw_level); + + if (direct_io) { + ret = generic_segment_checks(iov, &nr_segs, &ocount, + VERIFY_READ); + if (ret) + goto out_dio; + + ret = generic_write_checks(file, ppos, &count, + S_ISBLK(inode->i_mode)); + if (ret) + goto out_dio; + + written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos, + ppos, count, ocount); + if (written < 0) { + /* + * direct write may have instantiated a few + * blocks outside i_size. Trim these off again. + * Don't need i_size_read because we hold i_mutex. + */ + if (*ppos + count > inode->i_size) + vmtruncate(inode, inode->i_size); + ret = written; + goto out_dio; + } + } else { + written = generic_file_aio_write_nolock(iocb, iov, nr_segs, + *ppos); + } + +out_dio: + /* buffered aio wouldn't have proper lock coverage today */ + BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); + + if ((file->f_flags & O_SYNC && !direct_io) || IS_SYNC(inode)) { + /* + * The generic write paths have handled getting data + * to disk, but since we don't make use of the dirty + * inode list, a manual journal commit is necessary + * here. + */ + if (old_size != i_size_read(inode) || + old_clusters != OCFS2_I(inode)->ip_clusters) { + ret = jbd2_journal_force_commit(osb->journal->j_journal); + if (ret < 0) + written = ret; + } + } + + /* + * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io + * function pointer which is called when o_direct io completes so that + * it can unlock our rw lock. (it's the clustered equivalent of + * i_alloc_sem; protects truncate from racing with pending ios). + * Unfortunately there are error cases which call end_io and others + * that don't, so we don't have to unlock the rw_lock if either an + * async dio is going to do it in the future or an end_io after an + * error has already done it. + */ + if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { + rw_level = -1; + have_alloc_sem = 0; + } + +out: + if (rw_level != -1) + ocfs2_rw_unlock(inode, rw_level); + +out_sems: + if (have_alloc_sem) + up_read(&inode->i_alloc_sem); + + mutex_unlock(&inode->i_mutex); + + mlog_exit(ret); + return written ?
written : ret; +} + +static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, + struct file *out, + loff_t *ppos, + size_t len, + unsigned int flags) +{ + int ret; + struct inode *inode = out->f_path.dentry->d_inode; + + mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe, + (unsigned int)len, + out->f_path.dentry->d_name.len, + out->f_path.dentry->d_name.name); + + inode_double_lock(inode, pipe->inode); + + ret = ocfs2_rw_lock(inode, 1); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0, + NULL); + if (ret < 0) { + mlog_errno(ret); + goto out_unlock; + } + + ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags); + +out_unlock: + ocfs2_rw_unlock(inode, 1); +out: + inode_double_unlock(inode, pipe->inode); + + mlog_exit(ret); + return ret; +} + +static ssize_t ocfs2_file_splice_read(struct file *in, + loff_t *ppos, + struct pipe_inode_info *pipe, + size_t len, + unsigned int flags) +{ + int ret = 0; + struct inode *inode = in->f_path.dentry->d_inode; + + mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe, + (unsigned int)len, + in->f_path.dentry->d_name.len, + in->f_path.dentry->d_name.name); + + /* + * See the comment in ocfs2_file_aio_read() + */ + ret = ocfs2_inode_lock(inode, NULL, 0); + if (ret < 0) { + mlog_errno(ret); + goto bail; + } + ocfs2_inode_unlock(inode, 0); + + ret = generic_file_splice_read(in, ppos, pipe, len, flags); + +bail: + mlog_exit(ret); + return ret; +} + +static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, + const struct iovec *iov, + unsigned long nr_segs, + loff_t pos) +{ + int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0; + struct file *filp = iocb->ki_filp; + struct inode *inode = filp->f_path.dentry->d_inode; + + mlog_entry("(0x%p, %u, '%.*s')\n", filp, + (unsigned int)nr_segs, + filp->f_path.dentry->d_name.len, + filp->f_path.dentry->d_name.name); + + if (!inode) { + ret = -EINVAL; + mlog_errno(ret); + goto bail; + } + + /* + * buffered reads protect themselves in ->readpage(). O_DIRECT reads + * need locks to protect pending reads from racing with truncate. + */ + if (filp->f_flags & O_DIRECT) { + down_read(&inode->i_alloc_sem); + have_alloc_sem = 1; + + ret = ocfs2_rw_lock(inode, 0); + if (ret < 0) { + mlog_errno(ret); + goto bail; + } + rw_level = 0; + /* communicate with ocfs2_dio_end_io */ + ocfs2_iocb_set_rw_locked(iocb, rw_level); + } + + /* + * We're fine letting folks race truncates and extending + * writes with read across the cluster, just like they can + * locally. Hence no rw_lock during read. + * + * Take and drop the meta data lock to update inode fields + * like i_size. This allows the checks down below + * generic_file_aio_read() a chance of actually working. 
+ */ + ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); + if (ret < 0) { + mlog_errno(ret); + goto bail; + } + ocfs2_inode_unlock(inode, lock_level); + + ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos); + if (ret == -EINVAL) + mlog(0, "generic_file_aio_read returned -EINVAL\n"); + + /* buffered aio wouldn't have proper lock coverage today */ + BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); + + /* see ocfs2_file_aio_write */ + if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { + rw_level = -1; + have_alloc_sem = 0; + } + +bail: + if (have_alloc_sem) + up_read(&inode->i_alloc_sem); + if (rw_level != -1) + ocfs2_rw_unlock(inode, rw_level); + mlog_exit(ret); + + return ret; +} + +const struct inode_operations ocfs2_file_iops = { + .setattr = ocfs2_setattr, + .getattr = ocfs2_getattr, + .permission = ocfs2_permission, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = ocfs2_listxattr, + .removexattr = generic_removexattr, + .fallocate = ocfs2_fallocate, + .fiemap = ocfs2_fiemap, +}; + +const struct inode_operations ocfs2_special_file_iops = { + .setattr = ocfs2_setattr, + .getattr = ocfs2_getattr, + .permission = ocfs2_permission, +}; + +/* + * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with + * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! + */ +const struct file_operations ocfs2_fops = { + .llseek = generic_file_llseek, + .read = do_sync_read, + .write = do_sync_write, + .mmap = ocfs2_mmap, + .fsync = ocfs2_sync_file, + .release = ocfs2_file_release, + .open = ocfs2_file_open, + .aio_read = ocfs2_file_aio_read, + .aio_write = ocfs2_file_aio_write, + .unlocked_ioctl = ocfs2_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ocfs2_compat_ioctl, +#endif + .lock = ocfs2_lock, + .flock = ocfs2_flock, + .splice_read = ocfs2_file_splice_read, + .splice_write = ocfs2_file_splice_write, +}; + +const struct file_operations ocfs2_dops = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .readdir = ocfs2_readdir, + .fsync = ocfs2_sync_file, + .release = ocfs2_dir_release, + .open = ocfs2_dir_open, + .unlocked_ioctl = ocfs2_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ocfs2_compat_ioctl, +#endif + .lock = ocfs2_lock, + .flock = ocfs2_flock, +}; + +/* + * POSIX-lockless variants of our file_operations. + * + * These will be used if the underlying cluster stack does not support + * posix file locking, if the user passes the "localflocks" mount + * option, or if we have a local-only fs. + * + * ocfs2_flock is in here because all stacks handle UNIX file locks, + * so we still want it in the case of no stack support for + * plocks. Internally, it will do the right thing when asked to ignore + * the cluster. 
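+ * (Comparing the tables below against ocfs2_fops and ocfs2_dops above shows the only difference is the omitted .lock callback.)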
+ */ +const struct file_operations ocfs2_fops_no_plocks = { + .llseek = generic_file_llseek, + .read = do_sync_read, + .write = do_sync_write, + .mmap = ocfs2_mmap, + .fsync = ocfs2_sync_file, + .release = ocfs2_file_release, + .open = ocfs2_file_open, + .aio_read = ocfs2_file_aio_read, + .aio_write = ocfs2_file_aio_write, + .unlocked_ioctl = ocfs2_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ocfs2_compat_ioctl, +#endif + .flock = ocfs2_flock, + .splice_read = ocfs2_file_splice_read, + .splice_write = ocfs2_file_splice_write, +}; + +const struct file_operations ocfs2_dops_no_plocks = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .readdir = ocfs2_readdir, + .fsync = ocfs2_sync_file, + .release = ocfs2_dir_release, + .open = ocfs2_dir_open, + .unlocked_ioctl = ocfs2_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ocfs2_compat_ioctl, +#endif + .flock = ocfs2_flock, +}; diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h new file mode 100644 index 0000000..e92382c --- /dev/null +++ b/fs/ocfs2/file.h @@ -0,0 +1,69 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * file.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef OCFS2_FILE_H +#define OCFS2_FILE_H + +extern const struct file_operations ocfs2_fops; +extern const struct file_operations ocfs2_dops; +extern const struct file_operations ocfs2_fops_no_plocks; +extern const struct file_operations ocfs2_dops_no_plocks; +extern const struct inode_operations ocfs2_file_iops; +extern const struct inode_operations ocfs2_special_file_iops; +struct ocfs2_alloc_context; +enum ocfs2_alloc_restarted; + +struct ocfs2_file_private { + struct file *fp_file; + struct mutex fp_mutex; + struct ocfs2_lock_res fp_flock; +}; + +int ocfs2_add_inode_data(struct ocfs2_super *osb, + struct inode *inode, + u32 *logical_offset, + u32 clusters_to_add, + int mark_unwritten, + struct buffer_head *fe_bh, + handle_t *handle, + struct ocfs2_alloc_context *data_ac, + struct ocfs2_alloc_context *meta_ac, + enum ocfs2_alloc_restarted *reason_ret); +int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, + u64 zero_to); +int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); +int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat); +int ocfs2_permission(struct inode *inode, int mask); + +int ocfs2_should_update_atime(struct inode *inode, + struct vfsmount *vfsmnt); +int ocfs2_update_inode_atime(struct inode *inode, + struct buffer_head *bh); + +int ocfs2_change_file_space(struct file *file, unsigned int cmd, + struct ocfs2_space_resv *sr); + +#endif /* OCFS2_FILE_H */ diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c new file mode 100644 index 0000000..c6e7213 --- /dev/null +++ b/fs/ocfs2/heartbeat.c @@ -0,0 +1,135 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * heartbeat.c + * + * Register ourselves with the heartbeat service, keep our node maps + * up to date, and fire off recovery when needed. + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> + +#define MLOG_MASK_PREFIX ML_SUPER +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "heartbeat.h" +#include "inode.h" +#include "journal.h" + +#include "buffer_head_io.h" + +static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, + int bit); +static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map, + int bit); + +/* special case -1 for now + * TODO: should *really* make sure the calling func never passes -1!!
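+ * (Today the exported wrappers below honor that special case: a call such as ocfs2_node_map_set_bit(osb, map, -1) simply returns without touching the map.)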
*/ +static void ocfs2_node_map_init(struct ocfs2_node_map *map) +{ + map->num_nodes = OCFS2_NODE_MAP_MAX_NODES; + memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) * + sizeof(unsigned long)); +} + +void ocfs2_init_node_maps(struct ocfs2_super *osb) +{ + spin_lock_init(&osb->node_map_lock); + ocfs2_node_map_init(&osb->osb_recovering_orphan_dirs); +} + +void ocfs2_do_node_down(int node_num, void *data) +{ + struct ocfs2_super *osb = data; + + BUG_ON(osb->node_num == node_num); + + mlog(0, "ocfs2: node down event for %d\n", node_num); + + if (!osb->cconn) { + /* + * No cluster connection means we're not even ready to + * participate yet. We check the slots after the cluster + * comes up, so we will notice the node death then. We + * can safely ignore it here. + */ + return; + } + + ocfs2_recovery_thread(osb, node_num); +} + +static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, + int bit) +{ + set_bit(bit, map->map); +} + +void ocfs2_node_map_set_bit(struct ocfs2_super *osb, + struct ocfs2_node_map *map, + int bit) +{ + if (bit==-1) + return; + BUG_ON(bit >= map->num_nodes); + spin_lock(&osb->node_map_lock); + __ocfs2_node_map_set_bit(map, bit); + spin_unlock(&osb->node_map_lock); +} + +static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map, + int bit) +{ + clear_bit(bit, map->map); +} + +void ocfs2_node_map_clear_bit(struct ocfs2_super *osb, + struct ocfs2_node_map *map, + int bit) +{ + if (bit==-1) + return; + BUG_ON(bit >= map->num_nodes); + spin_lock(&osb->node_map_lock); + __ocfs2_node_map_clear_bit(map, bit); + spin_unlock(&osb->node_map_lock); +} + +int ocfs2_node_map_test_bit(struct ocfs2_super *osb, + struct ocfs2_node_map *map, + int bit) +{ + int ret; + if (bit >= map->num_nodes) { + mlog(ML_ERROR, "bit=%d map->num_nodes=%d\n", bit, map->num_nodes); + BUG(); + } + spin_lock(&osb->node_map_lock); + ret = test_bit(bit, map->map); + spin_unlock(&osb->node_map_lock); + return ret; +} + diff --git a/fs/ocfs2/heartbeat.h b/fs/ocfs2/heartbeat.h new file mode 100644 index 0000000..74b9c5d --- /dev/null +++ b/fs/ocfs2/heartbeat.h @@ -0,0 +1,45 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * heartbeat.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_HEARTBEAT_H +#define OCFS2_HEARTBEAT_H + +void ocfs2_init_node_maps(struct ocfs2_super *osb); + +void ocfs2_do_node_down(int node_num, void *data); + +/* node map functions - used to keep track of mounted and in-recovery + * nodes. 
*/ +void ocfs2_node_map_set_bit(struct ocfs2_super *osb, + struct ocfs2_node_map *map, + int bit); +void ocfs2_node_map_clear_bit(struct ocfs2_super *osb, + struct ocfs2_node_map *map, + int bit); +int ocfs2_node_map_test_bit(struct ocfs2_super *osb, + struct ocfs2_node_map *map, + int bit); + +#endif /* OCFS2_HEARTBEAT_H */ diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c new file mode 100644 index 0000000..7aa00d5 --- /dev/null +++ b/fs/ocfs2/inode.c @@ -0,0 +1,1266 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * inode.c + * + * vfs' aops, fops, dops and iops + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/pagemap.h> + +#include <asm/byteorder.h> + +#define MLOG_MASK_PREFIX ML_INODE +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "file.h" +#include "heartbeat.h" +#include "inode.h" +#include "journal.h" +#include "namei.h" +#include "suballoc.h" +#include "super.h" +#include "symlink.h" +#include "sysfile.h" +#include "uptodate.h" +#include "xattr.h" + +#include "buffer_head_io.h" + +struct ocfs2_find_inode_args +{ + u64 fi_blkno; + unsigned long fi_ino; + unsigned int fi_flags; + unsigned int fi_sysfile_type; +}; + +static struct lock_class_key ocfs2_sysfile_lock_key[NUM_SYSTEM_INODES]; + +static int ocfs2_read_locked_inode(struct inode *inode, + struct ocfs2_find_inode_args *args); +static int ocfs2_init_locked_inode(struct inode *inode, void *opaque); +static int ocfs2_find_actor(struct inode *inode, void *opaque); +static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, + struct inode *inode, + struct buffer_head *fe_bh); + +void ocfs2_set_inode_flags(struct inode *inode) +{ + unsigned int flags = OCFS2_I(inode)->ip_attr; + + inode->i_flags &= ~(S_IMMUTABLE | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + if (flags & OCFS2_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + + if (flags & OCFS2_SYNC_FL) + inode->i_flags |= S_SYNC; + if (flags & OCFS2_APPEND_FL) + inode->i_flags |= S_APPEND; + if (flags & OCFS2_NOATIME_FL) + inode->i_flags |= S_NOATIME; + if (flags & OCFS2_DIRSYNC_FL) + inode->i_flags |= S_DIRSYNC; +} + +/* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */ +void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi) +{ + unsigned int flags = oi->vfs_inode.i_flags; + + oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL| + OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL); + if (flags & S_SYNC) + oi->ip_attr |= OCFS2_SYNC_FL; + if (flags & S_APPEND) + oi->ip_attr |= OCFS2_APPEND_FL; + if (flags & S_IMMUTABLE) + oi->ip_attr |= OCFS2_IMMUTABLE_FL; + if (flags & S_NOATIME) + 
oi->ip_attr |= OCFS2_NOATIME_FL; + if (flags & S_DIRSYNC) + oi->ip_attr |= OCFS2_DIRSYNC_FL; +} + +struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags, + int sysfile_type) +{ + struct inode *inode = NULL; + struct super_block *sb = osb->sb; + struct ocfs2_find_inode_args args; + + mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno); + + /* Ok. By now we've either got the offsets passed to us by the + * caller, or we just pulled them off the bh. Let's do some + * sanity checks to make sure they're OK. */ + if (blkno == 0) { + inode = ERR_PTR(-EINVAL); + mlog_errno(PTR_ERR(inode)); + goto bail; + } + + args.fi_blkno = blkno; + args.fi_flags = flags; + args.fi_ino = ino_from_blkno(sb, blkno); + args.fi_sysfile_type = sysfile_type; + + inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor, + ocfs2_init_locked_inode, &args); + /* inode was *not* in the inode cache. 2.6.x requires + * us to do our own read_inode call and unlock it + * afterwards. */ + if (inode && inode->i_state & I_NEW) { + mlog(0, "Inode was not in inode cache, reading it.\n"); + ocfs2_read_locked_inode(inode, &args); + unlock_new_inode(inode); + } + if (inode == NULL) { + inode = ERR_PTR(-ENOMEM); + mlog_errno(PTR_ERR(inode)); + goto bail; + } + if (is_bad_inode(inode)) { + iput(inode); + inode = ERR_PTR(-ESTALE); + goto bail; + } + +bail: + if (!IS_ERR(inode)) { + mlog(0, "returning inode with number %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + mlog_exit_ptr(inode); + } + + return inode; +} + + +/* + * here's how inodes get read from disk: + * iget5_locked -> find_actor -> OCFS2_FIND_ACTOR + * found? : return the in-memory inode + * not found? : get_new_inode -> OCFS2_INIT_LOCKED_INODE + */ + +static int ocfs2_find_actor(struct inode *inode, void *opaque) +{ + struct ocfs2_find_inode_args *args = NULL; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + int ret = 0; + + mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque); + + args = opaque; + + mlog_bug_on_msg(!inode, "No inode in find actor!\n"); + + if (oi->ip_blkno != args->fi_blkno) + goto bail; + + ret = 1; +bail: + mlog_exit(ret); + return ret; +} + +/* + * initialize the new inode, but don't do anything that would cause + * us to sleep. + * return 0 on success, 1 on failure + */ +static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) +{ + struct ocfs2_find_inode_args *args = opaque; + + mlog_entry("inode = %p, opaque = %p\n", inode, opaque); + + inode->i_ino = args->fi_ino; + OCFS2_I(inode)->ip_blkno = args->fi_blkno; + if (args->fi_sysfile_type != 0) + lockdep_set_class(&inode->i_mutex, + &ocfs2_sysfile_lock_key[args->fi_sysfile_type]); + + mlog_exit(0); + return 0; +} + +int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, + int create_ino) +{ + struct super_block *sb; + struct ocfs2_super *osb; + int status = -EINVAL; + int use_plocks = 1; + + mlog_entry("(0x%p, size:%llu)\n", inode, + (unsigned long long)le64_to_cpu(fe->i_size)); + + sb = inode->i_sb; + osb = OCFS2_SB(sb); + + if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) || + ocfs2_mount_local(osb) || !ocfs2_stack_supports_plocks()) + use_plocks = 0; + + /* this means that read_inode cannot create a superblock inode + * today. Change if needed.
*/ + if (!OCFS2_IS_VALID_DINODE(fe) || + !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL))) { + mlog(0, "Invalid dinode: i_ino=%lu, i_blkno=%llu, " + "signature = %.*s, flags = 0x%x\n", + inode->i_ino, + (unsigned long long)le64_to_cpu(fe->i_blkno), 7, + fe->i_signature, le32_to_cpu(fe->i_flags)); + goto bail; + } + + if (le32_to_cpu(fe->i_fs_generation) != osb->fs_generation) { + mlog(ML_ERROR, "file entry generation does not match " + "superblock! osb->fs_generation=%x, " + "fe->i_fs_generation=%x\n", + osb->fs_generation, le32_to_cpu(fe->i_fs_generation)); + goto bail; + } + + OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); + OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr); + OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features); + + inode->i_version = 1; + inode->i_generation = le32_to_cpu(fe->i_generation); + inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); + inode->i_mode = le16_to_cpu(fe->i_mode); + inode->i_uid = le32_to_cpu(fe->i_uid); + inode->i_gid = le32_to_cpu(fe->i_gid); + + /* Fast symlinks will have i_size but no allocated clusters. */ + if (S_ISLNK(inode->i_mode) && !fe->i_clusters) + inode->i_blocks = 0; + else + inode->i_blocks = ocfs2_inode_sector_count(inode); + inode->i_mapping->a_ops = &ocfs2_aops; + inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime); + inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec); + inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime); + inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec); + inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime); + inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec); + + if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno)) + mlog(ML_ERROR, + "ip_blkno %llu != i_blkno %llu!\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)le64_to_cpu(fe->i_blkno)); + + inode->i_nlink = le16_to_cpu(fe->i_links_count); + + if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) + OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE; + + if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) { + OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; + mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino); + } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) { + OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP; + } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) { + mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino); + /* we can't actually hit this as read_inode can't + * handle superblocks today ;-) */ + BUG(); + } + + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + if (use_plocks) + inode->i_fop = &ocfs2_fops; + else + inode->i_fop = &ocfs2_fops_no_plocks; + inode->i_op = &ocfs2_file_iops; + i_size_write(inode, le64_to_cpu(fe->i_size)); + break; + case S_IFDIR: + inode->i_op = &ocfs2_dir_iops; + if (use_plocks) + inode->i_fop = &ocfs2_dops; + else + inode->i_fop = &ocfs2_dops_no_plocks; + i_size_write(inode, le64_to_cpu(fe->i_size)); + break; + case S_IFLNK: + if (ocfs2_inode_is_fast_symlink(inode)) + inode->i_op = &ocfs2_fast_symlink_inode_operations; + else + inode->i_op = &ocfs2_symlink_inode_operations; + i_size_write(inode, le64_to_cpu(fe->i_size)); + break; + default: + inode->i_op = &ocfs2_special_file_iops; + init_special_inode(inode, inode->i_mode, + inode->i_rdev); + break; + } + + if (create_ino) { + inode->i_ino = ino_from_blkno(inode->i_sb, + le64_to_cpu(fe->i_blkno)); + + /* + * If we ever want to create system files from kernel, + * the generation argument to + * ocfs2_inode_lock_res_init() will have to change. 
+ */ + BUG_ON(le32_to_cpu(fe->i_flags) & OCFS2_SYSTEM_FL); + + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres, + OCFS2_LOCK_TYPE_META, 0, inode); + + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres, + OCFS2_LOCK_TYPE_OPEN, 0, inode); + } + + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres, + OCFS2_LOCK_TYPE_RW, inode->i_generation, + inode); + + ocfs2_set_inode_flags(inode); + + status = 0; +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_read_locked_inode(struct inode *inode, + struct ocfs2_find_inode_args *args) +{ + struct super_block *sb; + struct ocfs2_super *osb; + struct ocfs2_dinode *fe; + struct buffer_head *bh = NULL; + int status, can_lock; + u32 generation = 0; + + mlog_entry("(0x%p, 0x%p)\n", inode, args); + + status = -EINVAL; + if (inode == NULL || inode->i_sb == NULL) { + mlog(ML_ERROR, "bad inode\n"); + return status; + } + sb = inode->i_sb; + osb = OCFS2_SB(sb); + + if (!args) { + mlog(ML_ERROR, "bad inode args\n"); + make_bad_inode(inode); + return status; + } + + /* + * To improve performance of cold-cache inode stats, we take + * the cluster lock here if possible. + * + * Generally, OCFS2 never trusts the contents of an inode + * unless it's holding a cluster lock, so taking it here isn't + * a correctness issue as much as it is a performance + * improvement. + * + * There are three times when taking the lock is not a good idea: + * + * 1) During startup, before we have initialized the DLM. + * + * 2) If we are reading certain system files which never get + * cluster locks (local alloc, truncate log). + * + * 3) If the process doing the iget() is responsible for + * orphan dir recovery. We're holding the orphan dir lock and + * can get into a deadlock with another process on another + * node in ->delete_inode(). + * + * #1 and #2 can be simply solved by never taking the lock + * here for system files (which are the only type we read + * during mount). It's a heavier approach, but our main + * concern is user-accessible files anyway. + * + * #3 works itself out because we'll eventually take the + * cluster lock before trusting anything anyway. + */ + can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE) + && !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) + && !ocfs2_mount_local(osb); + + /* + * To maintain backwards compatibility with older versions of + * ocfs2-tools, we still store the generation value for system + * files. The only ones that actually matter to userspace are + * the journals, but it's easier and inexpensive to just flag + * all system files similarly.
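+ * (Ordinary inodes keep the generation value initialized to 0 above; only OCFS2_FI_FLAG_SYSFILE inodes pick up osb->fs_generation below.)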
+ */ + if (args->fi_flags & OCFS2_FI_FLAG_SYSFILE) + generation = osb->fs_generation; + + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres, + OCFS2_LOCK_TYPE_META, + generation, inode); + + ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres, + OCFS2_LOCK_TYPE_OPEN, + 0, inode); + + if (can_lock) { + status = ocfs2_open_lock(inode); + if (status) { + make_bad_inode(inode); + mlog_errno(status); + return status; + } + status = ocfs2_inode_lock(inode, NULL, 0); + if (status) { + make_bad_inode(inode); + mlog_errno(status); + return status; + } + } + + if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { + status = ocfs2_try_open_lock(inode, 0); + if (status) { + make_bad_inode(inode); + return status; + } + } + + if (can_lock) + status = ocfs2_read_blocks(inode, args->fi_blkno, 1, &bh, + OCFS2_BH_IGNORE_CACHE); + else + status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = -EINVAL; + fe = (struct ocfs2_dinode *) bh->b_data; + if (!OCFS2_IS_VALID_DINODE(fe)) { + mlog(0, "Invalid dinode #%llu: signature = %.*s\n", + (unsigned long long)args->fi_blkno, 7, + fe->i_signature); + goto bail; + } + + /* + * This is a code bug. Right now the caller needs to + * understand whether it is asking for a system file inode or + * not so the proper lock names can be built. + */ + mlog_bug_on_msg(!!(fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) != + !!(args->fi_flags & OCFS2_FI_FLAG_SYSFILE), + "Inode %llu: system file state is ambiguous\n", + (unsigned long long)args->fi_blkno); + + if (S_ISCHR(le16_to_cpu(fe->i_mode)) || + S_ISBLK(le16_to_cpu(fe->i_mode))) + inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); + + if (ocfs2_populate_inode(inode, fe, 0) < 0) + goto bail; + + BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno)); + + status = 0; + +bail: + if (can_lock) + ocfs2_inode_unlock(inode, 0); + + if (status < 0) + make_bad_inode(inode); + + if (args && bh) + brelse(bh); + + mlog_exit(status); + return status; +} + +void ocfs2_sync_blockdev(struct super_block *sb) +{ + sync_blockdev(sb->s_bdev); +} + +static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, + struct inode *inode, + struct buffer_head *fe_bh) +{ + int status = 0; + struct ocfs2_truncate_context *tc = NULL; + struct ocfs2_dinode *fe; + handle_t *handle = NULL; + + mlog_entry_void(); + + fe = (struct ocfs2_dinode *) fe_bh->b_data; + + /* + * This check will also skip truncate of inodes with inline + * data and fast symlinks.
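+ * (Both keep their data in the dinode itself and leave i_clusters at zero, which is why testing fe->i_clusters below is sufficient.)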
+ */ + if (fe->i_clusters) { + if (ocfs2_should_order_data(inode)) + ocfs2_begin_ordered_truncate(inode, 0); + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto out; + } + + status = ocfs2_journal_access(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out; + } + + i_size_write(inode, 0); + + status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); + if (status < 0) { + mlog_errno(status); + goto out; + } + + ocfs2_commit_trans(osb, handle); + handle = NULL; + + status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc); + if (status < 0) { + mlog_errno(status); + goto out; + } + + status = ocfs2_commit_truncate(osb, inode, fe_bh, tc); + if (status < 0) { + mlog_errno(status); + goto out; + } + } + +out: + if (handle) + ocfs2_commit_trans(osb, handle); + mlog_exit(status); + return status; +} + +static int ocfs2_remove_inode(struct inode *inode, + struct buffer_head *di_bh, + struct inode *orphan_dir_inode, + struct buffer_head *orphan_dir_bh) +{ + int status; + struct inode *inode_alloc_inode = NULL; + struct buffer_head *inode_alloc_bh = NULL; + handle_t *handle; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; + + inode_alloc_inode = + ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, + le16_to_cpu(di->i_suballoc_slot)); + if (!inode_alloc_inode) { + status = -EEXIST; + mlog_errno(status); + goto bail; + } + + mutex_lock(&inode_alloc_inode->i_mutex); + status = ocfs2_inode_lock(inode_alloc_inode, &inode_alloc_bh, 1); + if (status < 0) { + mutex_unlock(&inode_alloc_inode->i_mutex); + + mlog_errno(status); + goto bail; + } + + handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } + + status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode, + orphan_dir_bh); + if (status < 0) { + mlog_errno(status); + goto bail_commit; + } + + /* set the inode's dtime */ + status = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail_commit; + } + + di->i_dtime = cpu_to_le64(CURRENT_TIME.tv_sec); + di->i_flags &= cpu_to_le32(~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL)); + + status = ocfs2_journal_dirty(handle, di_bh); + if (status < 0) { + mlog_errno(status); + goto bail_commit; + } + + ocfs2_remove_from_cache(inode, di_bh); + + status = ocfs2_free_dinode(handle, inode_alloc_inode, + inode_alloc_bh, di); + if (status < 0) + mlog_errno(status); + +bail_commit: + ocfs2_commit_trans(osb, handle); +bail_unlock: + ocfs2_inode_unlock(inode_alloc_inode, 1); + mutex_unlock(&inode_alloc_inode->i_mutex); + brelse(inode_alloc_bh); +bail: + iput(inode_alloc_inode); + + return status; +} + +/* + * Serialize with orphan dir recovery. If the process doing + * recovery on this orphan dir does an iget() with the dir + * i_mutex held, we'll deadlock here. Instead we detect this + * and exit early - recovery will wipe this inode for us.
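+ * (Concretely, ocfs2_check_orphan_recovery_state() below returns -EDEADLK for a slot under recovery, and ocfs2_delete_inode() deliberately treats -EDEADLK as a silent skip.)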
+ */ +static int ocfs2_check_orphan_recovery_state(struct ocfs2_super *osb, + int slot) +{ + int ret = 0; + + spin_lock(&osb->osb_lock); + if (ocfs2_node_map_test_bit(osb, &osb->osb_recovering_orphan_dirs, slot)) { + mlog(0, "Recovery is happening on orphan dir %d, will skip " + "this inode\n", slot); + ret = -EDEADLK; + goto out; + } + /* This signals to the orphan recovery process that it should + * wait for us to handle the wipe. */ + osb->osb_orphan_wipes[slot]++; +out: + spin_unlock(&osb->osb_lock); + return ret; +} + +static void ocfs2_signal_wipe_completion(struct ocfs2_super *osb, + int slot) +{ + spin_lock(&osb->osb_lock); + osb->osb_orphan_wipes[slot]--; + spin_unlock(&osb->osb_lock); + + wake_up(&osb->osb_wipe_event); +} + +static int ocfs2_wipe_inode(struct inode *inode, + struct buffer_head *di_bh) +{ + int status, orphaned_slot; + struct inode *orphan_dir_inode = NULL; + struct buffer_head *orphan_dir_bh = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_dinode *di; + + di = (struct ocfs2_dinode *) di_bh->b_data; + orphaned_slot = le16_to_cpu(di->i_orphaned_slot); + + status = ocfs2_check_orphan_recovery_state(osb, orphaned_slot); + if (status) + return status; + + orphan_dir_inode = ocfs2_get_system_file_inode(osb, + ORPHAN_DIR_SYSTEM_INODE, + orphaned_slot); + if (!orphan_dir_inode) { + status = -EEXIST; + mlog_errno(status); + goto bail; + } + + /* Lock the orphan dir. The lock will be held for the entire + * delete_inode operation. We do this now to avoid races with + * recovery completion on other nodes. */ + mutex_lock(&orphan_dir_inode->i_mutex); + status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); + if (status < 0) { + mutex_unlock(&orphan_dir_inode->i_mutex); + + mlog_errno(status); + goto bail; + } + + /* we do this while holding the orphan dir lock because we + * don't want recovery being run from another node to try an + * inode delete underneath us -- this will result in two nodes + * truncating the same file! */ + status = ocfs2_truncate_for_delete(osb, inode, di_bh); + if (status < 0) { + mlog_errno(status); + goto bail_unlock_dir; + } + + /* Free extended attribute resources associated with this inode. */ + status = ocfs2_xattr_remove(inode, di_bh); + if (status < 0) { + mlog_errno(status); + goto bail_unlock_dir; + } + + status = ocfs2_remove_inode(inode, di_bh, orphan_dir_inode, + orphan_dir_bh); + if (status < 0) + mlog_errno(status); + +bail_unlock_dir: + ocfs2_inode_unlock(orphan_dir_inode, 1); + mutex_unlock(&orphan_dir_inode->i_mutex); + brelse(orphan_dir_bh); +bail: + iput(orphan_dir_inode); + ocfs2_signal_wipe_completion(osb, orphaned_slot); + + return status; +} + +/* There is a series of simple checks that should be done before a + * trylock is even considered. Encapsulate those in this function. */ +static int ocfs2_inode_is_valid_to_delete(struct inode *inode) +{ + int ret = 0; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + /* We shouldn't be getting here for the root directory + * inode. */ + if (inode == osb->root_inode) { + mlog(ML_ERROR, "Skipping delete of root inode.\n"); + goto bail; + } + + /* If we're coming from downconvert_thread we can't go into our own + * voting [hello, deadlock city!], so unfortunately we just + * have to skip deleting this guy. That's OK though because + * the node who's doing the actual deleting should handle it + * anyway.
*/ + if (current == osb->dc_task) { + mlog(0, "Skipping delete of %lu because we're currently " + "in downconvert\n", inode->i_ino); + goto bail; + } + + spin_lock(&oi->ip_lock); + /* OCFS2 *never* deletes system files. We should technically + * never get here, as system file inodes should always have a + * positive link count. */ + if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) { + mlog(ML_ERROR, "Skipping delete of system file %llu\n", + (unsigned long long)oi->ip_blkno); + goto bail_unlock; + } + + /* If we have allowed wipe of this inode for another node, it + * will be marked here so we can safely skip it. Recovery will + * cleanup any inodes we might inadvertently skip here. */ + if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) { + mlog(0, "Skipping delete of %lu because another node " + "has done this for us.\n", inode->i_ino); + goto bail_unlock; + } + + ret = 1; +bail_unlock: + spin_unlock(&oi->ip_lock); +bail: + return ret; +} + +/* Query the cluster to determine whether we should wipe an inode from + * disk or not. + * + * Requires the inode to have the cluster lock. */ +static int ocfs2_query_inode_wipe(struct inode *inode, + struct buffer_head *di_bh, + int *wipe) +{ + int status = 0; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di; + + *wipe = 0; + + /* While we were waiting for the cluster lock in + * ocfs2_delete_inode, another node might have asked to delete + * the inode. Recheck our flags to catch this. */ + if (!ocfs2_inode_is_valid_to_delete(inode)) { + mlog(0, "Skipping delete of %llu because flags changed\n", + (unsigned long long)oi->ip_blkno); + goto bail; + } + + /* Now that we have an up to date inode, we can double check + * the link count. */ + if (inode->i_nlink) { + mlog(0, "Skipping delete of %llu because nlink = %u\n", + (unsigned long long)oi->ip_blkno, inode->i_nlink); + goto bail; + } + + /* Do some basic inode verification... */ + di = (struct ocfs2_dinode *) di_bh->b_data; + if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) { + /* for lack of a better error? */ + status = -EEXIST; + mlog(ML_ERROR, + "Inode %llu (on-disk %llu) not orphaned! " + "Disk flags 0x%x, inode flags 0x%x\n", + (unsigned long long)oi->ip_blkno, + (unsigned long long)le64_to_cpu(di->i_blkno), + le32_to_cpu(di->i_flags), oi->ip_flags); + goto bail; + } + + /* has someone already deleted us?! baaad... */ + if (di->i_dtime) { + status = -EEXIST; + mlog_errno(status); + goto bail; + } + + /* + * This is how ocfs2 determines whether an inode is still live + * within the cluster. Every node takes a shared read lock on + * the inode open lock in ocfs2_read_locked_inode(). When we + * get to ->delete_inode(), each node tries to convert its + * lock to an exclusive. Trylocks are serialized by the inode + * meta data lock. If the upconvert succeeds, we know the inode + * is no longer live and can be deleted. + * + * Though we call this with the meta data lock held, the + * trylock keeps us from ABBA deadlock. + */ + status = ocfs2_try_open_lock(inode, 1); + if (status == -EAGAIN) { + status = 0; + mlog(0, "Skipping delete of %llu because it is in use on " + "other nodes\n", (unsigned long long)oi->ip_blkno); + goto bail; + } + if (status < 0) { + mlog_errno(status); + goto bail; + } + + *wipe = 1; + mlog(0, "Inode %llu is ok to wipe from orphan dir %u\n", + (unsigned long long)oi->ip_blkno, + le16_to_cpu(di->i_orphaned_slot)); + +bail: + return status; +} + +/* Support function for ocfs2_delete_inode.
Will help us keep the + * inode data in a consistent state for clear_inode. Always truncates + * pages, optionally syncs them first. */ +static void ocfs2_cleanup_delete_inode(struct inode *inode, + int sync_data) +{ + mlog(0, "Cleanup inode %llu, sync = %d\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, sync_data); + if (sync_data) + write_inode_now(inode, 1); + truncate_inode_pages(&inode->i_data, 0); +} + +void ocfs2_delete_inode(struct inode *inode) +{ + int wipe, status; + sigset_t blocked, oldset; + struct buffer_head *di_bh = NULL; + + mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); + + if (is_bad_inode(inode)) { + mlog(0, "Skipping delete of bad inode\n"); + goto bail; + } + + if (!ocfs2_inode_is_valid_to_delete(inode)) { + /* It's probably not necessary to truncate_inode_pages + * here but we do it for safety anyway (it will most + * likely be a no-op anyway) */ + ocfs2_cleanup_delete_inode(inode, 0); + goto bail; + } + + /* We want to block signals in delete_inode as the lock and + * messaging paths may return us -ERESTARTSYS, which would + * cause us to exit early, resulting in inodes being orphaned + * forever. */ + sigfillset(&blocked); + status = sigprocmask(SIG_BLOCK, &blocked, &oldset); + if (status < 0) { + mlog_errno(status); + ocfs2_cleanup_delete_inode(inode, 1); + goto bail; + } + + /* Lock down the inode. This gives us an up to date view of + * its metadata (for verification), and allows us to + * serialize delete_inode on multiple nodes. + * + * Even though we might be doing a truncate, we don't take the + * allocation lock here as it won't be needed - nobody will + * have the file open. + */ + status = ocfs2_inode_lock(inode, &di_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + ocfs2_cleanup_delete_inode(inode, 0); + goto bail_unblock; + } + + /* Query the cluster. This will be the final decision made + * before we go ahead and wipe the inode. */ + status = ocfs2_query_inode_wipe(inode, di_bh, &wipe); + if (!wipe || status < 0) { + /* Error and remote inode busy both mean we won't be + * removing the inode, so they take almost the same + * path. */ + if (status < 0) + mlog_errno(status); + + /* Someone in the cluster has disallowed a wipe of + * this inode, or it was never completely + * orphaned. Write out the pages and exit now. */ + ocfs2_cleanup_delete_inode(inode, 1); + goto bail_unlock_inode; + } + + ocfs2_cleanup_delete_inode(inode, 0); + + status = ocfs2_wipe_inode(inode, di_bh); + if (status < 0) { + if (status != -EDEADLK) + mlog_errno(status); + goto bail_unlock_inode; + } + + /* + * Mark the inode as successfully deleted. + * + * This is important for ocfs2_clear_inode() as it will check + * this flag and skip any checkpointing work. + * + * ocfs2_stuff_meta_lvb() also uses this flag to invalidate + * the LVB for other nodes.
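+ * (ocfs2_clear_inode() below tests the same OCFS2_INODE_DELETED flag and skips ocfs2_checkpoint_inode() for wiped inodes.)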
+ */ + OCFS2_I(inode)->ip_flags |= OCFS2_INODE_DELETED; + +bail_unlock_inode: + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); +bail_unblock: + status = sigprocmask(SIG_SETMASK, &oldset, NULL); + if (status < 0) + mlog_errno(status); +bail: + clear_inode(inode); + mlog_exit_void(); +} + +void ocfs2_clear_inode(struct inode *inode) +{ + int status; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + mlog_entry_void(); + + if (!inode) + goto bail; + + mlog(0, "Clearing inode: %llu, nlink = %u\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink); + + mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, + "Inode=%lu\n", inode->i_ino); + + /* To prevent remote deletes we hold the open lock before; now it + * is time to unlock PR and EX open locks. */ + ocfs2_open_unlock(inode); + + /* Do these before all the other work so that we don't bounce + * the downconvert thread while waiting to destroy the locks. */ + ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres); + ocfs2_mark_lockres_freeing(&oi->ip_inode_lockres); + ocfs2_mark_lockres_freeing(&oi->ip_open_lockres); + + /* We very well may get a clear_inode before all of an inode's + * metadata has hit disk. Of course, we can't drop any cluster + * locks until the journal has finished with it. The only + * exception here are successfully wiped inodes - their + * metadata can now be considered to be part of the system + * inodes from which it came. */ + if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED)) + ocfs2_checkpoint_inode(inode); + + mlog_bug_on_msg(!list_empty(&oi->ip_io_markers), + "Clear inode of %llu, inode has io markers\n", + (unsigned long long)oi->ip_blkno); + + ocfs2_extent_map_trunc(inode, 0); + + status = ocfs2_drop_inode_locks(inode); + if (status < 0) + mlog_errno(status); + + ocfs2_lock_res_free(&oi->ip_rw_lockres); + ocfs2_lock_res_free(&oi->ip_inode_lockres); + ocfs2_lock_res_free(&oi->ip_open_lockres); + + ocfs2_metadata_cache_purge(inode); + + mlog_bug_on_msg(oi->ip_metadata_cache.ci_num_cached, + "Clear inode of %llu, inode has %u cache items\n", + (unsigned long long)oi->ip_blkno, oi->ip_metadata_cache.ci_num_cached); + + mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE), + "Clear inode of %llu, inode has a bad flag\n", + (unsigned long long)oi->ip_blkno); + + mlog_bug_on_msg(spin_is_locked(&oi->ip_lock), + "Clear inode of %llu, inode is locked\n", + (unsigned long long)oi->ip_blkno); + + mlog_bug_on_msg(!mutex_trylock(&oi->ip_io_mutex), + "Clear inode of %llu, io_mutex is locked\n", + (unsigned long long)oi->ip_blkno); + mutex_unlock(&oi->ip_io_mutex); + + /* + * down_trylock() returns 0, down_write_trylock() returns 1 + * kernel 1, world 0 + */ + mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem), + "Clear inode of %llu, alloc_sem is locked\n", + (unsigned long long)oi->ip_blkno); + up_write(&oi->ip_alloc_sem); + + mlog_bug_on_msg(oi->ip_open_count, + "Clear inode of %llu has open count %d\n", + (unsigned long long)oi->ip_blkno, oi->ip_open_count); + + /* Clear all other flags. */ + oi->ip_flags = OCFS2_INODE_CACHE_INLINE; + oi->ip_created_trans = 0; + oi->ip_last_trans = 0; + oi->ip_dir_start_lookup = 0; + oi->ip_blkno = 0ULL; + + /* + * ip_jinode is used to track txns against this inode. We ensure that + * the journal is flushed before journal shutdown. Thus it is safe to + * have inodes get cleaned up after journal shutdown.
+ */ + jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal, + &oi->ip_jinode); + +bail: + mlog_exit_void(); +} + +/* Called under inode_lock, with no more references on the + * struct inode, so it's safe here to check the flags field + * and to manipulate i_nlink without any other locks. */ +void ocfs2_drop_inode(struct inode *inode) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + + mlog_entry_void(); + + mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n", + (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags); + + if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) + generic_delete_inode(inode); + else + generic_drop_inode(inode); + + mlog_exit_void(); +} + +/* + * This is called from our getattr. + */ +int ocfs2_inode_revalidate(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + int status = 0; + + mlog_entry("(inode = 0x%p, ino = %llu)\n", inode, + inode ? (unsigned long long)OCFS2_I(inode)->ip_blkno : 0ULL); + + if (!inode) { + mlog(0, "eep, no inode!\n"); + status = -ENOENT; + goto bail; + } + + spin_lock(&OCFS2_I(inode)->ip_lock); + if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { + spin_unlock(&OCFS2_I(inode)->ip_lock); + mlog(0, "inode deleted!\n"); + status = -ENOENT; + goto bail; + } + spin_unlock(&OCFS2_I(inode)->ip_lock); + + /* Let ocfs2_inode_lock do the work of updating our struct + * inode for us. */ + status = ocfs2_inode_lock(inode, NULL, 0); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto bail; + } + ocfs2_inode_unlock(inode, 0); +bail: + mlog_exit(status); + + return status; +} + +/* + * Updates a disk inode from a + * struct inode. + * Only takes ip_lock. + */ +int ocfs2_mark_inode_dirty(handle_t *handle, + struct inode *inode, + struct buffer_head *bh) +{ + int status; + struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data; + + mlog_entry("(inode %llu)\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + + status = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + spin_lock(&OCFS2_I(inode)->ip_lock); + fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters); + ocfs2_get_inode_flags(OCFS2_I(inode)); + fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr); + fe->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features); + spin_unlock(&OCFS2_I(inode)->ip_lock); + + fe->i_size = cpu_to_le64(i_size_read(inode)); + fe->i_links_count = cpu_to_le16(inode->i_nlink); + fe->i_uid = cpu_to_le32(inode->i_uid); + fe->i_gid = cpu_to_le32(inode->i_gid); + fe->i_mode = cpu_to_le16(inode->i_mode); + fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec); + fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec); + fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); + fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); + fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec); + fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); + + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) + mlog_errno(status); + + status = 0; +leave: + + mlog_exit(status); + return status; +} + +/* + * + * Updates a struct inode from a disk inode. + * does no i/o, only takes ip_lock. 
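+ * + * A minimal caller sketch, assuming bh already holds a validated + * inode block: + * + * struct ocfs2_dinode *fe = (struct ocfs2_dinode *)bh->b_data; + * ocfs2_refresh_inode(inode, fe);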
+ */ +void ocfs2_refresh_inode(struct inode *inode, + struct ocfs2_dinode *fe) +{ + spin_lock(&OCFS2_I(inode)->ip_lock); + + OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); + OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr); + OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features); + ocfs2_set_inode_flags(inode); + i_size_write(inode, le64_to_cpu(fe->i_size)); + inode->i_nlink = le16_to_cpu(fe->i_links_count); + inode->i_uid = le32_to_cpu(fe->i_uid); + inode->i_gid = le32_to_cpu(fe->i_gid); + inode->i_mode = le16_to_cpu(fe->i_mode); + if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0) + inode->i_blocks = 0; + else + inode->i_blocks = ocfs2_inode_sector_count(inode); + inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime); + inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec); + inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime); + inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec); + inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime); + inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec); + + spin_unlock(&OCFS2_I(inode)->ip_lock); +} diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h new file mode 100644 index 0000000..2f37af9 --- /dev/null +++ b/fs/ocfs2/inode.h @@ -0,0 +1,156 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * inode.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_INODE_H +#define OCFS2_INODE_H + +#include "extent_map.h" + +/* OCFS2 Inode Private Data */ +struct ocfs2_inode_info +{ + u64 ip_blkno; + + struct ocfs2_lock_res ip_rw_lockres; + struct ocfs2_lock_res ip_inode_lockres; + struct ocfs2_lock_res ip_open_lockres; + + /* protects allocation changes on this inode. */ + struct rw_semaphore ip_alloc_sem; + + /* protects extended attribute changes on this inode */ + struct rw_semaphore ip_xattr_sem; + + /* These fields are protected by ip_lock */ + spinlock_t ip_lock; + u32 ip_open_count; + u32 ip_clusters; + struct list_head ip_io_markers; + + struct mutex ip_io_mutex; + + u32 ip_flags; /* see below */ + u32 ip_attr; /* inode attributes */ + u16 ip_dyn_features; + + /* protected by recovery_lock. */ + struct inode *ip_next_orphan; + + u32 ip_dir_start_lookup; + + /* next two are protected by trans_inc_lock */ + /* which transaction were we created on? Zero if none. */ + unsigned long ip_created_trans; + /* last transaction we were a part of. 
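+ * ocfs2_inode_fully_checkpointed() compares this against + * journal->j_trans_id under trans_inc_lock.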
*/ + unsigned long ip_last_trans; + + struct ocfs2_caching_info ip_metadata_cache; + + struct ocfs2_extent_map ip_extent_map; + + struct inode vfs_inode; + struct jbd2_inode ip_jinode; +}; + +/* + * Flags for the ip_flags field + */ +/* System file inodes */ +#define OCFS2_INODE_SYSTEM_FILE 0x00000001 +#define OCFS2_INODE_JOURNAL 0x00000002 +#define OCFS2_INODE_BITMAP 0x00000004 +/* This inode has been wiped from disk */ +#define OCFS2_INODE_DELETED 0x00000008 +/* Another node is deleting, so our delete is a nop */ +#define OCFS2_INODE_SKIP_DELETE 0x00000010 +/* Has the inode been orphaned on another node? + * + * This hints to ocfs2_drop_inode that it should clear i_nlink before + * continuing. + * + * We *only* set this on unlink vote from another node. If the inode + * was locally orphaned, then we're sure of the state and don't need + * to twiddle i_nlink later - it's either zero or not depending on + * whether our unlink succeeded. Otherwise we got this from a node + * whose intention was to orphan the inode, however he may have + * crashed, failed etc, so we let ocfs2_drop_inode zero the value and + * rely on ocfs2_delete_inode to sort things out under the proper + * cluster locks. + */ +#define OCFS2_INODE_MAYBE_ORPHANED 0x00000020 +/* Does someone have the file open O_DIRECT */ +#define OCFS2_INODE_OPEN_DIRECT 0x00000040 +/* Indicates that the metadata cache should be used as an array. */ +#define OCFS2_INODE_CACHE_INLINE 0x00000080 + +static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) +{ + return container_of(inode, struct ocfs2_inode_info, vfs_inode); +} + +#define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL) +#define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL) + +extern struct kmem_cache *ocfs2_inode_cache; + +extern const struct address_space_operations ocfs2_aops; + +void ocfs2_clear_inode(struct inode *inode); +void ocfs2_delete_inode(struct inode *inode); +void ocfs2_drop_inode(struct inode *inode); + +/* Flags for ocfs2_iget() */ +#define OCFS2_FI_FLAG_SYSFILE 0x1 +#define OCFS2_FI_FLAG_ORPHAN_RECOVERY 0x2 +struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, unsigned flags, + int sysfile_type); +int ocfs2_inode_init_private(struct inode *inode); +int ocfs2_inode_revalidate(struct dentry *dentry); +int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, + int create_ino); +void ocfs2_read_inode(struct inode *inode); +void ocfs2_read_inode2(struct inode *inode, void *opaque); +ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf, + size_t size, loff_t *offp); +void ocfs2_sync_blockdev(struct super_block *sb); +void ocfs2_refresh_inode(struct inode *inode, + struct ocfs2_dinode *fe); +int ocfs2_mark_inode_dirty(handle_t *handle, + struct inode *inode, + struct buffer_head *bh); +int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb); +int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb); + +void ocfs2_set_inode_flags(struct inode *inode); +void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi); + +static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode) +{ + int c_to_s_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits - 9; + + return (blkcnt_t)(OCFS2_I(inode)->ip_clusters << c_to_s_bits); +} + +#endif /* OCFS2_INODE_H */ diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c new file mode 100644 index 0000000..9fcd36d --- /dev/null +++ b/fs/ocfs2/ioctl.c @@ -0,0 +1,193 @@ +/* + * linux/fs/ocfs2/ioctl.c + * + * Copyright (C) 2006 Herbert 
Poetzl + * adapted from Remy Card's ext2/ioctl.c + */ + +#include <linux/fs.h> +#include <linux/mount.h> +#include <linux/smp_lock.h> + +#define MLOG_MASK_PREFIX ML_INODE +#include <cluster/masklog.h> + +#include "ocfs2.h" +#include "alloc.h" +#include "dlmglue.h" +#include "file.h" +#include "inode.h" +#include "journal.h" + +#include "ocfs2_fs.h" +#include "ioctl.h" +#include "resize.h" + +#include <linux/ext2_fs.h> + +static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) +{ + int status; + + status = ocfs2_inode_lock(inode, NULL, 0); + if (status < 0) { + mlog_errno(status); + return status; + } + ocfs2_get_inode_flags(OCFS2_I(inode)); + *flags = OCFS2_I(inode)->ip_attr; + ocfs2_inode_unlock(inode, 0); + + mlog_exit(status); + return status; +} + +static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags, + unsigned mask) +{ + struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + handle_t *handle = NULL; + struct buffer_head *bh = NULL; + unsigned oldflags; + int status; + + mutex_lock(&inode->i_mutex); + + status = ocfs2_inode_lock(inode, &bh, 1); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = -EACCES; + if (!is_owner_or_cap(inode)) + goto bail_unlock; + + if (!S_ISDIR(inode->i_mode)) + flags &= ~OCFS2_DIRSYNC_FL; + + handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + mlog_errno(status); + goto bail_unlock; + } + + oldflags = ocfs2_inode->ip_attr; + flags = flags & mask; + flags |= oldflags & ~mask; + + /* + * The IMMUTABLE and APPEND_ONLY flags can only be changed by + * the relevant capability. + */ + status = -EPERM; + if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) & + (OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) { + if (!capable(CAP_LINUX_IMMUTABLE)) + goto bail_unlock; + } + + ocfs2_inode->ip_attr = flags; + ocfs2_set_inode_flags(inode); + + status = ocfs2_mark_inode_dirty(handle, inode, bh); + if (status < 0) + mlog_errno(status); + + ocfs2_commit_trans(osb, handle); +bail_unlock: + ocfs2_inode_unlock(inode, 1); +bail: + mutex_unlock(&inode->i_mutex); + + brelse(bh); + + mlog_exit(status); + return status; +} + +long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + unsigned int flags; + int new_clusters; + int status; + struct ocfs2_space_resv sr; + struct ocfs2_new_group_input input; + + switch (cmd) { + case OCFS2_IOC_GETFLAGS: + status = ocfs2_get_inode_attr(inode, &flags); + if (status < 0) + return status; + + flags &= OCFS2_FL_VISIBLE; + return put_user(flags, (int __user *) arg); + case OCFS2_IOC_SETFLAGS: + if (get_user(flags, (int __user *) arg)) + return -EFAULT; + + status = mnt_want_write(filp->f_path.mnt); + if (status) + return status; + status = ocfs2_set_inode_attr(inode, flags, + OCFS2_FL_MODIFIABLE); + mnt_drop_write(filp->f_path.mnt); + return status; + case OCFS2_IOC_RESVSP: + case OCFS2_IOC_RESVSP64: + case OCFS2_IOC_UNRESVSP: + case OCFS2_IOC_UNRESVSP64: + if (copy_from_user(&sr, (int __user *) arg, sizeof(sr))) + return -EFAULT; + + return ocfs2_change_file_space(filp, cmd, &sr); + case OCFS2_IOC_GROUP_EXTEND: + if (!capable(CAP_SYS_RESOURCE)) + return -EPERM; + + if (get_user(new_clusters, (int __user *)arg)) + return -EFAULT; + + return ocfs2_group_extend(inode, new_clusters); + case OCFS2_IOC_GROUP_ADD: + case OCFS2_IOC_GROUP_ADD64: + if (!capable(CAP_SYS_RESOURCE)) + return -EPERM; + + if 
(copy_from_user(&input, (int __user *) arg, sizeof(input))) + return -EFAULT; + + return ocfs2_group_add(inode, &input); + default: + return -ENOTTY; + } +} + +#ifdef CONFIG_COMPAT +long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) +{ + switch (cmd) { + case OCFS2_IOC32_GETFLAGS: + cmd = OCFS2_IOC_GETFLAGS; + break; + case OCFS2_IOC32_SETFLAGS: + cmd = OCFS2_IOC_SETFLAGS; + break; + case OCFS2_IOC_RESVSP: + case OCFS2_IOC_RESVSP64: + case OCFS2_IOC_UNRESVSP: + case OCFS2_IOC_UNRESVSP64: + case OCFS2_IOC_GROUP_EXTEND: + case OCFS2_IOC_GROUP_ADD: + case OCFS2_IOC_GROUP_ADD64: + break; + default: + return -ENOIOCTLCMD; + } + + return ocfs2_ioctl(file, cmd, arg); +} +#endif diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h new file mode 100644 index 0000000..cf9a5ee --- /dev/null +++ b/fs/ocfs2/ioctl.h @@ -0,0 +1,16 @@ +/* + * ioctl.h + * + * Function prototypes + * + * Copyright (C) 2006 Herbert Poetzl + * + */ + +#ifndef OCFS2_IOCTL_H +#define OCFS2_IOCTL_H + +long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg); + +#endif /* OCFS2_IOCTL_H */ diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c new file mode 100644 index 0000000..99fe9d5 --- /dev/null +++ b/fs/ocfs2/journal.c @@ -0,0 +1,1751 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * journal.c + * + * Defines functions of journalling api + * + * Copyright (C) 2003, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/kthread.h> + +#define MLOG_MASK_PREFIX ML_JOURNAL +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dir.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "heartbeat.h" +#include "inode.h" +#include "journal.h" +#include "localalloc.h" +#include "slot_map.h" +#include "super.h" +#include "sysfile.h" + +#include "buffer_head_io.h" + +DEFINE_SPINLOCK(trans_inc_lock); + +static int ocfs2_force_read_journal(struct inode *inode); +static int ocfs2_recover_node(struct ocfs2_super *osb, + int node_num); +static int __ocfs2_recovery_thread(void *arg); +static int ocfs2_commit_cache(struct ocfs2_super *osb); +static int ocfs2_wait_on_mount(struct ocfs2_super *osb); +static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, + int dirty, int replayed); +static int ocfs2_trylock_journal(struct ocfs2_super *osb, + int slot_num); +static int ocfs2_recover_orphans(struct ocfs2_super *osb, + int slot); +static int ocfs2_commit_thread(void *arg); + + +/* + * The recovery_list is a simple linked list of node numbers to recover. + * It is protected by the recovery_lock. 
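+ * + * (Strictly, the ocfs2_recovery_map below is an array of node numbers + * sized to osb->max_slots, and its entries are manipulated under + * osb->osb_lock; see the assert_spin_locked() in + * __ocfs2_recovery_map_test(). A hedged usage sketch, where busy is an + * illustrative local: + * + * spin_lock(&osb->osb_lock); + * busy = __ocfs2_recovery_map_test(osb, node_num); + * spin_unlock(&osb->osb_lock); + * )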
+ */ + +struct ocfs2_recovery_map { + unsigned int rm_used; + unsigned int *rm_entries; +}; + +int ocfs2_recovery_init(struct ocfs2_super *osb) +{ + struct ocfs2_recovery_map *rm; + + mutex_init(&osb->recovery_lock); + osb->disable_recovery = 0; + osb->recovery_thread_task = NULL; + init_waitqueue_head(&osb->recovery_event); + + rm = kzalloc(sizeof(struct ocfs2_recovery_map) + + osb->max_slots * sizeof(unsigned int), + GFP_KERNEL); + if (!rm) { + mlog_errno(-ENOMEM); + return -ENOMEM; + } + + rm->rm_entries = (unsigned int *)((char *)rm + + sizeof(struct ocfs2_recovery_map)); + osb->recovery_map = rm; + + return 0; +} + +/* we can't grab the goofy sem lock from inside wait_event, so we use + * memory barriers to make sure that we'll see the null task before + * being woken up */ +static int ocfs2_recovery_thread_running(struct ocfs2_super *osb) +{ + mb(); + return osb->recovery_thread_task != NULL; +} + +void ocfs2_recovery_exit(struct ocfs2_super *osb) +{ + struct ocfs2_recovery_map *rm; + + /* disable any new recovery threads and wait for any currently + * running ones to exit. Do this before setting the vol_state. */ + mutex_lock(&osb->recovery_lock); + osb->disable_recovery = 1; + mutex_unlock(&osb->recovery_lock); + wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); + + /* At this point, we know that no more recovery threads can be + * launched, so wait for any recovery completion work to + * complete. */ + flush_workqueue(ocfs2_wq); + + /* + * Now that recovery is shut down, and the osb is about to be + * freed, the osb_lock is not taken here. + */ + rm = osb->recovery_map; + /* XXX: Should we bug if there are dirty entries? */ + + kfree(rm); +} + +static int __ocfs2_recovery_map_test(struct ocfs2_super *osb, + unsigned int node_num) +{ + int i; + struct ocfs2_recovery_map *rm = osb->recovery_map; + + assert_spin_locked(&osb->osb_lock); + + for (i = 0; i < rm->rm_used; i++) { + if (rm->rm_entries[i] == node_num) + return 1; + } + + return 0; +} + +/* Behaves like test-and-set. Returns the previous value */ +static int ocfs2_recovery_map_set(struct ocfs2_super *osb, + unsigned int node_num) +{ + struct ocfs2_recovery_map *rm = osb->recovery_map; + + spin_lock(&osb->osb_lock); + if (__ocfs2_recovery_map_test(osb, node_num)) { + spin_unlock(&osb->osb_lock); + return 1; + } + + /* XXX: Can this be exploited? Not from o2dlm... */ + BUG_ON(rm->rm_used >= osb->max_slots); + + rm->rm_entries[rm->rm_used] = node_num; + rm->rm_used++; + spin_unlock(&osb->osb_lock); + + return 0; +} + +static void ocfs2_recovery_map_clear(struct ocfs2_super *osb, + unsigned int node_num) +{ + int i; + struct ocfs2_recovery_map *rm = osb->recovery_map; + + spin_lock(&osb->osb_lock); + + for (i = 0; i < rm->rm_used; i++) { + if (rm->rm_entries[i] == node_num) + break; + } + + if (i < rm->rm_used) { + /* XXX: be careful with the pointer math */ + memmove(&(rm->rm_entries[i]), &(rm->rm_entries[i + 1]), + (rm->rm_used - i - 1) * sizeof(unsigned int)); + rm->rm_used--; + } + + spin_unlock(&osb->osb_lock); +} + +static int ocfs2_commit_cache(struct ocfs2_super *osb) +{ + int status = 0; + unsigned int flushed; + unsigned long old_id; + struct ocfs2_journal *journal = NULL; + + mlog_entry_void(); + + journal = osb->journal; + + /* Flush all pending commits and checkpoint the journal. 
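+ * Taking j_trans_barrier for write excludes ocfs2_start_trans(), + * which takes it for read, so no new handles can begin while we + * flush.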
*/ + down_write(&journal->j_trans_barrier); + + if (atomic_read(&journal->j_num_trans) == 0) { + up_write(&journal->j_trans_barrier); + mlog(0, "No transactions for me to flush!\n"); + goto finally; + } + + jbd2_journal_lock_updates(journal->j_journal); + status = jbd2_journal_flush(journal->j_journal); + jbd2_journal_unlock_updates(journal->j_journal); + if (status < 0) { + up_write(&journal->j_trans_barrier); + mlog_errno(status); + goto finally; + } + + old_id = ocfs2_inc_trans_id(journal); + + flushed = atomic_read(&journal->j_num_trans); + atomic_set(&journal->j_num_trans, 0); + up_write(&journal->j_trans_barrier); + + mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n", + journal->j_trans_id, flushed); + + ocfs2_wake_downconvert_thread(osb); + wake_up(&journal->j_checkpointed); +finally: + mlog_exit(status); + return status; +} + +/* pass it NULL and it will allocate a new handle object for you. If + * you pass it a handle however, it may still return error, in which + * case it has free'd the passed handle for you. */ +handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs) +{ + journal_t *journal = osb->journal->j_journal; + handle_t *handle; + + BUG_ON(!osb || !osb->journal->j_journal); + + if (ocfs2_is_hard_readonly(osb)) + return ERR_PTR(-EROFS); + + BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE); + BUG_ON(max_buffs <= 0); + + /* JBD might support this, but our journalling code doesn't yet. */ + if (journal_current_handle()) { + mlog(ML_ERROR, "Recursive transaction attempted!\n"); + BUG(); + } + + down_read(&osb->journal->j_trans_barrier); + + handle = jbd2_journal_start(journal, max_buffs); + if (IS_ERR(handle)) { + up_read(&osb->journal->j_trans_barrier); + + mlog_errno(PTR_ERR(handle)); + + if (is_journal_aborted(journal)) { + ocfs2_abort(osb->sb, "Detected aborted journal"); + handle = ERR_PTR(-EROFS); + } + } else { + if (!ocfs2_mount_local(osb)) + atomic_inc(&(osb->journal->j_num_trans)); + } + + return handle; +} + +int ocfs2_commit_trans(struct ocfs2_super *osb, + handle_t *handle) +{ + int ret; + struct ocfs2_journal *journal = osb->journal; + + BUG_ON(!handle); + + ret = jbd2_journal_stop(handle); + if (ret < 0) + mlog_errno(ret); + + up_read(&journal->j_trans_barrier); + + return ret; +} + +/* + * 'nblocks' is what you want to add to the current + * transaction. extend_trans will either extend the current handle by + * nblocks, or commit it and start a new one with nblocks credits. + * + * This might call jbd2_journal_restart() which will commit dirty buffers + * and then restart the transaction. Before calling + * ocfs2_extend_trans(), any changed blocks should have been + * dirtied. After calling it, all blocks which need to be changed must + * go through another set of journal_access/journal_dirty calls. + * + * WARNING: This will not release any semaphores or disk locks taken + * during the transaction, so make sure they were taken *before* + * start_trans or we'll have ordering deadlocks. + * + * WARNING2: Note that we do *not* drop j_trans_barrier here. This is + * good because transaction ids haven't yet been recorded on the + * cluster locks associated with this handle. 
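+ * + * A hedged caller sketch (extra_credits and the bail label are + * illustrative); any buffer dirtied before the extend must be + * re-declared afterwards: + * + * status = ocfs2_extend_trans(handle, extra_credits); + * if (status < 0) + * goto bail; + * status = ocfs2_journal_access(handle, inode, bh, + * OCFS2_JOURNAL_ACCESS_WRITE);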
+ */ +int ocfs2_extend_trans(handle_t *handle, int nblocks) +{ + int status; + + BUG_ON(!handle); + BUG_ON(!nblocks); + + mlog_entry_void(); + + mlog(0, "Trying to extend transaction by %d blocks\n", nblocks); + +#ifdef CONFIG_OCFS2_DEBUG_FS + status = 1; +#else + status = jbd2_journal_extend(handle, nblocks); + if (status < 0) { + mlog_errno(status); + goto bail; + } +#endif + + if (status > 0) { + mlog(0, + "jbd2_journal_extend failed, trying " + "jbd2_journal_restart\n"); + status = jbd2_journal_restart(handle, nblocks); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + status = 0; +bail: + + mlog_exit(status); + return status; +} + +int ocfs2_journal_access(handle_t *handle, + struct inode *inode, + struct buffer_head *bh, + int type) +{ + int status; + + BUG_ON(!inode); + BUG_ON(!handle); + BUG_ON(!bh); + + mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", + (unsigned long long)bh->b_blocknr, type, + (type == OCFS2_JOURNAL_ACCESS_CREATE) ? + "OCFS2_JOURNAL_ACCESS_CREATE" : + "OCFS2_JOURNAL_ACCESS_WRITE", + bh->b_size); + + /* we can safely remove this assertion after testing. */ + if (!buffer_uptodate(bh)) { + mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n"); + mlog(ML_ERROR, "b_blocknr=%llu\n", + (unsigned long long)bh->b_blocknr); + BUG(); + } + + /* Set the current transaction information on the inode so + * that the locking code knows whether it can drop its locks + * on this inode or not. We're protected from the commit + * thread updating the current transaction id until + * ocfs2_commit_trans() because ocfs2_start_trans() took + * j_trans_barrier for us. */ + ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode); + + mutex_lock(&OCFS2_I(inode)->ip_io_mutex); + switch (type) { + case OCFS2_JOURNAL_ACCESS_CREATE: + case OCFS2_JOURNAL_ACCESS_WRITE: + status = jbd2_journal_get_write_access(handle, bh); + break; + + case OCFS2_JOURNAL_ACCESS_UNDO: + status = jbd2_journal_get_undo_access(handle, bh); + break; + + default: + status = -EINVAL; + mlog(ML_ERROR, "Unknown access type!\n"); + } + mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); + + if (status < 0) + mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", + status, type); + + mlog_exit(status); + return status; +} + +int ocfs2_journal_dirty(handle_t *handle, + struct buffer_head *bh) +{ + int status; + + mlog_entry("(bh->b_blocknr=%llu)\n", + (unsigned long long)bh->b_blocknr); + + status = jbd2_journal_dirty_metadata(handle, bh); + if (status < 0) + mlog(ML_ERROR, "Could not dirty metadata buffer. " + "(bh->b_blocknr=%llu)\n", + (unsigned long long)bh->b_blocknr); + + mlog_exit(status); + return status; +} + +#ifdef CONFIG_OCFS2_COMPAT_JBD +int ocfs2_journal_dirty_data(handle_t *handle, + struct buffer_head *bh) +{ + int err = journal_dirty_data(handle, bh); + if (err) + mlog_errno(err); + /* TODO: When we can handle it, abort the handle and go RO on + * error here.
*/ + + return err; +} +#endif + +#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE) + +void ocfs2_set_journal_params(struct ocfs2_super *osb) +{ + journal_t *journal = osb->journal->j_journal; + unsigned long commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL; + + if (osb->osb_commit_interval) + commit_interval = osb->osb_commit_interval; + + spin_lock(&journal->j_state_lock); + journal->j_commit_interval = commit_interval; + if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER) + journal->j_flags |= JBD2_BARRIER; + else + journal->j_flags &= ~JBD2_BARRIER; + spin_unlock(&journal->j_state_lock); +} + +int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) +{ + int status = -1; + struct inode *inode = NULL; /* the journal inode */ + journal_t *j_journal = NULL; + struct ocfs2_dinode *di = NULL; + struct buffer_head *bh = NULL; + struct ocfs2_super *osb; + int inode_lock = 0; + + mlog_entry_void(); + + BUG_ON(!journal); + + osb = journal->j_osb; + + /* already have the inode for our journal */ + inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, + osb->slot_num); + if (inode == NULL) { + status = -EACCES; + mlog_errno(status); + goto done; + } + if (is_bad_inode(inode)) { + mlog(ML_ERROR, "access error (bad inode)\n"); + iput(inode); + inode = NULL; + status = -EACCES; + goto done; + } + + SET_INODE_JOURNAL(inode); + OCFS2_I(inode)->ip_open_count++; + + /* Skip recovery waits here - journal inode metadata never + * changes in a live cluster so it can be considered an + * exception to the rule. */ + status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); + if (status < 0) { + if (status != -ERESTARTSYS) + mlog(ML_ERROR, "Could not get lock on journal!\n"); + goto done; + } + + inode_lock = 1; + di = (struct ocfs2_dinode *)bh->b_data; + + if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) { + mlog(ML_ERROR, "Journal file size (%lld) is too small!\n", + inode->i_size); + status = -EINVAL; + goto done; + } + + mlog(0, "inode->i_size = %lld\n", inode->i_size); + mlog(0, "inode->i_blocks = %llu\n", + (unsigned long long)inode->i_blocks); + mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); + + /* call the kernels journal init function now */ + j_journal = jbd2_journal_init_inode(inode); + if (j_journal == NULL) { + mlog(ML_ERROR, "Linux journal layer error\n"); + status = -EINVAL; + goto done; + } + + mlog(0, "Returned from jbd2_journal_init_inode\n"); + mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen); + + *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) & + OCFS2_JOURNAL_DIRTY_FL); + + journal->j_journal = j_journal; + journal->j_inode = inode; + journal->j_bh = bh; + + ocfs2_set_journal_params(osb); + + journal->j_state = OCFS2_JOURNAL_LOADED; + + status = 0; +done: + if (status < 0) { + if (inode_lock) + ocfs2_inode_unlock(inode, 1); + brelse(bh); + if (inode) { + OCFS2_I(inode)->ip_open_count--; + iput(inode); + } + } + + mlog_exit(status); + return status; +} + +static void ocfs2_bump_recovery_generation(struct ocfs2_dinode *di) +{ + le32_add_cpu(&(di->id1.journal1.ij_recovery_generation), 1); +} + +static u32 ocfs2_get_recovery_generation(struct ocfs2_dinode *di) +{ + return le32_to_cpu(di->id1.journal1.ij_recovery_generation); +} + +static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, + int dirty, int replayed) +{ + int status; + unsigned int flags; + struct ocfs2_journal *journal = osb->journal; + struct buffer_head *bh = journal->j_bh; + struct ocfs2_dinode *fe; + + mlog_entry_void(); + + fe = (struct 
ocfs2_dinode *)bh->b_data; + if (!OCFS2_IS_VALID_DINODE(fe)) { + /* This is called from startup/shutdown which will + * handle the errors in a specific manner, so no need + * to call ocfs2_error() here. */ + mlog(ML_ERROR, "Journal dinode %llu has invalid " + "signature: %.*s", + (unsigned long long)le64_to_cpu(fe->i_blkno), 7, + fe->i_signature); + status = -EIO; + goto out; + } + + flags = le32_to_cpu(fe->id1.journal1.ij_flags); + if (dirty) + flags |= OCFS2_JOURNAL_DIRTY_FL; + else + flags &= ~OCFS2_JOURNAL_DIRTY_FL; + fe->id1.journal1.ij_flags = cpu_to_le32(flags); + + if (replayed) + ocfs2_bump_recovery_generation(fe); + + status = ocfs2_write_block(osb, bh, journal->j_inode); + if (status < 0) + mlog_errno(status); + +out: + mlog_exit(status); + return status; +} + +/* + * If the journal has been kmalloc'd it needs to be freed after this + * call. + */ +void ocfs2_journal_shutdown(struct ocfs2_super *osb) +{ + struct ocfs2_journal *journal = NULL; + int status = 0; + struct inode *inode = NULL; + int num_running_trans = 0; + + mlog_entry_void(); + + BUG_ON(!osb); + + journal = osb->journal; + if (!journal) + goto done; + + inode = journal->j_inode; + + if (journal->j_state != OCFS2_JOURNAL_LOADED) + goto done; + + /* need to inc inode use count - jbd2_journal_destroy will iput. */ + if (!igrab(inode)) + BUG(); + + num_running_trans = atomic_read(&(osb->journal->j_num_trans)); + if (num_running_trans > 0) + mlog(0, "Shutting down journal: must wait on %d " + "running transactions!\n", + num_running_trans); + + /* Do a commit_cache here. It will flush our journal, *and* + * release any locks that are still held. + * set the SHUTDOWN flag and release the trans lock. + * the commit thread will take the trans lock for us below. */ + journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN; + + /* The OCFS2_JOURNAL_IN_SHUTDOWN will signal to commit_cache to not + * drop the trans_lock (which we want to hold until we + * completely destroy the journal. 
*/ + if (osb->commit_task) { + /* Wait for the commit thread */ + mlog(0, "Waiting for ocfs2commit to exit....\n"); + kthread_stop(osb->commit_task); + osb->commit_task = NULL; + } + + BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0); + + if (ocfs2_mount_local(osb)) { + jbd2_journal_lock_updates(journal->j_journal); + status = jbd2_journal_flush(journal->j_journal); + jbd2_journal_unlock_updates(journal->j_journal); + if (status < 0) + mlog_errno(status); + } + + if (status == 0) { + /* + * Do not toggle if flush was unsuccessful otherwise + * will leave dirty metadata in a "clean" journal + */ + status = ocfs2_journal_toggle_dirty(osb, 0, 0); + if (status < 0) + mlog_errno(status); + } + + /* Shutdown the kernel journal system */ + jbd2_journal_destroy(journal->j_journal); + journal->j_journal = NULL; + + OCFS2_I(inode)->ip_open_count--; + + /* unlock our journal */ + ocfs2_inode_unlock(inode, 1); + + brelse(journal->j_bh); + journal->j_bh = NULL; + + journal->j_state = OCFS2_JOURNAL_FREE; + +// up_write(&journal->j_trans_barrier); +done: + if (inode) + iput(inode); + mlog_exit_void(); +} + +static void ocfs2_clear_journal_error(struct super_block *sb, + journal_t *journal, + int slot) +{ + int olderr; + + olderr = jbd2_journal_errno(journal); + if (olderr) { + mlog(ML_ERROR, "File system error %d recorded in " + "journal %u.\n", olderr, slot); + mlog(ML_ERROR, "File system on device %s needs checking.\n", + sb->s_id); + + jbd2_journal_ack_err(journal); + jbd2_journal_clear_err(journal); + } +} + +int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) +{ + int status = 0; + struct ocfs2_super *osb; + + mlog_entry_void(); + + BUG_ON(!journal); + + osb = journal->j_osb; + + status = jbd2_journal_load(journal->j_journal); + if (status < 0) { + mlog(ML_ERROR, "Failed to load journal!\n"); + goto done; + } + + ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num); + + status = ocfs2_journal_toggle_dirty(osb, 1, replayed); + if (status < 0) { + mlog_errno(status); + goto done; + } + + /* Launch the commit thread */ + if (!local) { + osb->commit_task = kthread_run(ocfs2_commit_thread, osb, + "ocfs2cmt"); + if (IS_ERR(osb->commit_task)) { + status = PTR_ERR(osb->commit_task); + osb->commit_task = NULL; + mlog(ML_ERROR, "unable to launch ocfs2commit thread, " + "error=%d", status); + goto done; + } + } else + osb->commit_task = NULL; + +done: + mlog_exit(status); + return status; +} + + +/* 'full' flag tells us whether we clear out all blocks or if we just + * mark the journal clean */ +int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full) +{ + int status; + + mlog_entry_void(); + + BUG_ON(!journal); + + status = jbd2_journal_wipe(journal->j_journal, full); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_journal_toggle_dirty(journal->j_osb, 0, 0); + if (status < 0) + mlog_errno(status); + +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_recovery_completed(struct ocfs2_super *osb) +{ + int empty; + struct ocfs2_recovery_map *rm = osb->recovery_map; + + spin_lock(&osb->osb_lock); + empty = (rm->rm_used == 0); + spin_unlock(&osb->osb_lock); + + return empty; +} + +void ocfs2_wait_for_recovery(struct ocfs2_super *osb) +{ + wait_event(osb->recovery_event, ocfs2_recovery_completed(osb)); +} + +/* + * JBD Might read a cached version of another nodes journal file. We + * don't want this as this file changes often and we get no + * notification on those changes. 
The only way to be sure that we've + * got the most up to date version of those blocks then is to force + * read them off disk. Just searching through the buffer cache won't + * work as there may be pages backing this file which are still marked + * up to date. We know things can't change on this file underneath us + * as we have the lock by now :) + */ +static int ocfs2_force_read_journal(struct inode *inode) +{ + int status = 0; + int i; + u64 v_blkno, p_blkno, p_blocks, num_blocks; +#define CONCURRENT_JOURNAL_FILL 32ULL + struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; + + mlog_entry_void(); + + memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); + + num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size); + v_blkno = 0; + while (v_blkno < num_blocks) { + status = ocfs2_extent_map_get_blocks(inode, v_blkno, + &p_blkno, &p_blocks, NULL); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (p_blocks > CONCURRENT_JOURNAL_FILL) + p_blocks = CONCURRENT_JOURNAL_FILL; + + /* We are reading journal data which should not + * be put in the uptodate cache */ + status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb), + p_blkno, p_blocks, bhs); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + for(i = 0; i < p_blocks; i++) { + brelse(bhs[i]); + bhs[i] = NULL; + } + + v_blkno += p_blocks; + } + +bail: + for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) + brelse(bhs[i]); + mlog_exit(status); + return status; +} + +struct ocfs2_la_recovery_item { + struct list_head lri_list; + int lri_slot; + struct ocfs2_dinode *lri_la_dinode; + struct ocfs2_dinode *lri_tl_dinode; +}; + +/* Does the second half of the recovery process. By this point, the + * node is marked clean and can actually be considered recovered, + * hence it's no longer in the recovery map, but there's still some + * cleanup we can do which shouldn't happen within the recovery thread + * as locking in that context becomes very difficult if we are to take + * recovering nodes into account. + * + * NOTE: This function can and will sleep on recovery of other nodes + * during cluster locking, just like any other ocfs2 process. 
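+ * + * (Dispatch is assumed to follow the usual workqueue pattern, with the + * work item initialized once at journal setup, e.g.: + * + * INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); + * + * after which ocfs2_queue_recovery_completion() below kicks it via + * queue_work(ocfs2_wq, &journal->j_recovery_work).)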
+ */ +void ocfs2_complete_recovery(struct work_struct *work) +{ + int ret; + struct ocfs2_journal *journal = + container_of(work, struct ocfs2_journal, j_recovery_work); + struct ocfs2_super *osb = journal->j_osb; + struct ocfs2_dinode *la_dinode, *tl_dinode; + struct ocfs2_la_recovery_item *item, *n; + LIST_HEAD(tmp_la_list); + + mlog_entry_void(); + + mlog(0, "completing recovery from keventd\n"); + + spin_lock(&journal->j_lock); + list_splice_init(&journal->j_la_cleanups, &tmp_la_list); + spin_unlock(&journal->j_lock); + + list_for_each_entry_safe(item, n, &tmp_la_list, lri_list) { + list_del_init(&item->lri_list); + + mlog(0, "Complete recovery for slot %d\n", item->lri_slot); + + la_dinode = item->lri_la_dinode; + if (la_dinode) { + mlog(0, "Clean up local alloc %llu\n", + (unsigned long long)le64_to_cpu(la_dinode->i_blkno)); + + ret = ocfs2_complete_local_alloc_recovery(osb, + la_dinode); + if (ret < 0) + mlog_errno(ret); + + kfree(la_dinode); + } + + tl_dinode = item->lri_tl_dinode; + if (tl_dinode) { + mlog(0, "Clean up truncate log %llu\n", + (unsigned long long)le64_to_cpu(tl_dinode->i_blkno)); + + ret = ocfs2_complete_truncate_log_recovery(osb, + tl_dinode); + if (ret < 0) + mlog_errno(ret); + + kfree(tl_dinode); + } + + ret = ocfs2_recover_orphans(osb, item->lri_slot); + if (ret < 0) + mlog_errno(ret); + + kfree(item); + } + + mlog(0, "Recovery completion\n"); + mlog_exit_void(); +} + +/* NOTE: This function always eats your references to la_dinode and + * tl_dinode, either manually on error, or by passing them to + * ocfs2_complete_recovery */ +static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, + int slot_num, + struct ocfs2_dinode *la_dinode, + struct ocfs2_dinode *tl_dinode) +{ + struct ocfs2_la_recovery_item *item; + + item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS); + if (!item) { + /* Though we wish to avoid it, we are in fact safe in + * skipping local alloc cleanup as fsck.ocfs2 is more + * than capable of reclaiming unused space. */ + if (la_dinode) + kfree(la_dinode); + + if (tl_dinode) + kfree(tl_dinode); + + mlog_errno(-ENOMEM); + return; + } + + INIT_LIST_HEAD(&item->lri_list); + item->lri_la_dinode = la_dinode; + item->lri_slot = slot_num; + item->lri_tl_dinode = tl_dinode; + + spin_lock(&journal->j_lock); + list_add_tail(&item->lri_list, &journal->j_la_cleanups); + queue_work(ocfs2_wq, &journal->j_recovery_work); + spin_unlock(&journal->j_lock); +} + +/* Called by the mount code to queue recovery the last part of + * recovery for it's own slot. */ +void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) +{ + struct ocfs2_journal *journal = osb->journal; + + if (osb->dirty) { + /* No need to queue up our truncate_log as regular + * cleanup will catch that. */ + ocfs2_queue_recovery_completion(journal, + osb->slot_num, + osb->local_alloc_copy, + NULL); + ocfs2_schedule_truncate_log_flush(osb, 0); + + osb->local_alloc_copy = NULL; + osb->dirty = 0; + } +} + +static int __ocfs2_recovery_thread(void *arg) +{ + int status, node_num; + struct ocfs2_super *osb = arg; + struct ocfs2_recovery_map *rm = osb->recovery_map; + + mlog_entry_void(); + + status = ocfs2_wait_on_mount(osb); + if (status < 0) { + goto bail; + } + +restart: + status = ocfs2_super_lock(osb, 1); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + spin_lock(&osb->osb_lock); + while (rm->rm_used) { + /* It's always safe to remove entry zero, as we won't + * clear it until ocfs2_recover_node() has succeeded. 
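+ * That is, ocfs2_recovery_map_clear() runs only after + * ocfs2_recover_node() succeeds, so rm_entries[0] cannot disappear + * while osb_lock is dropped around the recovery call.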
*/ + node_num = rm->rm_entries[0]; + spin_unlock(&osb->osb_lock); + + status = ocfs2_recover_node(osb, node_num); + if (!status) { + ocfs2_recovery_map_clear(osb, node_num); + } else { + mlog(ML_ERROR, + "Error %d recovering node %d on device (%u,%u)!\n", + status, node_num, + MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); + mlog(ML_ERROR, "Volume requires unmount.\n"); + } + + spin_lock(&osb->osb_lock); + } + spin_unlock(&osb->osb_lock); + mlog(0, "All nodes recovered\n"); + + /* Refresh all journal recovery generations from disk */ + status = ocfs2_check_journals_nolocks(osb); + status = (status == -EROFS) ? 0 : status; + if (status < 0) + mlog_errno(status); + + ocfs2_super_unlock(osb, 1); + + /* We always run recovery on our own orphan dir - the dead + * node(s) may have disallowed a previous inode delete. Re-processing + * is therefore required. */ + ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL, + NULL); + +bail: + mutex_lock(&osb->recovery_lock); + if (!status && !ocfs2_recovery_completed(osb)) { + mutex_unlock(&osb->recovery_lock); + goto restart; + } + + osb->recovery_thread_task = NULL; + mb(); /* sync with ocfs2_recovery_thread_running */ + wake_up(&osb->recovery_event); + + mutex_unlock(&osb->recovery_lock); + + mlog_exit(status); + /* no one is calling kthread_stop() for us so the kthread() API + * requires that we call do_exit(). And it isn't exported, but + * complete_and_exit() seems to be a minimal wrapper around it. */ + complete_and_exit(NULL, status); + return status; +} + +void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) +{ + mlog_entry("(node_num=%d, osb->node_num = %d)\n", + node_num, osb->node_num); + + mutex_lock(&osb->recovery_lock); + if (osb->disable_recovery) + goto out; + + /* People waiting on recovery will wait on + * the recovery map to empty. */ + if (ocfs2_recovery_map_set(osb, node_num)) + mlog(0, "node %d already in recovery map.\n", node_num); + + mlog(0, "starting recovery thread...\n"); + + if (osb->recovery_thread_task) + goto out; + + osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb, + "ocfs2rec"); + if (IS_ERR(osb->recovery_thread_task)) { + mlog_errno((int)PTR_ERR(osb->recovery_thread_task)); + osb->recovery_thread_task = NULL; + } + +out: + mutex_unlock(&osb->recovery_lock); + wake_up(&osb->recovery_event); + + mlog_exit_void(); +} + +static int ocfs2_read_journal_inode(struct ocfs2_super *osb, + int slot_num, + struct buffer_head **bh, + struct inode **ret_inode) +{ + int status = -EACCES; + struct inode *inode = NULL; + + BUG_ON(slot_num >= osb->max_slots); + + inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, + slot_num); + if (!inode || is_bad_inode(inode)) { + mlog_errno(status); + goto bail; + } + SET_INODE_JOURNAL(inode); + + status = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, bh, + OCFS2_BH_IGNORE_CACHE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = 0; + +bail: + if (inode) { + if (status || !ret_inode) + iput(inode); + else + *ret_inode = inode; + } + return status; +} + +/* Does the actual journal replay and marks the journal inode as + * clean. Will only replay if the journal inode is marked dirty.
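+ * + * The gate is the dirty bit in the journal dinode, tested in the body + * below roughly as: + * + * flags = le32_to_cpu(fe->id1.journal1.ij_flags); + * if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) + * goto done;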
*/ +static int ocfs2_replay_journal(struct ocfs2_super *osb, + int node_num, + int slot_num) +{ + int status; + int got_lock = 0; + unsigned int flags; + struct inode *inode = NULL; + struct ocfs2_dinode *fe; + journal_t *journal = NULL; + struct buffer_head *bh = NULL; + u32 slot_reco_gen; + + status = ocfs2_read_journal_inode(osb, slot_num, &bh, &inode); + if (status) { + mlog_errno(status); + goto done; + } + + fe = (struct ocfs2_dinode *)bh->b_data; + slot_reco_gen = ocfs2_get_recovery_generation(fe); + brelse(bh); + bh = NULL; + + /* + * As the fs recovery is asynchronous, there is a small chance that + * another node mounted (and recovered) the slot before the recovery + * thread could get the lock. To handle that, we dirty read the journal + * inode for that slot to get the recovery generation. If it is + * different than what we expected, the slot has been recovered. + * If not, it needs recovery. + */ + if (osb->slot_recovery_generations[slot_num] != slot_reco_gen) { + mlog(0, "Slot %u already recovered (old/new=%u/%u)\n", slot_num, + osb->slot_recovery_generations[slot_num], slot_reco_gen); + osb->slot_recovery_generations[slot_num] = slot_reco_gen; + status = -EBUSY; + goto done; + } + + /* Continue with recovery as the journal has not yet been recovered */ + + status = ocfs2_inode_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY); + if (status < 0) { + mlog(0, "status returned from ocfs2_inode_lock=%d\n", status); + if (status != -ERESTARTSYS) + mlog(ML_ERROR, "Could not lock journal!\n"); + goto done; + } + got_lock = 1; + + fe = (struct ocfs2_dinode *) bh->b_data; + + flags = le32_to_cpu(fe->id1.journal1.ij_flags); + slot_reco_gen = ocfs2_get_recovery_generation(fe); + + if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) { + mlog(0, "No recovery required for node %d\n", node_num); + /* Refresh recovery generation for the slot */ + osb->slot_recovery_generations[slot_num] = slot_reco_gen; + goto done; + } + + mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", + node_num, slot_num, + MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); + + OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); + + status = ocfs2_force_read_journal(inode); + if (status < 0) { + mlog_errno(status); + goto done; + } + + mlog(0, "calling journal_init_inode\n"); + journal = jbd2_journal_init_inode(inode); + if (journal == NULL) { + mlog(ML_ERROR, "Linux journal layer error\n"); + status = -EIO; + goto done; + } + + status = jbd2_journal_load(journal); + if (status < 0) { + mlog_errno(status); + if (!igrab(inode)) + BUG(); + jbd2_journal_destroy(journal); + goto done; + } + + ocfs2_clear_journal_error(osb->sb, journal, slot_num); + + /* wipe the journal */ + mlog(0, "flushing the journal.\n"); + jbd2_journal_lock_updates(journal); + status = jbd2_journal_flush(journal); + jbd2_journal_unlock_updates(journal); + if (status < 0) + mlog_errno(status); + + /* This will mark the node clean */ + flags = le32_to_cpu(fe->id1.journal1.ij_flags); + flags &= ~OCFS2_JOURNAL_DIRTY_FL; + fe->id1.journal1.ij_flags = cpu_to_le32(flags); + + /* Increment recovery generation to indicate successful recovery */ + ocfs2_bump_recovery_generation(fe); + osb->slot_recovery_generations[slot_num] = + ocfs2_get_recovery_generation(fe); + + status = ocfs2_write_block(osb, bh, inode); + if (status < 0) + mlog_errno(status); + + if (!igrab(inode)) + BUG(); + + jbd2_journal_destroy(journal); + +done: + /* drop the lock on this nodes journal */ + if (got_lock) + ocfs2_inode_unlock(inode, 1); + + if (inode) + iput(inode); + + 
brelse(bh); + + mlog_exit(status); + return status; +} + +/* + * Do the most important parts of node recovery: + * - Replay its journal + * - Stamp a clean local allocator file + * - Stamp a clean truncate log + * - Mark the node clean + * + * If this function completes without error, a node in OCFS2 can be + * said to have been safely recovered. As a result, failure during the + * second part of a node's recovery process (local alloc recovery) is + * far less concerning. + */ +static int ocfs2_recover_node(struct ocfs2_super *osb, + int node_num) +{ + int status = 0; + int slot_num; + struct ocfs2_dinode *la_copy = NULL; + struct ocfs2_dinode *tl_copy = NULL; + + mlog_entry("(node_num=%d, osb->node_num = %d)\n", + node_num, osb->node_num); + + mlog(0, "checking node %d\n", node_num); + + /* Should not ever be called to recover ourselves -- in that + * case we should've called ocfs2_journal_load instead. */ + BUG_ON(osb->node_num == node_num); + + slot_num = ocfs2_node_num_to_slot(osb, node_num); + if (slot_num == -ENOENT) { + status = 0; + mlog(0, "no slot for this node, so no recovery required.\n"); + goto done; + } + + mlog(0, "node %d was using slot %d\n", node_num, slot_num); + + status = ocfs2_replay_journal(osb, node_num, slot_num); + if (status < 0) { + if (status == -EBUSY) { + mlog(0, "Skipping recovery for slot %u (node %u) " + "as another node has recovered it\n", slot_num, + node_num); + status = 0; + goto done; + } + mlog_errno(status); + goto done; + } + + /* Stamp a clean local alloc file AFTER recovering the journal... */ + status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy); + if (status < 0) { + mlog_errno(status); + goto done; + } + + /* An error from begin_truncate_log_recovery is not + * serious enough to warrant halting the rest of + * recovery. */ + status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy); + if (status < 0) + mlog_errno(status); + + /* Likewise, this would be a strange but ultimately not so + * harmful place to get an error... */ + status = ocfs2_clear_slot(osb, slot_num); + if (status < 0) + mlog_errno(status); + + /* This will kfree the memory pointed to by la_copy and tl_copy */ + ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy, + tl_copy); + + status = 0; +done: + + mlog_exit(status); + return status; +} + +/* Test node liveness by trylocking its journal. If we get the lock, + * we drop it here. Return 0 if we got the lock, -EAGAIN if the node is + * still alive (we couldn't get the lock) and < 0 on error. */ +static int ocfs2_trylock_journal(struct ocfs2_super *osb, + int slot_num) +{ + int status, flags; + struct inode *inode = NULL; + + inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, + slot_num); + if (inode == NULL) { + mlog(ML_ERROR, "access error\n"); + status = -EACCES; + goto bail; + } + if (is_bad_inode(inode)) { + mlog(ML_ERROR, "access error (bad inode)\n"); + iput(inode); + inode = NULL; + status = -EACCES; + goto bail; + } + SET_INODE_JOURNAL(inode); + + flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE; + status = ocfs2_inode_lock_full(inode, NULL, 1, flags); + if (status < 0) { + if (status != -EAGAIN) + mlog_errno(status); + goto bail; + } + + ocfs2_inode_unlock(inode, 1); +bail: + if (inode) + iput(inode); + + return status; +} + +/* Call this underneath ocfs2_super_lock. It also assumes that the + * slot info struct has been updated from disk.
*/ +int ocfs2_mark_dead_nodes(struct ocfs2_super *osb) +{ + unsigned int node_num; + int status, i; + u32 gen; + struct buffer_head *bh = NULL; + struct ocfs2_dinode *di; + + /* This is called with the super block cluster lock, so we + * know that the slot map can't change underneath us. */ + + for (i = 0; i < osb->max_slots; i++) { + /* Read journal inode to get the recovery generation */ + status = ocfs2_read_journal_inode(osb, i, &bh, NULL); + if (status) { + mlog_errno(status); + goto bail; + } + di = (struct ocfs2_dinode *)bh->b_data; + gen = ocfs2_get_recovery_generation(di); + brelse(bh); + bh = NULL; + + spin_lock(&osb->osb_lock); + osb->slot_recovery_generations[i] = gen; + + mlog(0, "Slot %u recovery generation is %u\n", i, + osb->slot_recovery_generations[i]); + + if (i == osb->slot_num) { + spin_unlock(&osb->osb_lock); + continue; + } + + status = ocfs2_slot_to_node_num_locked(osb, i, &node_num); + if (status == -ENOENT) { + spin_unlock(&osb->osb_lock); + continue; + } + + if (__ocfs2_recovery_map_test(osb, node_num)) { + spin_unlock(&osb->osb_lock); + continue; + } + spin_unlock(&osb->osb_lock); + + /* Ok, we have a slot occupied by another node which + * is not in the recovery map. We trylock his journal + * file here to test if he's alive. */ + status = ocfs2_trylock_journal(osb, i); + if (!status) { + /* Since we're called from mount, we know that + * the recovery thread can't race us on + * setting / checking the recovery bits. */ + ocfs2_recovery_thread(osb, node_num); + } else if ((status < 0) && (status != -EAGAIN)) { + mlog_errno(status); + goto bail; + } + } + + status = 0; +bail: + mlog_exit(status); + return status; +} + +struct ocfs2_orphan_filldir_priv { + struct inode *head; + struct ocfs2_super *osb; +}; + +static int ocfs2_orphan_filldir(void *priv, const char *name, int name_len, + loff_t pos, u64 ino, unsigned type) +{ + struct ocfs2_orphan_filldir_priv *p = priv; + struct inode *iter; + + if (name_len == 1 && !strncmp(".", name, 1)) + return 0; + if (name_len == 2 && !strncmp("..", name, 2)) + return 0; + + /* Skip bad inodes so that recovery can continue */ + iter = ocfs2_iget(p->osb, ino, + OCFS2_FI_FLAG_ORPHAN_RECOVERY, 0); + if (IS_ERR(iter)) + return 0; + + mlog(0, "queue orphan %llu\n", + (unsigned long long)OCFS2_I(iter)->ip_blkno); + /* No locking is required for the next_orphan queue as there + * is only ever a single process doing orphan recovery. 
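+ * The result is a LIFO list threaded through ip_next_orphan; + * ocfs2_recover_orphans() later drains it roughly as: + * + * iter = OCFS2_I(inode)->ip_next_orphan; + * iput(inode); + * inode = iter;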
*/ + OCFS2_I(iter)->ip_next_orphan = p->head; + p->head = iter; + + return 0; +} + +static int ocfs2_queue_orphans(struct ocfs2_super *osb, + int slot, + struct inode **head) +{ + int status; + struct inode *orphan_dir_inode = NULL; + struct ocfs2_orphan_filldir_priv priv; + loff_t pos = 0; + + priv.osb = osb; + priv.head = *head; + + orphan_dir_inode = ocfs2_get_system_file_inode(osb, + ORPHAN_DIR_SYSTEM_INODE, + slot); + if (!orphan_dir_inode) { + status = -ENOENT; + mlog_errno(status); + return status; + } + + mutex_lock(&orphan_dir_inode->i_mutex); + status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); + if (status < 0) { + mlog_errno(status); + goto out; + } + + status = ocfs2_dir_foreach(orphan_dir_inode, &pos, &priv, + ocfs2_orphan_filldir); + if (status) { + mlog_errno(status); + goto out_cluster; + } + + *head = priv.head; + +out_cluster: + ocfs2_inode_unlock(orphan_dir_inode, 0); +out: + mutex_unlock(&orphan_dir_inode->i_mutex); + iput(orphan_dir_inode); + return status; +} + +static int ocfs2_orphan_recovery_can_continue(struct ocfs2_super *osb, + int slot) +{ + int ret; + + spin_lock(&osb->osb_lock); + ret = !osb->osb_orphan_wipes[slot]; + spin_unlock(&osb->osb_lock); + return ret; +} + +static void ocfs2_mark_recovering_orphan_dir(struct ocfs2_super *osb, + int slot) +{ + spin_lock(&osb->osb_lock); + /* Mark ourselves such that new processes in delete_inode() + * know to quit early. */ + ocfs2_node_map_set_bit(osb, &osb->osb_recovering_orphan_dirs, slot); + while (osb->osb_orphan_wipes[slot]) { + /* If any processes are already in the middle of an + * orphan wipe on this dir, then we need to wait for + * them. */ + spin_unlock(&osb->osb_lock); + wait_event_interruptible(osb->osb_wipe_event, + ocfs2_orphan_recovery_can_continue(osb, slot)); + spin_lock(&osb->osb_lock); + } + spin_unlock(&osb->osb_lock); +} + +static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb, + int slot) +{ + ocfs2_node_map_clear_bit(osb, &osb->osb_recovering_orphan_dirs, slot); +} + +/* + * Orphan recovery. Each mounted node has it's own orphan dir which we + * must run during recovery. Our strategy here is to build a list of + * the inodes in the orphan dir and iget/iput them. The VFS does + * (most) of the rest of the work. + * + * Orphan recovery can happen at any time, not just mount so we have a + * couple of extra considerations. + * + * - We grab as many inodes as we can under the orphan dir lock - + * doing iget() outside the orphan dir risks getting a reference on + * an invalid inode. + * - We must be sure not to deadlock with other processes on the + * system wanting to run delete_inode(). This can happen when they go + * to lock the orphan dir and the orphan recovery process attempts to + * iget() inside the orphan dir lock. This can be avoided by + * advertising our state to ocfs2_delete_inode(). + */ +static int ocfs2_recover_orphans(struct ocfs2_super *osb, + int slot) +{ + int ret = 0; + struct inode *inode = NULL; + struct inode *iter; + struct ocfs2_inode_info *oi; + + mlog(0, "Recover inodes from orphan dir in slot %d\n", slot); + + ocfs2_mark_recovering_orphan_dir(osb, slot); + ret = ocfs2_queue_orphans(osb, slot, &inode); + ocfs2_clear_recovering_orphan_dir(osb, slot); + + /* Error here should be noted, but we want to continue with as + * many queued inodes as we've got. 
*/ + if (ret) + mlog_errno(ret); + + while (inode) { + oi = OCFS2_I(inode); + mlog(0, "iput orphan %llu\n", (unsigned long long)oi->ip_blkno); + + iter = oi->ip_next_orphan; + + spin_lock(&oi->ip_lock); + /* The remote delete code may have set these on the + * assumption that the other node would wipe them + * successfully. If they are still in the node's + * orphan dir, we need to reset that state. */ + oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE); + + /* Set the proper information to get us going into + * ocfs2_delete_inode. */ + oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; + spin_unlock(&oi->ip_lock); + + iput(inode); + + inode = iter; + } + + return ret; +} + +static int ocfs2_wait_on_mount(struct ocfs2_super *osb) +{ + /* This check is good because ocfs2 will wait on our recovery + * thread before changing it to something other than MOUNTED + * or DISABLED. */ + wait_event(osb->osb_mount_event, + atomic_read(&osb->vol_state) == VOLUME_MOUNTED || + atomic_read(&osb->vol_state) == VOLUME_DISABLED); + + /* If there's an error on mount, then we may never get to the + * MOUNTED flag, but this is set right before + * dismount_volume() so we can trust it. */ + if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) { + mlog(0, "mount error, exiting!\n"); + return -EBUSY; + } + + return 0; +} + +static int ocfs2_commit_thread(void *arg) +{ + int status; + struct ocfs2_super *osb = arg; + struct ocfs2_journal *journal = osb->journal; + + /* we can trust j_num_trans here because _should_stop() is only set in + * shutdown and nobody other than ourselves should be able to start + * transactions. committing on shutdown might take a few iterations + * as final transactions put deleted inodes on the list */ + while (!(kthread_should_stop() && + atomic_read(&journal->j_num_trans) == 0)) { + + wait_event_interruptible(osb->checkpoint_event, + atomic_read(&journal->j_num_trans) + || kthread_should_stop()); + + status = ocfs2_commit_cache(osb); + if (status < 0) + mlog_errno(status); + + if (kthread_should_stop() && atomic_read(&journal->j_num_trans)){ + mlog(ML_KTHREAD, + "commit_thread: %u transactions pending on " + "shutdown\n", + atomic_read(&journal->j_num_trans)); + } + } + + return 0; +} + +/* Reads all the journal inodes without taking any cluster locks. Used + * for hard readonly access to determine whether any journal requires + * recovery. Also used to refresh the recovery generation numbers after + * a journal has been recovered by another node. + */ +int ocfs2_check_journals_nolocks(struct ocfs2_super *osb) +{ + int ret = 0; + unsigned int slot; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di; + int journal_dirty = 0; + + for(slot = 0; slot < osb->max_slots; slot++) { + ret = ocfs2_read_journal_inode(osb, slot, &di_bh, NULL); + if (ret) { + mlog_errno(ret); + goto out; + } + + di = (struct ocfs2_dinode *) di_bh->b_data; + + osb->slot_recovery_generations[slot] = + ocfs2_get_recovery_generation(di); + + if (le32_to_cpu(di->id1.journal1.ij_flags) & + OCFS2_JOURNAL_DIRTY_FL) + journal_dirty = 1; + + brelse(di_bh); + di_bh = NULL; + } + +out: + if (journal_dirty) + ret = -EROFS; + return ret; +} diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h new file mode 100644 index 0000000..3657926 --- /dev/null +++ b/fs/ocfs2/journal.h @@ -0,0 +1,454 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * journal.h + * + * Defines journalling api and structures. + * + * Copyright (C) 2003, 2005 Oracle. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_JOURNAL_H +#define OCFS2_JOURNAL_H + +#include <linux/fs.h> +#ifndef CONFIG_OCFS2_COMPAT_JBD +# include <linux/jbd2.h> +#else +# include <linux/jbd.h> +# include "ocfs2_jbd_compat.h" +#endif + +enum ocfs2_journal_state { + OCFS2_JOURNAL_FREE = 0, + OCFS2_JOURNAL_LOADED, + OCFS2_JOURNAL_IN_SHUTDOWN, +}; + +struct ocfs2_super; +struct ocfs2_dinode; + +struct ocfs2_journal { + enum ocfs2_journal_state j_state; /* Journals current state */ + + journal_t *j_journal; /* The kernels journal type */ + struct inode *j_inode; /* Kernel inode pointing to + * this journal */ + struct ocfs2_super *j_osb; /* pointer to the super + * block for the node + * we're currently + * running on -- not + * necessarily the super + * block from the node + * which we usually run + * from (recovery, + * etc) */ + struct buffer_head *j_bh; /* Journal disk inode block */ + atomic_t j_num_trans; /* Number of transactions + * currently in the system. */ + unsigned long j_trans_id; + struct rw_semaphore j_trans_barrier; + wait_queue_head_t j_checkpointed; + + spinlock_t j_lock; + struct list_head j_la_cleanups; + struct work_struct j_recovery_work; +}; + +extern spinlock_t trans_inc_lock; + +/* wrap j_trans_id so we never have it equal to zero. */ +static inline unsigned long ocfs2_inc_trans_id(struct ocfs2_journal *j) +{ + unsigned long old_id; + spin_lock(&trans_inc_lock); + old_id = j->j_trans_id++; + if (unlikely(!j->j_trans_id)) + j->j_trans_id = 1; + spin_unlock(&trans_inc_lock); + return old_id; +} + +static inline void ocfs2_set_inode_lock_trans(struct ocfs2_journal *journal, + struct inode *inode) +{ + spin_lock(&trans_inc_lock); + OCFS2_I(inode)->ip_last_trans = journal->j_trans_id; + spin_unlock(&trans_inc_lock); +} + +/* Used to figure out whether it's safe to drop a metadata lock on an + * inode. Returns true if all the inodes changes have been + * checkpointed to disk. You should be holding the spinlock on the + * metadata lock while calling this to be sure that nobody can take + * the lock and put it on another transaction. */ +static inline int ocfs2_inode_fully_checkpointed(struct inode *inode) +{ + int ret; + struct ocfs2_journal *journal = OCFS2_SB(inode->i_sb)->journal; + + spin_lock(&trans_inc_lock); + ret = time_after(journal->j_trans_id, OCFS2_I(inode)->ip_last_trans); + spin_unlock(&trans_inc_lock); + return ret; +} + +/* convenience function to check if an inode is still new (has never + * hit disk) Will do you a favor and set created_trans = 0 when you've + * been checkpointed. returns '1' if the inode is still new. */ +static inline int ocfs2_inode_is_new(struct inode *inode) +{ + int ret; + + /* System files are never "new" as they're written out by + * mkfs. 
This helps us early during mount, before we have the + * journal open and j_trans_id could be junk. */ + if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) + return 0; + spin_lock(&trans_inc_lock); + ret = !(time_after(OCFS2_SB(inode->i_sb)->journal->j_trans_id, + OCFS2_I(inode)->ip_created_trans)); + if (!ret) + OCFS2_I(inode)->ip_created_trans = 0; + spin_unlock(&trans_inc_lock); + return ret; +} + +static inline void ocfs2_inode_set_new(struct ocfs2_super *osb, + struct inode *inode) +{ + spin_lock(&trans_inc_lock); + OCFS2_I(inode)->ip_created_trans = osb->journal->j_trans_id; + spin_unlock(&trans_inc_lock); +} + +/* Exported only for the journal struct init code in super.c. Do not call. */ +void ocfs2_complete_recovery(struct work_struct *work); +void ocfs2_wait_for_recovery(struct ocfs2_super *osb); + +int ocfs2_recovery_init(struct ocfs2_super *osb); +void ocfs2_recovery_exit(struct ocfs2_super *osb); + +/* + * Journal Control: + * Initialize, Load, Shutdown, Wipe a journal. + * + * ocfs2_journal_init - Initialize journal structures in the OSB. + * ocfs2_journal_load - Load the given journal off disk. Replay it if + * there's transactions still in there. + * ocfs2_journal_shutdown - Shutdown a journal, this will flush all + * uncommitted, uncheckpointed transactions. + * ocfs2_journal_wipe - Wipe transactions from a journal. Optionally + * zero out each block. + * ocfs2_recovery_thread - Perform recovery on a node. osb is our own osb. + * ocfs2_mark_dead_nodes - Start recovery on nodes we won't get a heartbeat + * event on. + * ocfs2_start_checkpoint - Kick the commit thread to do a checkpoint. + */ +void ocfs2_set_journal_params(struct ocfs2_super *osb); +int ocfs2_journal_init(struct ocfs2_journal *journal, + int *dirty); +void ocfs2_journal_shutdown(struct ocfs2_super *osb); +int ocfs2_journal_wipe(struct ocfs2_journal *journal, + int full); +int ocfs2_journal_load(struct ocfs2_journal *journal, int local, + int replayed); +int ocfs2_check_journals_nolocks(struct ocfs2_super *osb); +void ocfs2_recovery_thread(struct ocfs2_super *osb, + int node_num); +int ocfs2_mark_dead_nodes(struct ocfs2_super *osb); +void ocfs2_complete_mount_recovery(struct ocfs2_super *osb); + +static inline void ocfs2_start_checkpoint(struct ocfs2_super *osb) +{ + atomic_set(&osb->needs_checkpoint, 1); + wake_up(&osb->checkpoint_event); +} + +static inline void ocfs2_checkpoint_inode(struct inode *inode) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (ocfs2_mount_local(osb)) + return; + + if (!ocfs2_inode_fully_checkpointed(inode)) { + /* WARNING: This only kicks off a single + * checkpoint. If someone races you and adds more + * metadata to the journal, you won't know, and will + * wind up waiting *alot* longer than necessary. Right + * now we only use this in clear_inode so that's + * OK. */ + ocfs2_start_checkpoint(osb); + + wait_event(osb->journal->j_checkpointed, + ocfs2_inode_fully_checkpointed(inode)); + } +} + +/* + * Transaction Handling: + * Manage the lifetime of a transaction handle. + * + * ocfs2_start_trans - Begin a transaction. Give it an upper estimate of + * the number of blocks that will be changed during + * this handle. + * ocfs2_commit_trans - Complete a handle. It might return -EIO if + * the journal was aborted. The majority of paths don't + * check the return value as an error there comes too + * late to do anything (and will be picked up in a + * later transaction). + * ocfs2_extend_trans - Extend a handle by nblocks credits. 
This may + * commit the handle to disk in the process, but will + * not release any locks taken during the transaction. + * ocfs2_journal_access - Notify the handle that we want to journal this + * buffer. Will have to call ocfs2_journal_dirty once + * we've actually dirtied it. Type is one of the + * OCFS2_JOURNAL_ACCESS_* values defined below. + * ocfs2_journal_dirty - Mark a journalled buffer as having dirty data. + * ocfs2_jbd2_file_inode - Mark an inode so that its data goes out before + * the current handle commits. + */ + +/* You must always start_trans with a number of buffs > 0, but it's + * perfectly legal to go through an entire transaction without having + * dirtied any buffers. */ +handle_t *ocfs2_start_trans(struct ocfs2_super *osb, + int max_buffs); +int ocfs2_commit_trans(struct ocfs2_super *osb, + handle_t *handle); +int ocfs2_extend_trans(handle_t *handle, int nblocks); + +/* + * Create access is for when we get a newly created buffer and we're + * not gonna read it off disk, but rather fill it ourselves. Right + * now, we don't do anything special with this (it turns into a write + * request), but this is a good placeholder in case we do... + * + * Write access is for when we read a block off disk and are going to + * modify it. This way the journalling layer knows it may need to make + * a copy of that block (if it's part of another, uncommitted + * transaction) before we do so. + */ +#define OCFS2_JOURNAL_ACCESS_CREATE 0 +#define OCFS2_JOURNAL_ACCESS_WRITE 1 +#define OCFS2_JOURNAL_ACCESS_UNDO 2 + +int ocfs2_journal_access(handle_t *handle, + struct inode *inode, + struct buffer_head *bh, + int type); +/* + * A word about the journal_access/journal_dirty "dance". It is + * entirely legal to journal_access a buffer more than once (as long + * as the access type is the same -- I'm not sure what will happen if + * the access type is different, but that should never happen anyway). It is + * also legal to journal_dirty a buffer more than once. In fact, you + * can even journal_access a buffer after you've done a + * journal_access/journal_dirty pair. The only thing you cannot do, + * however, is journal_dirty a buffer which you haven't yet passed to + * journal_access at least once. + * + * That said, 99% of the time this doesn't matter and this is what the + * path looks like: + * + * <read a bh> + * ocfs2_journal_access(handle, bh, OCFS2_JOURNAL_ACCESS_WRITE); + * <modify the bh> + * ocfs2_journal_dirty(handle, bh); + */ +int ocfs2_journal_dirty(handle_t *handle, + struct buffer_head *bh); +#ifdef CONFIG_OCFS2_COMPAT_JBD +int ocfs2_journal_dirty_data(handle_t *handle, + struct buffer_head *bh); +#endif + +/* + * Credit Macros: + * Convenience macros to calculate the number of credits needed. + * + * For convenience's sake, I have a set of macros here which calculate + * the *maximum* number of sectors which will be changed for various + * metadata updates. + */ + +/* simple file updates like chmod, etc. */ +#define OCFS2_INODE_UPDATE_CREDITS 1 + +/* extended attribute block update */ +#define OCFS2_XATTR_BLOCK_UPDATE_CREDITS 1 + +/* group extend. inode update and last group update. */ +#define OCFS2_GROUP_EXTEND_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) + +/* group add. inode update and the new group update. */ +#define OCFS2_GROUP_ADD_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) + +/* get one bit out of a suballocator: dinode + group descriptor + + * prev. group desc. if we relink.
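+ * That is one journalled block each for the suballocator dinode, the
+ * group descriptor the bit comes from, and, at worst, the descriptor
+ * of the previous group if a relink happens; hence the value (3)
+ * below.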
*/ +#define OCFS2_SUBALLOC_ALLOC (3) + +#define OCFS2_INLINE_TO_EXTENTS_CREDITS (OCFS2_SUBALLOC_ALLOC \ + + OCFS2_INODE_UPDATE_CREDITS) + +/* dinode + group descriptor update. We don't relink on free yet. */ +#define OCFS2_SUBALLOC_FREE (2) + +#define OCFS2_TRUNCATE_LOG_UPDATE OCFS2_INODE_UPDATE_CREDITS +#define OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC (OCFS2_SUBALLOC_FREE \ + + OCFS2_TRUNCATE_LOG_UPDATE) + +#define OCFS2_REMOVE_EXTENT_CREDITS (OCFS2_TRUNCATE_LOG_UPDATE + OCFS2_INODE_UPDATE_CREDITS) + +/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + + * bitmap block for the new bit) */ +#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2) + +/* parent fe, parent block, new file entry, inode alloc fe, inode alloc + * group descriptor + mkdir/symlink blocks */ +#define OCFS2_MKNOD_CREDITS (3 + OCFS2_SUBALLOC_ALLOC \ + + OCFS2_DIR_LINK_ADDITIONAL_CREDITS) + +/* local alloc metadata change + main bitmap updates */ +#define OCFS2_WINDOW_MOVE_CREDITS (OCFS2_INODE_UPDATE_CREDITS \ + + OCFS2_SUBALLOC_ALLOC + OCFS2_SUBALLOC_FREE) + +/* used when we don't need an allocation change for a dir extend. One + * for the dinode, one for the new block. */ +#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) + +/* file update (nlink, etc) + directory mtime/ctime + dir entry block */ +#define OCFS2_LINK_CREDITS (2*OCFS2_INODE_UPDATE_CREDITS + 1) + +/* inode + dir inode (if we unlink a dir), + dir entry block + orphan + * dir inode link */ +#define OCFS2_UNLINK_CREDITS (2 * OCFS2_INODE_UPDATE_CREDITS + 1 \ + + OCFS2_LINK_CREDITS) + +/* dinode + orphan dir dinode + inode alloc dinode + orphan dir entry + + * inode alloc group descriptor */ +#define OCFS2_DELETE_INODE_CREDITS (3 * OCFS2_INODE_UPDATE_CREDITS + 1 + 1) + +/* dinode update, old dir dinode update, new dir dinode update, old + * dir dir entry, new dir dir entry, dir entry update for renaming + * directory + target unlink */ +#define OCFS2_RENAME_CREDITS (3 * OCFS2_INODE_UPDATE_CREDITS + 3 \ + + OCFS2_UNLINK_CREDITS) + +/* global bitmap dinode, group desc., relinked group, + * suballocator dinode, group desc., relinked group, + * dinode, xattr block */ +#define OCFS2_XATTR_BLOCK_CREATE_CREDITS (OCFS2_SUBALLOC_ALLOC * 2 + \ + + OCFS2_INODE_UPDATE_CREDITS \ + + OCFS2_XATTR_BLOCK_UPDATE_CREDITS) + +/* + * Please note that the caller must make sure that root_el is the root + * of extent tree. So for an inode, it should be &fe->id2.i_list. Otherwise + * the result may be wrong. + */ +static inline int ocfs2_calc_extend_credits(struct super_block *sb, + struct ocfs2_extent_list *root_el, + u32 bits_wanted) +{ + int bitmap_blocks, sysfile_bitmap_blocks, extent_blocks; + + /* bitmap dinode, group desc. + relinked group. */ + bitmap_blocks = OCFS2_SUBALLOC_ALLOC; + + /* we might need to shift tree depth so lets assume an + * absolute worst case of complete fragmentation. Even with + * that, we only need one update for the dinode, and then + * however many metadata chunks needed * a remaining suballoc + * alloc. */ + sysfile_bitmap_blocks = 1 + + (OCFS2_SUBALLOC_ALLOC - 1) * ocfs2_extend_meta_needed(root_el); + + /* this does not include *new* metadata blocks, which are + * accounted for in sysfile_bitmap_blocks. root_el + + * prev. last_eb_blk + blocks along edge of tree. + * calc_symlink_credits passes because we just need 1 + * credit for the dinode there. 
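+ *
+ * A worked example, assuming a tree of depth 2 and (hypothetically)
+ * ocfs2_extend_meta_needed() returning 3: bitmap_blocks = 3,
+ * sysfile_bitmap_blocks = 1 + (3 - 1) * 3 = 7 and extent_blocks =
+ * 1 + 1 + 2 = 4, for a total estimate of 14 credits.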
*/ + extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); + + return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks; +} + +static inline int ocfs2_calc_symlink_credits(struct super_block *sb) +{ + int blocks = OCFS2_MKNOD_CREDITS; + + /* links can be longer than one block so we may update many + * within our single allocated extent. */ + blocks += ocfs2_clusters_to_blocks(sb, 1); + + return blocks; +} + +static inline int ocfs2_calc_group_alloc_credits(struct super_block *sb, + unsigned int cpg) +{ + int blocks; + int bitmap_blocks = OCFS2_SUBALLOC_ALLOC + 1; + /* parent inode update + new block group header + bitmap inode update + + bitmap blocks affected */ + blocks = 1 + 1 + 1 + bitmap_blocks; + return blocks; +} + +static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb, + unsigned int clusters_to_del, + struct ocfs2_dinode *fe, + struct ocfs2_extent_list *last_el) +{ + /* for dinode + all headers in this pass + update to next leaf */ + u16 next_free = le16_to_cpu(last_el->l_next_free_rec); + u16 tree_depth = le16_to_cpu(fe->id2.i_list.l_tree_depth); + int credits = 1 + tree_depth + 1; + int i; + + i = next_free - 1; + BUG_ON(i < 0); + + /* We may be deleting metadata blocks, so metadata alloc dinode + + one desc. block for each possible delete. */ + if (tree_depth && next_free == 1 && + ocfs2_rec_clusters(last_el, &last_el->l_recs[i]) == clusters_to_del) + credits += 1 + tree_depth; + + /* update to the truncate log. */ + credits += OCFS2_TRUNCATE_LOG_UPDATE; + + return credits; +} + +static inline int ocfs2_jbd2_file_inode(handle_t *handle, struct inode *inode) +{ + return jbd2_journal_file_inode(handle, &OCFS2_I(inode)->ip_jinode); +} + +static inline int ocfs2_begin_ordered_truncate(struct inode *inode, + loff_t new_size) +{ + return jbd2_journal_begin_ordered_truncate( + OCFS2_SB(inode->i_sb)->journal->j_journal, + &OCFS2_I(inode)->ip_jinode, + new_size); +} + +#endif /* OCFS2_JOURNAL_H */ diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c new file mode 100644 index 0000000..687b287 --- /dev/null +++ b/fs/ocfs2/localalloc.c @@ -0,0 +1,1294 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * localalloc.c + * + * Node local data allocation + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/bitops.h> +#include <linux/debugfs.h> + +#define MLOG_MASK_PREFIX ML_DISK_ALLOC +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dlmglue.h" +#include "inode.h" +#include "journal.h" +#include "localalloc.h" +#include "suballoc.h" +#include "super.h" +#include "sysfile.h" + +#include "buffer_head_io.h" + +#define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab)) + +static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc); + +static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, + struct ocfs2_dinode *alloc, + u32 numbits); + +static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc); + +static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_dinode *alloc, + struct inode *main_bm_inode, + struct buffer_head *main_bm_bh); + +static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, + struct ocfs2_alloc_context **ac, + struct inode **bitmap_inode, + struct buffer_head **bitmap_bh); + +static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac); + +static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, + struct inode *local_alloc_inode); + +#ifdef CONFIG_OCFS2_FS_STATS + +static int ocfs2_la_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define LA_DEBUG_BUF_SZ PAGE_CACHE_SIZE +#define LA_DEBUG_VER 1 +static ssize_t ocfs2_la_debug_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + static DEFINE_MUTEX(la_debug_mutex); + struct ocfs2_super *osb = file->private_data; + int written, ret; + char *buf = osb->local_alloc_debug_buf; + + mutex_lock(&la_debug_mutex); + memset(buf, 0, LA_DEBUG_BUF_SZ); + + written = snprintf(buf, LA_DEBUG_BUF_SZ, + "0x%x\t0x%llx\t%u\t%u\t0x%x\n", + LA_DEBUG_VER, + (unsigned long long)osb->la_last_gd, + osb->local_alloc_default_bits, + osb->local_alloc_bits, osb->local_alloc_state); + + ret = simple_read_from_buffer(userbuf, count, ppos, buf, written); + + mutex_unlock(&la_debug_mutex); + return ret; +} + +static const struct file_operations ocfs2_la_debug_fops = { + .open = ocfs2_la_debug_open, + .read = ocfs2_la_debug_read, +}; + +static void ocfs2_init_la_debug(struct ocfs2_super *osb) +{ + osb->local_alloc_debug_buf = kmalloc(LA_DEBUG_BUF_SZ, GFP_NOFS); + if (!osb->local_alloc_debug_buf) + return; + + osb->local_alloc_debug = debugfs_create_file("local_alloc_stats", + S_IFREG|S_IRUSR, + osb->osb_debug_root, + osb, + &ocfs2_la_debug_fops); + if (!osb->local_alloc_debug) { + kfree(osb->local_alloc_debug_buf); + osb->local_alloc_debug_buf = NULL; + } +} + +static void ocfs2_shutdown_la_debug(struct ocfs2_super *osb) +{ + if (osb->local_alloc_debug) + debugfs_remove(osb->local_alloc_debug); + + if (osb->local_alloc_debug_buf) + kfree(osb->local_alloc_debug_buf); + + osb->local_alloc_debug_buf = NULL; + osb->local_alloc_debug = NULL; +} +#else /* CONFIG_OCFS2_FS_STATS */ +static void ocfs2_init_la_debug(struct ocfs2_super *osb) +{ + return; +} +static void ocfs2_shutdown_la_debug(struct ocfs2_super *osb) +{ + return; +} +#endif + +static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb) +{ + return (osb->local_alloc_state == OCFS2_LA_THROTTLED || + osb->local_alloc_state == OCFS2_LA_ENABLED); +} + +void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super 
*osb, + unsigned int num_clusters) +{ + spin_lock(&osb->osb_lock); + if (osb->local_alloc_state == OCFS2_LA_DISABLED || + osb->local_alloc_state == OCFS2_LA_THROTTLED) + if (num_clusters >= osb->local_alloc_default_bits) { + cancel_delayed_work(&osb->la_enable_wq); + osb->local_alloc_state = OCFS2_LA_ENABLED; + } + spin_unlock(&osb->osb_lock); +} + +void ocfs2_la_enable_worker(struct work_struct *work) +{ + struct ocfs2_super *osb = + container_of(work, struct ocfs2_super, + la_enable_wq.work); + spin_lock(&osb->osb_lock); + osb->local_alloc_state = OCFS2_LA_ENABLED; + spin_unlock(&osb->osb_lock); +} + +/* + * Tell us whether a given allocation should use the local alloc + * file. Otherwise, it has to go to the main bitmap. + * + * This function does semi-dirty reads of local alloc size and state! + * This is ok however, as the values are re-checked once under mutex. + */ +int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits) +{ + int ret = 0; + int la_bits; + + spin_lock(&osb->osb_lock); + la_bits = osb->local_alloc_bits; + + if (!ocfs2_la_state_enabled(osb)) + goto bail; + + /* la_bits should be at least twice the size (in clusters) of + * a new block group. We want to be sure block group + * allocations go through the local alloc, so allow an + * allocation to take up to half the bitmap. */ + if (bits > (la_bits / 2)) + goto bail; + + ret = 1; +bail: + mlog(0, "state=%d, bits=%llu, la_bits=%d, ret=%d\n", + osb->local_alloc_state, (unsigned long long)bits, la_bits, ret); + spin_unlock(&osb->osb_lock); + return ret; +} + +int ocfs2_load_local_alloc(struct ocfs2_super *osb) +{ + int status = 0; + struct ocfs2_dinode *alloc = NULL; + struct buffer_head *alloc_bh = NULL; + u32 num_used; + struct inode *inode = NULL; + struct ocfs2_local_alloc *la; + + mlog_entry_void(); + + ocfs2_init_la_debug(osb); + + if (osb->local_alloc_bits == 0) + goto bail; + + if (osb->local_alloc_bits >= osb->bitmap_cpg) { + mlog(ML_NOTICE, "Requested local alloc window %d is larger " + "than max possible %u. Using defaults.\n", + osb->local_alloc_bits, (osb->bitmap_cpg - 1)); + osb->local_alloc_bits = + ocfs2_megabytes_to_clusters(osb->sb, + OCFS2_DEFAULT_LOCAL_ALLOC_SIZE); + } + + /* read the alloc off disk */ + inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE, + osb->slot_num); + if (!inode) { + status = -EINVAL; + mlog_errno(status); + goto bail; + } + + status = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, + &alloc_bh, OCFS2_BH_IGNORE_CACHE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + alloc = (struct ocfs2_dinode *) alloc_bh->b_data; + la = OCFS2_LOCAL_ALLOC(alloc); + + if (!(le32_to_cpu(alloc->i_flags) & + (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) { + mlog(ML_ERROR, "Invalid local alloc inode, %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + status = -EINVAL; + goto bail; + } + + if ((la->la_size == 0) || + (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) { + mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n", + le16_to_cpu(la->la_size)); + status = -EINVAL; + goto bail; + } + + /* do a little verification. */ + num_used = ocfs2_local_alloc_count_bits(alloc); + + /* hopefully the local alloc has always been recovered before + * we load it. 
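+ * "Recovered" here means completely clean: no bits set in the window
+ * bitmap (num_used == 0) and i_used, i_total and la_bm_off all zero,
+ * which is exactly what the check below verifies.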
*/ + if (num_used + || alloc->id1.bitmap1.i_used + || alloc->id1.bitmap1.i_total + || la->la_bm_off) + mlog(ML_ERROR, "Local alloc hasn't been recovered!\n" + "found = %u, set = %u, taken = %u, off = %u\n", + num_used, le32_to_cpu(alloc->id1.bitmap1.i_used), + le32_to_cpu(alloc->id1.bitmap1.i_total), + OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); + + osb->local_alloc_bh = alloc_bh; + osb->local_alloc_state = OCFS2_LA_ENABLED; + +bail: + if (status < 0) + brelse(alloc_bh); + if (inode) + iput(inode); + + if (status < 0) + ocfs2_shutdown_la_debug(osb); + + mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits); + + mlog_exit(status); + return status; +} + +/* + * return any unused bits to the bitmap and write out a clean + * local_alloc. + * + * local_alloc_bh is optional. If not passed, we will simply use the + * one off osb. If you do pass it however, be warned that it *will* be + * returned brelse'd and NULL'd out.*/ +void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) +{ + int status; + handle_t *handle; + struct inode *local_alloc_inode = NULL; + struct buffer_head *bh = NULL; + struct buffer_head *main_bm_bh = NULL; + struct inode *main_bm_inode = NULL; + struct ocfs2_dinode *alloc_copy = NULL; + struct ocfs2_dinode *alloc = NULL; + + mlog_entry_void(); + + cancel_delayed_work(&osb->la_enable_wq); + flush_workqueue(ocfs2_wq); + + ocfs2_shutdown_la_debug(osb); + + if (osb->local_alloc_state == OCFS2_LA_UNUSED) + goto out; + + local_alloc_inode = + ocfs2_get_system_file_inode(osb, + LOCAL_ALLOC_SYSTEM_INODE, + osb->slot_num); + if (!local_alloc_inode) { + status = -ENOENT; + mlog_errno(status); + goto out; + } + + osb->local_alloc_state = OCFS2_LA_DISABLED; + + main_bm_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (!main_bm_inode) { + status = -EINVAL; + mlog_errno(status); + goto out; + } + + mutex_lock(&main_bm_inode->i_mutex); + + status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); + if (status < 0) { + mlog_errno(status); + goto out_mutex; + } + + /* WINDOW_MOVE_CREDITS is a bit heavy... */ + handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); + if (IS_ERR(handle)) { + mlog_errno(PTR_ERR(handle)); + handle = NULL; + goto out_unlock; + } + + bh = osb->local_alloc_bh; + alloc = (struct ocfs2_dinode *) bh->b_data; + + alloc_copy = kmalloc(bh->b_size, GFP_NOFS); + if (!alloc_copy) { + status = -ENOMEM; + goto out_commit; + } + memcpy(alloc_copy, alloc, bh->b_size); + + status = ocfs2_journal_access(handle, local_alloc_inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + + ocfs2_clear_local_alloc(alloc); + + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + + brelse(bh); + osb->local_alloc_bh = NULL; + osb->local_alloc_state = OCFS2_LA_UNUSED; + + status = ocfs2_sync_local_to_main(osb, handle, alloc_copy, + main_bm_inode, main_bm_bh); + if (status < 0) + mlog_errno(status); + +out_commit: + ocfs2_commit_trans(osb, handle); + +out_unlock: + brelse(main_bm_bh); + + ocfs2_inode_unlock(main_bm_inode, 1); + +out_mutex: + mutex_unlock(&main_bm_inode->i_mutex); + iput(main_bm_inode); + +out: + if (local_alloc_inode) + iput(local_alloc_inode); + + if (alloc_copy) + kfree(alloc_copy); + + mlog_exit_void(); +} + +/* + * We want to free the bitmap bits outside of any recovery context as + * we'll need a cluster lock to do so, but we must clear the local + * alloc before giving up the recovered nodes journal. 
To solve this, + * we kmalloc a copy of the local alloc before it's changed, for the + * caller to process with ocfs2_complete_local_alloc_recovery + */ +int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb, + int slot_num, + struct ocfs2_dinode **alloc_copy) +{ + int status = 0; + struct buffer_head *alloc_bh = NULL; + struct inode *inode = NULL; + struct ocfs2_dinode *alloc; + + mlog_entry("(slot_num = %d)\n", slot_num); + + *alloc_copy = NULL; + + inode = ocfs2_get_system_file_inode(osb, + LOCAL_ALLOC_SYSTEM_INODE, + slot_num); + if (!inode) { + status = -EINVAL; + mlog_errno(status); + goto bail; + } + + mutex_lock(&inode->i_mutex); + + status = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, + &alloc_bh, OCFS2_BH_IGNORE_CACHE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + *alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL); + if (!(*alloc_copy)) { + status = -ENOMEM; + goto bail; + } + memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size); + + alloc = (struct ocfs2_dinode *) alloc_bh->b_data; + ocfs2_clear_local_alloc(alloc); + + status = ocfs2_write_block(osb, alloc_bh, inode); + if (status < 0) + mlog_errno(status); + +bail: + if ((status < 0) && (*alloc_copy)) { + kfree(*alloc_copy); + *alloc_copy = NULL; + } + + brelse(alloc_bh); + + if (inode) { + mutex_unlock(&inode->i_mutex); + iput(inode); + } + + mlog_exit(status); + return status; +} + +/* + * Step 2: By now, we've completed the journal recovery, we've stamped + * a clean local alloc on disk and dropped the node out of the + * recovery map. Dlm locks will no longer stall, so let's clear out the + * main bitmap. + */ +int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb, + struct ocfs2_dinode *alloc) +{ + int status; + handle_t *handle; + struct buffer_head *main_bm_bh = NULL; + struct inode *main_bm_inode; + + mlog_entry_void(); + + main_bm_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (!main_bm_inode) { + status = -EINVAL; + mlog_errno(status); + goto out; + } + + mutex_lock(&main_bm_inode->i_mutex); + + status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); + if (status < 0) { + mlog_errno(status); + goto out_mutex; + } + + handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto out_unlock; + } + + /* we want the bitmap change to be recorded on disk asap */ + handle->h_sync = 1; + + status = ocfs2_sync_local_to_main(osb, handle, alloc, + main_bm_inode, main_bm_bh); + if (status < 0) + mlog_errno(status); + + ocfs2_commit_trans(osb, handle); + +out_unlock: + ocfs2_inode_unlock(main_bm_inode, 1); + +out_mutex: + mutex_unlock(&main_bm_inode->i_mutex); + + brelse(main_bm_bh); + + iput(main_bm_inode); + +out: + if (!status) + ocfs2_init_inode_steal_slot(osb); + mlog_exit(status); + return status; +} + +/* Check to see if the local alloc window is within ac->ac_max_block */ +static int ocfs2_local_alloc_in_range(struct inode *inode, + struct ocfs2_alloc_context *ac, + u32 bits_wanted) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_dinode *alloc; + struct ocfs2_local_alloc *la; + int start; + u64 block_off; + + if (!ac->ac_max_block) + return 1; + + alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; + la = OCFS2_LOCAL_ALLOC(alloc); + + start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted); + if (start == -1) { + mlog_errno(-ENOSPC); + return 0; + } + + /* + * Converting (bm_off + start +
bits_wanted) to blocks gives us + * the blkno just past our actual allocation. This is perfect + * to compare with ac_max_block. + */ + block_off = ocfs2_clusters_to_blocks(inode->i_sb, + le32_to_cpu(la->la_bm_off) + + start + bits_wanted); + mlog(0, "Checking %llu against %llu\n", + (unsigned long long)block_off, + (unsigned long long)ac->ac_max_block); + if (block_off > ac->ac_max_block) + return 0; + + return 1; +} + +/* + * make sure we've got at least bits_wanted contiguous bits in the + * local alloc. You lose them when you drop i_mutex. + * + * We will add ourselves to the transaction passed in, but may start + * our own in order to shift windows. + */ +int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, + u32 bits_wanted, + struct ocfs2_alloc_context *ac) +{ + int status; + struct ocfs2_dinode *alloc; + struct inode *local_alloc_inode; + unsigned int free_bits; + + mlog_entry_void(); + + BUG_ON(!ac); + + local_alloc_inode = + ocfs2_get_system_file_inode(osb, + LOCAL_ALLOC_SYSTEM_INODE, + osb->slot_num); + if (!local_alloc_inode) { + status = -ENOENT; + mlog_errno(status); + goto bail; + } + + mutex_lock(&local_alloc_inode->i_mutex); + + /* + * We must double check state and allocator bits because + * another process may have changed them while holding i_mutex. + */ + spin_lock(&osb->osb_lock); + if (!ocfs2_la_state_enabled(osb) || + (bits_wanted > osb->local_alloc_bits)) { + spin_unlock(&osb->osb_lock); + status = -ENOSPC; + goto bail; + } + spin_unlock(&osb->osb_lock); + + alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; + +#ifdef CONFIG_OCFS2_DEBUG_FS + if (le32_to_cpu(alloc->id1.bitmap1.i_used) != + ocfs2_local_alloc_count_bits(alloc)) { + ocfs2_error(osb->sb, "local alloc inode %llu says it has " + "%u used bits, but a count shows %u", + (unsigned long long)le64_to_cpu(alloc->i_blkno), + le32_to_cpu(alloc->id1.bitmap1.i_used), + ocfs2_local_alloc_count_bits(alloc)); + status = -EIO; + goto bail; + } +#endif + + free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) - + le32_to_cpu(alloc->id1.bitmap1.i_used); + if (bits_wanted > free_bits) { + /* uhoh, window change time. */ + status = + ocfs2_local_alloc_slide_window(osb, local_alloc_inode); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + /* + * Under certain conditions, the window slide code + * might have reduced the number of bits available or + * disabled the local alloc entirely. Re-check + * here and return -ENOSPC if necessary. + */ + status = -ENOSPC; + if (!ocfs2_la_state_enabled(osb)) + goto bail; + + free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) - + le32_to_cpu(alloc->id1.bitmap1.i_used); + if (bits_wanted > free_bits) + goto bail; + } + + if (ac->ac_max_block) + mlog(0, "Calling in_range for max block %llu\n", + (unsigned long long)ac->ac_max_block); + + if (!ocfs2_local_alloc_in_range(local_alloc_inode, ac, + bits_wanted)) { + /* + * The window is outside ac->ac_max_block. + * This errno tells the caller to keep localalloc enabled + * but to get the allocation from the main bitmap.
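+ * (Presumably the caller then falls back to reserving directly from
+ * the global bitmap; that fallback lives in the suballocator code,
+ * not here.)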
+ */ + status = -EFBIG; + goto bail; + } + + ac->ac_inode = local_alloc_inode; + /* We should never use localalloc from another slot */ + ac->ac_alloc_slot = osb->slot_num; + ac->ac_which = OCFS2_AC_USE_LOCAL; + get_bh(osb->local_alloc_bh); + ac->ac_bh = osb->local_alloc_bh; + status = 0; +bail: + if (status < 0 && local_alloc_inode) { + mutex_unlock(&local_alloc_inode->i_mutex); + iput(local_alloc_inode); + } + + mlog(0, "bits=%d, slot=%d, ret=%d\n", bits_wanted, osb->slot_num, + status); + + mlog_exit(status); + return status; +} + +int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac, + u32 bits_wanted, + u32 *bit_off, + u32 *num_bits) +{ + int status, start; + struct inode *local_alloc_inode; + void *bitmap; + struct ocfs2_dinode *alloc; + struct ocfs2_local_alloc *la; + + mlog_entry_void(); + BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL); + + local_alloc_inode = ac->ac_inode; + alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; + la = OCFS2_LOCAL_ALLOC(alloc); + + start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted); + if (start == -1) { + /* TODO: Shouldn't we just BUG here? */ + status = -ENOSPC; + mlog_errno(status); + goto bail; + } + + bitmap = la->la_bitmap; + *bit_off = le32_to_cpu(la->la_bm_off) + start; + /* local alloc is always contiguous by nature -- we never + * delete bits from it! */ + *num_bits = bits_wanted; + + status = ocfs2_journal_access(handle, local_alloc_inode, + osb->local_alloc_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + while(bits_wanted--) + ocfs2_set_bit(start++, bitmap); + + le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits); + + status = ocfs2_journal_dirty(handle, osb->local_alloc_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = 0; +bail: + mlog_exit(status); + return status; +} + +static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc) +{ + int i; + u8 *buffer; + u32 count = 0; + struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); + + mlog_entry_void(); + + buffer = la->la_bitmap; + for (i = 0; i < le16_to_cpu(la->la_size); i++) + count += hweight8(buffer[i]); + + mlog_exit(count); + return count; +} + +static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, + struct ocfs2_dinode *alloc, + u32 numbits) +{ + int numfound, bitoff, left, startoff, lastzero; + void *bitmap = NULL; + + mlog_entry("(numbits wanted = %u)\n", numbits); + + if (!alloc->id1.bitmap1.i_total) { + mlog(0, "No bits in my window!\n"); + bitoff = -1; + goto bail; + } + + bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap; + + numfound = bitoff = startoff = 0; + lastzero = -1; + left = le32_to_cpu(alloc->id1.bitmap1.i_total); + while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) { + if (bitoff == left) { + /* mlog(0, "bitoff (%d) == left", bitoff); */ + break; + } + /* mlog(0, "Found a zero: bitoff = %d, startoff = %d, " + "numfound = %d\n", bitoff, startoff, numfound);*/ + + /* Ok, we found a zero bit... is it contig. 
or do we + * start over?*/ + if (bitoff == startoff) { + /* we found a zero */ + numfound++; + startoff++; + } else { + /* got a zero after some ones */ + numfound = 1; + startoff = bitoff+1; + } + /* we got everything we needed */ + if (numfound == numbits) { + /* mlog(0, "Found it all!\n"); */ + break; + } + } + + mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff, + numfound); + + if (numfound == numbits) + bitoff = startoff - numfound; + else + bitoff = -1; + +bail: + mlog_exit(bitoff); + return bitoff; +} + +static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc) +{ + struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); + int i; + mlog_entry_void(); + + alloc->id1.bitmap1.i_total = 0; + alloc->id1.bitmap1.i_used = 0; + la->la_bm_off = 0; + for(i = 0; i < le16_to_cpu(la->la_size); i++) + la->la_bitmap[i] = 0; + + mlog_exit_void(); +} + +#if 0 +/* turn this on and uncomment below to aid debugging window shifts. */ +static void ocfs2_verify_zero_bits(unsigned long *bitmap, + unsigned int start, + unsigned int count) +{ + unsigned int tmp = count; + while(tmp--) { + if (ocfs2_test_bit(start + tmp, bitmap)) { + printk("ocfs2_verify_zero_bits: start = %u, count = " + "%u\n", start, count); + printk("ocfs2_verify_zero_bits: bit %u is set!", + start + tmp); + BUG(); + } + } +} +#endif + +/* + * sync the local alloc to main bitmap. + * + * assumes you've already locked the main bitmap -- the bitmap inode + * passed is used for caching. + */ +static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_dinode *alloc, + struct inode *main_bm_inode, + struct buffer_head *main_bm_bh) +{ + int status = 0; + int bit_off, left, count, start; + u64 la_start_blk; + u64 blkno; + void *bitmap; + struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc); + + mlog_entry("total = %u, used = %u\n", + le32_to_cpu(alloc->id1.bitmap1.i_total), + le32_to_cpu(alloc->id1.bitmap1.i_used)); + + if (!alloc->id1.bitmap1.i_total) { + mlog(0, "nothing to sync!\n"); + goto bail; + } + + if (le32_to_cpu(alloc->id1.bitmap1.i_used) == + le32_to_cpu(alloc->id1.bitmap1.i_total)) { + mlog(0, "all bits were taken!\n"); + goto bail; + } + + la_start_blk = ocfs2_clusters_to_blocks(osb->sb, + le32_to_cpu(la->la_bm_off)); + bitmap = la->la_bitmap; + start = count = bit_off = 0; + left = le32_to_cpu(alloc->id1.bitmap1.i_total); + + while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) + != -1) { + if ((bit_off < left) && (bit_off == start)) { + count++; + start++; + continue; + } + if (count) { + blkno = la_start_blk + + ocfs2_clusters_to_blocks(osb->sb, + start - count); + + mlog(0, "freeing %u bits starting at local alloc bit " + "%u (la_start_blk = %llu, blkno = %llu)\n", + count, start - count, + (unsigned long long)la_start_blk, + (unsigned long long)blkno); + + status = ocfs2_free_clusters(handle, main_bm_inode, + main_bm_bh, blkno, count); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + if (bit_off >= left) + break; + count = 1; + start = bit_off + 1; + } + +bail: + mlog_exit(status); + return status; +} + +enum ocfs2_la_event { + OCFS2_LA_EVENT_SLIDE, /* Normal window slide. */ + OCFS2_LA_EVENT_FRAGMENTED, /* The global bitmap has + * enough bits theoretically + * free, but a contiguous + * allocation could not be + * found. */ + OCFS2_LA_EVENT_ENOSPC, /* Global bitmap doesn't have + * enough bits free to satisfy + * our request. 
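+ *
+ * Roughly: a SLIDE restores the default window size (unless the local
+ * alloc is throttled), while FRAGMENTED and ENOSPC halve it; see
+ * ocfs2_recalc_la_window() below. Assuming (for illustration) a
+ * default window of 8 megabytes, repeated ENOSPC events shrink it
+ * 8MB -> 4MB -> 2MB; the next halving would no longer stay above the
+ * 1MB floor, so the local alloc is disabled until the delayed worker
+ * re-enables it.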
*/ +}; +#define OCFS2_LA_ENABLE_INTERVAL (30 * HZ) +/* + * Given an event, calculate the size of our next local alloc window. + * + * This should always be called under i_mutex of the local alloc inode + * so that local alloc disabling doesn't race with processes trying to + * use the allocator. + * + * Returns the state which the local alloc was left in. This value can + * be ignored by some paths. + */ +static int ocfs2_recalc_la_window(struct ocfs2_super *osb, + enum ocfs2_la_event event) +{ + unsigned int bits; + int state; + + spin_lock(&osb->osb_lock); + if (osb->local_alloc_state == OCFS2_LA_DISABLED) { + WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED); + goto out_unlock; + } + + /* + * ENOSPC and fragmentation are treated similarly for now. + */ + if (event == OCFS2_LA_EVENT_ENOSPC || + event == OCFS2_LA_EVENT_FRAGMENTED) { + /* + * We ran out of contiguous space in the primary + * bitmap. Drastically reduce the number of bits used + * by local alloc until we have to disable it. + */ + bits = osb->local_alloc_bits >> 1; + if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) { + /* + * By setting state to THROTTLED, we'll keep + * the number of local alloc bits used down + * until an event occurs which would give us + * reason to assume the bitmap situation might + * have changed. + */ + osb->local_alloc_state = OCFS2_LA_THROTTLED; + osb->local_alloc_bits = bits; + } else { + osb->local_alloc_state = OCFS2_LA_DISABLED; + } + queue_delayed_work(ocfs2_wq, &osb->la_enable_wq, + OCFS2_LA_ENABLE_INTERVAL); + goto out_unlock; + } + + /* + * Don't increase the size of the local alloc window until we + * know we might be able to fulfill the request. Otherwise, we + * risk bouncing around the global bitmap during periods of + * low space. + */ + if (osb->local_alloc_state != OCFS2_LA_THROTTLED) + osb->local_alloc_bits = osb->local_alloc_default_bits; + +out_unlock: + state = osb->local_alloc_state; + spin_unlock(&osb->osb_lock); + + return state; +} + +static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, + struct ocfs2_alloc_context **ac, + struct inode **bitmap_inode, + struct buffer_head **bitmap_bh) +{ + int status; + + *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); + if (!(*ac)) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + +retry_enospc: + (*ac)->ac_bits_wanted = osb->local_alloc_bits; + + status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); + if (status == -ENOSPC) { + if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) == + OCFS2_LA_DISABLED) + goto bail; + + ocfs2_free_ac_resource(*ac); + memset(*ac, 0, sizeof(struct ocfs2_alloc_context)); + goto retry_enospc; + } + if (status < 0) { + mlog_errno(status); + goto bail; + } + + *bitmap_inode = (*ac)->ac_inode; + igrab(*bitmap_inode); + *bitmap_bh = (*ac)->ac_bh; + get_bh(*bitmap_bh); + status = 0; +bail: + if ((status < 0) && *ac) { + ocfs2_free_alloc_context(*ac); + *ac = NULL; + } + + mlog_exit(status); + return status; +} + +/* + * pass it the bitmap lock in lock_bh if you have it. 
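+ * (Note: the current signature below takes no lock_bh argument; the
+ * main bitmap lock is taken beforehand by
+ * ocfs2_local_alloc_reserve_for_window(), as
+ * ocfs2_local_alloc_slide_window() notes.)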
*/ +static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac) +{ + int status = 0; + u32 cluster_off, cluster_count; + struct ocfs2_dinode *alloc = NULL; + struct ocfs2_local_alloc *la; + + mlog_entry_void(); + + alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; + la = OCFS2_LOCAL_ALLOC(alloc); + + if (alloc->id1.bitmap1.i_total) + mlog(0, "asking me to alloc a new window over a non-empty " + "one\n"); + + mlog(0, "Allocating %u clusters for a new window.\n", + osb->local_alloc_bits); + + /* Instruct the allocation code to try the most recently used + * cluster group. We'll re-record the group used this pass + * below. */ + ac->ac_last_group = osb->la_last_gd; + + /* we used the generic suballoc reserve function, but we set + * everything up nicely, so there's no reason why we can't use + * the more specific cluster api to claim bits. */ + status = ocfs2_claim_clusters(osb, handle, ac, osb->local_alloc_bits, + &cluster_off, &cluster_count); + if (status == -ENOSPC) { +retry_enospc: + /* + * Note: We could also try syncing the journal here to + * allow use of any free bits which the current + * transaction can't give us access to. --Mark + */ + if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) == + OCFS2_LA_DISABLED) + goto bail; + + status = ocfs2_claim_clusters(osb, handle, ac, + osb->local_alloc_bits, + &cluster_off, + &cluster_count); + if (status == -ENOSPC) + goto retry_enospc; + /* + * We only shrunk the *minimum* number of bits in our + * request - it's entirely possible that the allocator + * might give us more than we asked for. + */ + if (status == 0) { + spin_lock(&osb->osb_lock); + osb->local_alloc_bits = cluster_count; + spin_unlock(&osb->osb_lock); + } + } + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + osb->la_last_gd = ac->ac_last_group; + + la->la_bm_off = cpu_to_le32(cluster_off); + alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count); + /* just in case... In the future when we find space ourselves, + * we don't have to get all contiguous -- but we'll have to + * set all previously used bits in bitmap and update + * la_bits_set before setting the bits in the main bitmap. */ + alloc->id1.bitmap1.i_used = 0; + memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0, + le16_to_cpu(la->la_size)); + + mlog(0, "New window allocated:\n"); + mlog(0, "window la_bm_off = %u\n", + OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); + mlog(0, "window bits = %u\n", le32_to_cpu(alloc->id1.bitmap1.i_total)); + +bail: + mlog_exit(status); + return status; +} + +/* Note that we do *NOT* lock the local alloc inode here as + * it's been locked already for us. */ +static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, + struct inode *local_alloc_inode) +{ + int status = 0; + struct buffer_head *main_bm_bh = NULL; + struct inode *main_bm_inode = NULL; + handle_t *handle = NULL; + struct ocfs2_dinode *alloc; + struct ocfs2_dinode *alloc_copy = NULL; + struct ocfs2_alloc_context *ac = NULL; + + mlog_entry_void(); + + ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE); + + /* This will lock the main bitmap for us.
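+ * The sequence below is, in order: reserve a new window in the main
+ * bitmap, snapshot the current local alloc, zero it in the journal,
+ * return the snapshot's bits to the main bitmap, and finally claim
+ * the new window, all under one handle, so (as the comment below
+ * notes) an error partway through cannot lead to a double free at
+ * shutdown.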
*/ + status = ocfs2_local_alloc_reserve_for_window(osb, + &ac, + &main_bm_inode, + &main_bm_bh); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto bail; + } + + alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; + + /* We want to clear the local alloc before doing anything + * else, so that if we error later during this operation, + * local alloc shutdown won't try to double free main bitmap + * bits. Make a copy so the sync function knows which bits to + * free. */ + alloc_copy = kmalloc(osb->local_alloc_bh->b_size, GFP_NOFS); + if (!alloc_copy) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size); + + status = ocfs2_journal_access(handle, local_alloc_inode, + osb->local_alloc_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + ocfs2_clear_local_alloc(alloc); + + status = ocfs2_journal_dirty(handle, osb->local_alloc_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_sync_local_to_main(osb, handle, alloc_copy, + main_bm_inode, main_bm_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_local_alloc_new_window(osb, handle, ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + atomic_inc(&osb->alloc_stats.moves); + + status = 0; +bail: + if (handle) + ocfs2_commit_trans(osb, handle); + + brelse(main_bm_bh); + + if (main_bm_inode) + iput(main_bm_inode); + + if (alloc_copy) + kfree(alloc_copy); + + if (ac) + ocfs2_free_alloc_context(ac); + + mlog_exit(status); + return status; +} + diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h new file mode 100644 index 0000000..ac5ea9f --- /dev/null +++ b/fs/ocfs2/localalloc.h @@ -0,0 +1,59 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * localalloc.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef OCFS2_LOCALALLOC_H +#define OCFS2_LOCALALLOC_H + +int ocfs2_load_local_alloc(struct ocfs2_super *osb); + +void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb); + +int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb, + int node_num, + struct ocfs2_dinode **alloc_copy); + +int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb, + struct ocfs2_dinode *alloc); + +int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, + u64 bits); + +struct ocfs2_alloc_context; +int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, + u32 bits_wanted, + struct ocfs2_alloc_context *ac); + +int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac, + u32 bits_wanted, + u32 *bit_off, + u32 *num_bits); + +void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb, + unsigned int num_clusters); +void ocfs2_la_enable_worker(struct work_struct *work); + +#endif /* OCFS2_LOCALALLOC_H */ diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c new file mode 100644 index 0000000..544ac62 --- /dev/null +++ b/fs/ocfs2/locks.c @@ -0,0 +1,140 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * locks.c + * + * Userspace file locking support + * + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/fcntl.h> + +#define MLOG_MASK_PREFIX ML_INODE +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "dlmglue.h" +#include "file.h" +#include "inode.h" +#include "locks.h" + +static int ocfs2_do_flock(struct file *file, struct inode *inode, + int cmd, struct file_lock *fl) +{ + int ret = 0, level = 0, trylock = 0; + struct ocfs2_file_private *fp = file->private_data; + struct ocfs2_lock_res *lockres = &fp->fp_flock; + + if (fl->fl_type == F_WRLCK) + level = 1; + if (!IS_SETLKW(cmd)) + trylock = 1; + + mutex_lock(&fp->fp_mutex); + + if (lockres->l_flags & OCFS2_LOCK_ATTACHED && + lockres->l_level > LKM_NLMODE) { + int old_level = 0; + + if (lockres->l_level == LKM_EXMODE) + old_level = 1; + + if (level == old_level) + goto out; + + /* + * Converting an existing lock is not guaranteed to be + * atomic, so we can get away with simply unlocking + * here and allowing the lock code to try at the new + * level. 
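+ * (This leaves a window in which another node can grab the lock
+ * between our unlock and the new request. flock(2) allows that: a
+ * conversion may drop the existing lock before the new one is
+ * established.)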
+ */ + + flock_lock_file_wait(file, + &(struct file_lock){.fl_type = F_UNLCK}); + + ocfs2_file_unlock(file); + } + + ret = ocfs2_file_lock(file, level, trylock); + if (ret) { + if (ret == -EAGAIN && trylock) + ret = -EWOULDBLOCK; + else + mlog_errno(ret); + goto out; + } + + ret = flock_lock_file_wait(file, fl); + +out: + mutex_unlock(&fp->fp_mutex); + + return ret; +} + +static int ocfs2_do_funlock(struct file *file, int cmd, struct file_lock *fl) +{ + int ret; + struct ocfs2_file_private *fp = file->private_data; + + mutex_lock(&fp->fp_mutex); + ocfs2_file_unlock(file); + ret = flock_lock_file_wait(file, fl); + mutex_unlock(&fp->fp_mutex); + + return ret; +} + +/* + * Overall flow of ocfs2_flock() was influenced by gfs2_flock(). + */ +int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl) +{ + struct inode *inode = file->f_mapping->host; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (!(fl->fl_flags & FL_FLOCK)) + return -ENOLCK; + if (__mandatory_lock(inode)) + return -ENOLCK; + + if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) || + ocfs2_mount_local(osb)) + return flock_lock_file_wait(file, fl); + + if (fl->fl_type == F_UNLCK) + return ocfs2_do_funlock(file, cmd, fl); + else + return ocfs2_do_flock(file, inode, cmd, fl); +} + +int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl) +{ + struct inode *inode = file->f_mapping->host; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (!(fl->fl_flags & FL_POSIX)) + return -ENOLCK; + if (__mandatory_lock(inode)) + return -ENOLCK; + + return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl); +} diff --git a/fs/ocfs2/locks.h b/fs/ocfs2/locks.h new file mode 100644 index 0000000..496d488 --- /dev/null +++ b/fs/ocfs2/locks.h @@ -0,0 +1,32 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * locks.h + * + * Function prototypes for Userspace file locking support + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_LOCKS_H +#define OCFS2_LOCKS_H + +int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl); +int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl); + +#endif /* OCFS2_LOCKS_H */ diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c new file mode 100644 index 0000000..eea1d24 --- /dev/null +++ b/fs/ocfs2/mmap.c @@ -0,0 +1,224 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * mmap.c + * + * Code to deal with the mess that is clustered mmap. + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/pagemap.h> +#include <linux/uio.h> +#include <linux/signal.h> +#include <linux/rbtree.h> + +#define MLOG_MASK_PREFIX ML_FILE_IO +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "aops.h" +#include "dlmglue.h" +#include "file.h" +#include "inode.h" +#include "mmap.h" + +static inline int ocfs2_vm_op_block_sigs(sigset_t *blocked, sigset_t *oldset) +{ + /* The best way to deal with signals in the vm path is + * to block them upfront, rather than allowing the + * locking paths to return -ERESTARTSYS. */ + sigfillset(blocked); + + /* We should technically never get a bad return value + * from sigprocmask */ + return sigprocmask(SIG_BLOCK, blocked, oldset); +} + +static inline int ocfs2_vm_op_unblock_sigs(sigset_t *oldset) +{ + return sigprocmask(SIG_SETMASK, oldset, NULL); +} + +static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) +{ + sigset_t blocked, oldset; + int error, ret; + + mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff); + + error = ocfs2_vm_op_block_sigs(&blocked, &oldset); + if (error < 0) { + mlog_errno(error); + ret = VM_FAULT_SIGBUS; + goto out; + } + + ret = filemap_fault(area, vmf); + + error = ocfs2_vm_op_unblock_sigs(&oldset); + if (error < 0) + mlog_errno(error); +out: + mlog_exit_ptr(vmf->page); + return ret; +} + +static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh, + struct page *page) +{ + int ret; + struct address_space *mapping = inode->i_mapping; + loff_t pos = page_offset(page); + unsigned int len = PAGE_CACHE_SIZE; + pgoff_t last_index; + struct page *locked_page = NULL; + void *fsdata; + loff_t size = i_size_read(inode); + + /* + * Another node might have truncated while we were waiting on + * cluster locks. + */ + last_index = size >> PAGE_CACHE_SHIFT; + if (page->index > last_index) { + ret = -EINVAL; + goto out; + } + + /* + * The i_size check above doesn't catch the case where nodes + * truncated and then re-extended the file. We'll re-check the + * page mapping after taking the page lock inside of + * ocfs2_write_begin_nolock(). + */ + if (!PageUptodate(page) || page->mapping != inode->i_mapping) { + /* + * The page has been unmapped in ocfs2_data_downconvert_worker, + * so return 0 here and let the VFS retry. + */ + ret = 0; + goto out; + } + + /* + * Call ocfs2_write_begin() and ocfs2_write_end() to take + * advantage of the allocation code there. We pass a write + * length of the whole page (chopped to i_size) to make sure + * the whole thing is allocated. + * + * Since we know the page is up to date, we don't have to + * worry about ocfs2_write_begin() skipping some buffer reads + * because the "write" would invalidate their data.
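+ * + * (Worked example, assuming a 4k PAGE_CACHE_SIZE: with i_size = + * 10000, last_index = 10000 >> 12 = 2, so a fault on page index 2 + * writes len = 10000 & ~PAGE_CACHE_MASK = 1808 bytes rather than a + * full page.)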
+ */ + if (page->index == last_index) + len = size & ~PAGE_CACHE_MASK; + + ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page, + &fsdata, di_bh, page); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + + ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, + fsdata); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + BUG_ON(ret != len); + ret = 0; +out: + return ret; +} + +static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page) +{ + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; + struct buffer_head *di_bh = NULL; + sigset_t blocked, oldset; + int ret, ret2; + + ret = ocfs2_vm_op_block_sigs(&blocked, &oldset); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + /* + * The cluster locks taken will block a truncate from another + * node. Taking the data lock will also ensure that we don't + * attempt page truncation as part of a downconvert. + */ + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + /* + * The alloc sem should be enough to serialize with + * ocfs2_truncate_file() changing i_size as well as any thread + * modifying the inode btree. + */ + down_write(&OCFS2_I(inode)->ip_alloc_sem); + + ret = __ocfs2_page_mkwrite(inode, di_bh, page); + + up_write(&OCFS2_I(inode)->ip_alloc_sem); + + brelse(di_bh); + ocfs2_inode_unlock(inode, 1); + +out: + ret2 = ocfs2_vm_op_unblock_sigs(&oldset); + if (ret2 < 0) + mlog_errno(ret2); + + return ret; +} + +static struct vm_operations_struct ocfs2_file_vm_ops = { + .fault = ocfs2_fault, + .page_mkwrite = ocfs2_page_mkwrite, +}; + +int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) +{ + int ret = 0, lock_level = 0; + + ret = ocfs2_inode_lock_atime(file->f_dentry->d_inode, + file->f_vfsmnt, &lock_level); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + ocfs2_inode_unlock(file->f_dentry->d_inode, lock_level); +out: + vma->vm_ops = &ocfs2_file_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; + return 0; +} + diff --git a/fs/ocfs2/mmap.h b/fs/ocfs2/mmap.h new file mode 100644 index 0000000..1274ee0 --- /dev/null +++ b/fs/ocfs2/mmap.h @@ -0,0 +1,6 @@ +#ifndef OCFS2_MMAP_H +#define OCFS2_MMAP_H + +int ocfs2_mmap(struct file *file, struct vm_area_struct *vma); + +#endif /* OCFS2_MMAP_H */ diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c new file mode 100644 index 0000000..f4967e6 --- /dev/null +++ b/fs/ocfs2/namei.c @@ -0,0 +1,1896 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * namei.c + * + * Create and rename file, directory, symlinks + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * Portions of this code from linux/fs/ext3/dir.c + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/fs/minix/dir.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> + +#define MLOG_MASK_PREFIX ML_NAMEI +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dcache.h" +#include "dir.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "file.h" +#include "inode.h" +#include "journal.h" +#include "namei.h" +#include "suballoc.h" +#include "super.h" +#include "symlink.h" +#include "sysfile.h" +#include "uptodate.h" +#include "xattr.h" + +#include "buffer_head_io.h" + +static int ocfs2_mknod_locked(struct ocfs2_super *osb, + struct inode *dir, + struct dentry *dentry, int mode, + dev_t dev, + struct buffer_head **new_fe_bh, + struct buffer_head *parent_fe_bh, + handle_t *handle, + struct inode **ret_inode, + struct ocfs2_alloc_context *inode_ac); + +static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, + struct inode **ret_orphan_dir, + struct inode *inode, + char *name, + struct buffer_head **de_bh); + +static int ocfs2_orphan_add(struct ocfs2_super *osb, + handle_t *handle, + struct inode *inode, + struct ocfs2_dinode *fe, + char *name, + struct buffer_head *de_bh, + struct inode *orphan_dir_inode); + +static int ocfs2_create_symlink_data(struct ocfs2_super *osb, + handle_t *handle, + struct inode *inode, + const char *symname); + +/* An orphan dir name is an 8 byte value, printed as a hex string */ +#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) + +static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd) +{ + int status; + u64 blkno; + struct inode *inode = NULL; + struct dentry *ret; + struct ocfs2_inode_info *oi; + + mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, + dentry->d_name.len, dentry->d_name.name); + + if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) { + ret = ERR_PTR(-ENAMETOOLONG); + goto bail; + } + + mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len, + dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno); + + status = ocfs2_inode_lock(dir, NULL, 0); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + ret = ERR_PTR(status); + goto bail; + } + + status = ocfs2_lookup_ino_from_name(dir, dentry->d_name.name, + dentry->d_name.len, &blkno); + if (status < 0) + goto bail_add; + + inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0); + if (IS_ERR(inode)) { + ret = ERR_PTR(-EACCES); + goto bail_unlock; + } + + oi = OCFS2_I(inode); + /* Clear any orphaned state... If we were able to look up the + * inode from a directory, it certainly can't be orphaned. We + * might have the bad state from a node which intended to + * orphan this inode but crashed before it could commit the + * unlink. */ + spin_lock(&oi->ip_lock); + oi->ip_flags &= ~OCFS2_INODE_MAYBE_ORPHANED; + spin_unlock(&oi->ip_lock); + +bail_add: + dentry->d_op = &ocfs2_dentry_ops; + ret = d_splice_alias(inode, dentry); + + if (inode) { + /* + * If d_splice_alias() finds a DCACHE_DISCONNECTED + * dentry, it will d_move() it on top of ours. The + * return value will indicate this, however, so in + * those cases, we switch them around for the locking + * code.
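+ * + * (DCACHE_DISCONNECTED marks a dentry created without a path to + * the dcache root, e.g. while decoding an NFS filehandle; the + * splice reconnects it under this name.)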
+ * + * NOTE: This dentry already has ->d_op set from + * ocfs2_get_parent() and ocfs2_get_dentry() + */ + if (ret) + dentry = ret; + + status = ocfs2_dentry_attach_lock(dentry, inode, + OCFS2_I(dir)->ip_blkno); + if (status) { + mlog_errno(status); + ret = ERR_PTR(status); + goto bail_unlock; + } + } + +bail_unlock: + /* Don't drop the cluster lock until *after* the d_add -- + * unlink on another node will message us to remove that + * dentry under this lock so otherwise we can race this with + * the downconvert thread and have a stale dentry. */ + ocfs2_inode_unlock(dir, 0); + +bail: + + mlog_exit_ptr(ret); + + return ret; +} + +static int ocfs2_mknod(struct inode *dir, + struct dentry *dentry, + int mode, + dev_t dev) +{ + int status = 0; + struct buffer_head *parent_fe_bh = NULL; + handle_t *handle = NULL; + struct ocfs2_super *osb; + struct ocfs2_dinode *dirfe; + struct buffer_head *new_fe_bh = NULL; + struct buffer_head *de_bh = NULL; + struct inode *inode = NULL; + struct ocfs2_alloc_context *inode_ac = NULL; + struct ocfs2_alloc_context *data_ac = NULL; + + mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, + (unsigned long)dev, dentry->d_name.len, + dentry->d_name.name); + + /* get our super block */ + osb = OCFS2_SB(dir->i_sb); + + status = ocfs2_inode_lock(dir, &parent_fe_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + return status; + } + + if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) { + status = -EMLINK; + goto leave; + } + + dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data; + if (!dirfe->i_links_count) { + /* can't make a file in a deleted directory. */ + status = -ENOENT; + goto leave; + } + + status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name, + dentry->d_name.len); + if (status) + goto leave; + + /* get a spot inside the dir. */ + status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh, + dentry->d_name.name, + dentry->d_name.len, &de_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + /* reserve an inode spot */ + status = ocfs2_reserve_new_inode(osb, &inode_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto leave; + } + + /* Reserve a cluster if creating an extent based directory. */ + if (S_ISDIR(mode) && !ocfs2_supports_inline_data(osb)) { + status = ocfs2_reserve_clusters(osb, 1, &data_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto leave; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_MKNOD_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto leave; + } + + /* do the real work now. 
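+ * (That is: claim an inode bit from the allocator, build and journal + * the new dinode, and for a directory seed it and bump the parent's + * link count -- all inside the transaction started above.)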
*/ + status = ocfs2_mknod_locked(osb, dir, dentry, mode, dev, + &new_fe_bh, parent_fe_bh, handle, + &inode, inode_ac); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + if (S_ISDIR(mode)) { + status = ocfs2_fill_new_dir(osb, handle, dir, inode, + new_fe_bh, data_ac); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_journal_access(handle, dir, parent_fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + le16_add_cpu(&dirfe->i_links_count, 1); + status = ocfs2_journal_dirty(handle, parent_fe_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + inc_nlink(dir); + } + + status = ocfs2_add_entry(handle, dentry, inode, + OCFS2_I(inode)->ip_blkno, parent_fe_bh, + de_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_dentry_attach_lock(dentry, inode, + OCFS2_I(dir)->ip_blkno); + if (status) { + mlog_errno(status); + goto leave; + } + + insert_inode_hash(inode); + dentry->d_op = &ocfs2_dentry_ops; + d_instantiate(dentry, inode); + status = 0; +leave: + if (handle) + ocfs2_commit_trans(osb, handle); + + ocfs2_inode_unlock(dir, 1); + + if (status == -ENOSPC) + mlog(0, "Disk is full\n"); + + brelse(new_fe_bh); + brelse(de_bh); + brelse(parent_fe_bh); + + if ((status < 0) && inode) + iput(inode); + + if (inode_ac) + ocfs2_free_alloc_context(inode_ac); + + if (data_ac) + ocfs2_free_alloc_context(data_ac); + + mlog_exit(status); + + return status; +} + +static int ocfs2_mknod_locked(struct ocfs2_super *osb, + struct inode *dir, + struct dentry *dentry, int mode, + dev_t dev, + struct buffer_head **new_fe_bh, + struct buffer_head *parent_fe_bh, + handle_t *handle, + struct inode **ret_inode, + struct ocfs2_alloc_context *inode_ac) +{ + int status = 0; + struct ocfs2_dinode *fe = NULL; + struct ocfs2_extent_list *fel; + u64 fe_blkno = 0; + u16 suballoc_bit; + struct inode *inode = NULL; + + mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, + (unsigned long)dev, dentry->d_name.len, + dentry->d_name.name); + + *new_fe_bh = NULL; + *ret_inode = NULL; + + status = ocfs2_claim_new_inode(osb, handle, inode_ac, &suballoc_bit, + &fe_blkno); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + inode = new_inode(dir->i_sb); + if (!inode) { + status = -ENOMEM; + mlog(ML_ERROR, "new_inode failed!\n"); + goto leave; + } + + /* populate as many fields early on as possible - many of + * these are used by the support functions here and in + * callers. 
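+ * (A new directory starts at i_nlink == 2 below, counting its own + * "." entry; the parent's ".." reference is added separately via + * inc_nlink(dir) in ocfs2_mknod().)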
*/ + inode->i_ino = ino_from_blkno(osb->sb, fe_blkno); + OCFS2_I(inode)->ip_blkno = fe_blkno; + if (S_ISDIR(mode)) + inode->i_nlink = 2; + else + inode->i_nlink = 1; + inode->i_mode = mode; + spin_lock(&osb->osb_lock); + inode->i_generation = osb->s_next_generation++; + spin_unlock(&osb->osb_lock); + + *new_fe_bh = sb_getblk(osb->sb, fe_blkno); + if (!*new_fe_bh) { + status = -EIO; + mlog_errno(status); + goto leave; + } + ocfs2_set_new_buffer_uptodate(inode, *new_fe_bh); + + status = ocfs2_journal_access(handle, inode, *new_fe_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + fe = (struct ocfs2_dinode *) (*new_fe_bh)->b_data; + memset(fe, 0, osb->sb->s_blocksize); + + fe->i_generation = cpu_to_le32(inode->i_generation); + fe->i_fs_generation = cpu_to_le32(osb->fs_generation); + fe->i_blkno = cpu_to_le64(fe_blkno); + fe->i_suballoc_bit = cpu_to_le16(suballoc_bit); + fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot); + fe->i_uid = cpu_to_le32(current->fsuid); + if (dir->i_mode & S_ISGID) { + fe->i_gid = cpu_to_le32(dir->i_gid); + if (S_ISDIR(mode)) + mode |= S_ISGID; + } else + fe->i_gid = cpu_to_le32(current->fsgid); + fe->i_mode = cpu_to_le16(mode); + if (S_ISCHR(mode) || S_ISBLK(mode)) + fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev)); + + fe->i_links_count = cpu_to_le16(inode->i_nlink); + + fe->i_last_eb_blk = 0; + strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE); + le32_add_cpu(&fe->i_flags, OCFS2_VALID_FL); + fe->i_atime = fe->i_ctime = fe->i_mtime = + cpu_to_le64(CURRENT_TIME.tv_sec); + fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec = + cpu_to_le32(CURRENT_TIME.tv_nsec); + fe->i_dtime = 0; + + /* + * If supported, directories start with inline data. + */ + if (S_ISDIR(mode) && ocfs2_supports_inline_data(osb)) { + u16 feat = le16_to_cpu(fe->i_dyn_features); + + fe->i_dyn_features = cpu_to_le16(feat | OCFS2_INLINE_DATA_FL); + + fe->id2.i_data.id_count = cpu_to_le16(ocfs2_max_inline_data(osb->sb)); + } else { + fel = &fe->id2.i_list; + fel->l_tree_depth = 0; + fel->l_next_free_rec = 0; + fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb)); + } + + status = ocfs2_journal_dirty(handle, *new_fe_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + if (ocfs2_populate_inode(inode, fe, 1) < 0) { + mlog(ML_ERROR, "populate inode failed! 
bh->b_blocknr=%llu, " + "i_blkno=%llu, i_ino=%lu\n", + (unsigned long long)(*new_fe_bh)->b_blocknr, + (unsigned long long)le64_to_cpu(fe->i_blkno), + inode->i_ino); + BUG(); + } + + ocfs2_inode_set_new(osb, inode); + if (!ocfs2_mount_local(osb)) { + status = ocfs2_create_new_inode_locks(inode); + if (status < 0) + mlog_errno(status); + } + + status = 0; /* error in ocfs2_create_new_inode_locks is not + * critical */ + + *ret_inode = inode; +leave: + if (status < 0) { + if (*new_fe_bh) { + brelse(*new_fe_bh); + *new_fe_bh = NULL; + } + if (inode) { + clear_nlink(inode); + iput(inode); + } + } + + mlog_exit(status); + return status; +} + +static int ocfs2_mkdir(struct inode *dir, + struct dentry *dentry, + int mode) +{ + int ret; + + mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, + dentry->d_name.len, dentry->d_name.name); + ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0); + mlog_exit(ret); + + return ret; +} + +static int ocfs2_create(struct inode *dir, + struct dentry *dentry, + int mode, + struct nameidata *nd) +{ + int ret; + + mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode, + dentry->d_name.len, dentry->d_name.name); + ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0); + mlog_exit(ret); + + return ret; +} + +static int ocfs2_link(struct dentry *old_dentry, + struct inode *dir, + struct dentry *dentry) +{ + handle_t *handle; + struct inode *inode = old_dentry->d_inode; + int err; + struct buffer_head *fe_bh = NULL; + struct buffer_head *parent_fe_bh = NULL; + struct buffer_head *de_bh = NULL; + struct ocfs2_dinode *fe = NULL; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + + mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino, + old_dentry->d_name.len, old_dentry->d_name.name, + dentry->d_name.len, dentry->d_name.name); + + if (S_ISDIR(inode->i_mode)) + return -EPERM; + + err = ocfs2_inode_lock(dir, &parent_fe_bh, 1); + if (err < 0) { + if (err != -ENOENT) + mlog_errno(err); + return err; + } + + if (!dir->i_nlink) { + err = -ENOENT; + goto out; + } + + err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name, + dentry->d_name.len); + if (err) + goto out; + + err = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh, + dentry->d_name.name, + dentry->d_name.len, &de_bh); + if (err < 0) { + mlog_errno(err); + goto out; + } + + err = ocfs2_inode_lock(inode, &fe_bh, 1); + if (err < 0) { + if (err != -ENOENT) + mlog_errno(err); + goto out; + } + + fe = (struct ocfs2_dinode *) fe_bh->b_data; + if (le16_to_cpu(fe->i_links_count) >= OCFS2_LINK_MAX) { + err = -EMLINK; + goto out_unlock_inode; + } + + handle = ocfs2_start_trans(osb, OCFS2_LINK_CREDITS); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + handle = NULL; + mlog_errno(err); + goto out_unlock_inode; + } + + err = ocfs2_journal_access(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (err < 0) { + mlog_errno(err); + goto out_commit; + } + + inc_nlink(inode); + inode->i_ctime = CURRENT_TIME; + fe->i_links_count = cpu_to_le16(inode->i_nlink); + fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); + fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); + + err = ocfs2_journal_dirty(handle, fe_bh); + if (err < 0) { + le16_add_cpu(&fe->i_links_count, -1); + drop_nlink(inode); + mlog_errno(err); + goto out_commit; + } + + err = ocfs2_add_entry(handle, dentry, inode, + OCFS2_I(inode)->ip_blkno, + parent_fe_bh, de_bh); + if (err) { + le16_add_cpu(&fe->i_links_count, -1); + drop_nlink(inode); + mlog_errno(err); + goto out_commit; + } + + err = ocfs2_dentry_attach_lock(dentry, inode, 
OCFS2_I(dir)->ip_blkno); + if (err) { + mlog_errno(err); + goto out_commit; + } + + atomic_inc(&inode->i_count); + dentry->d_op = &ocfs2_dentry_ops; + d_instantiate(dentry, inode); + +out_commit: + ocfs2_commit_trans(osb, handle); +out_unlock_inode: + ocfs2_inode_unlock(inode, 1); + +out: + ocfs2_inode_unlock(dir, 1); + + brelse(de_bh); + brelse(fe_bh); + brelse(parent_fe_bh); + + mlog_exit(err); + + return err; +} + +/* + * Takes and drops an exclusive lock on the given dentry. This will + * force other nodes to drop it. + */ +static int ocfs2_remote_dentry_delete(struct dentry *dentry) +{ + int ret; + + ret = ocfs2_dentry_lock(dentry, 1); + if (ret) + mlog_errno(ret); + else + ocfs2_dentry_unlock(dentry, 1); + + return ret; +} + +static inline int inode_is_unlinkable(struct inode *inode) +{ + if (S_ISDIR(inode->i_mode)) { + if (inode->i_nlink == 2) + return 1; + return 0; + } + + if (inode->i_nlink == 1) + return 1; + return 0; +} + +static int ocfs2_unlink(struct inode *dir, + struct dentry *dentry) +{ + int status; + int child_locked = 0; + struct inode *inode = dentry->d_inode; + struct inode *orphan_dir = NULL; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + u64 blkno; + struct ocfs2_dinode *fe = NULL; + struct buffer_head *fe_bh = NULL; + struct buffer_head *parent_node_bh = NULL; + handle_t *handle = NULL; + struct ocfs2_dir_entry *dirent = NULL; + struct buffer_head *dirent_bh = NULL; + char orphan_name[OCFS2_ORPHAN_NAMELEN + 1]; + struct buffer_head *orphan_entry_bh = NULL; + + mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, + dentry->d_name.len, dentry->d_name.name); + + BUG_ON(dentry->d_parent->d_inode != dir); + + mlog(0, "ino = %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); + + if (inode == osb->root_inode) { + mlog(0, "Cannot delete the root directory\n"); + return -EPERM; + } + + status = ocfs2_inode_lock(dir, &parent_node_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + return status; + } + + status = ocfs2_find_files_on_disk(dentry->d_name.name, + dentry->d_name.len, &blkno, + dir, &dirent_bh, &dirent); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto leave; + } + + if (OCFS2_I(inode)->ip_blkno != blkno) { + status = -ENOENT; + + mlog(0, "ip_blkno %llu != dirent blkno %llu ip_flags = %x\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)blkno, OCFS2_I(inode)->ip_flags); + goto leave; + } + + status = ocfs2_inode_lock(inode, &fe_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto leave; + } + child_locked = 1; + + if (S_ISDIR(inode->i_mode)) { + if (!ocfs2_empty_dir(inode)) { + status = -ENOTEMPTY; + goto leave; + } else if (inode->i_nlink != 2) { + status = -ENOTEMPTY; + goto leave; + } + } + + status = ocfs2_remote_dentry_delete(dentry); + if (status < 0) { + /* This remote delete should succeed under all normal + * circumstances. 
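+ * (Taking the dentry lock at EX forces every other node to + * downconvert, which is what invalidates their cached dentries for + * this name.)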
*/ + mlog_errno(status); + goto leave; + } + + if (inode_is_unlinkable(inode)) { + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, inode, + orphan_name, + &orphan_entry_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_UNLINK_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto leave; + } + + status = ocfs2_journal_access(handle, inode, fe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + fe = (struct ocfs2_dinode *) fe_bh->b_data; + + if (inode_is_unlinkable(inode)) { + status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name, + orphan_entry_bh, orphan_dir); + if (status < 0) { + mlog_errno(status); + goto leave; + } + } + + /* delete the name from the parent dir */ + status = ocfs2_delete_entry(handle, dir, dirent, dirent_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + if (S_ISDIR(inode->i_mode)) + drop_nlink(inode); + drop_nlink(inode); + fe->i_links_count = cpu_to_le16(inode->i_nlink); + + status = ocfs2_journal_dirty(handle, fe_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + dir->i_ctime = dir->i_mtime = CURRENT_TIME; + if (S_ISDIR(inode->i_mode)) + drop_nlink(dir); + + status = ocfs2_mark_inode_dirty(handle, dir, parent_node_bh); + if (status < 0) { + mlog_errno(status); + if (S_ISDIR(inode->i_mode)) + inc_nlink(dir); + } + +leave: + if (handle) + ocfs2_commit_trans(osb, handle); + + if (child_locked) + ocfs2_inode_unlock(inode, 1); + + ocfs2_inode_unlock(dir, 1); + + if (orphan_dir) { + /* This was locked for us in ocfs2_prepare_orphan_dir() */ + ocfs2_inode_unlock(orphan_dir, 1); + mutex_unlock(&orphan_dir->i_mutex); + iput(orphan_dir); + } + + brelse(fe_bh); + brelse(dirent_bh); + brelse(parent_node_bh); + brelse(orphan_entry_bh); + + mlog_exit(status); + + return status; +} + +/* + * The only place this should be used is rename! + * if they have the same id, then the 1st one is the only one locked. + */ +static int ocfs2_double_lock(struct ocfs2_super *osb, + struct buffer_head **bh1, + struct inode *inode1, + struct buffer_head **bh2, + struct inode *inode2) +{ + int status; + struct ocfs2_inode_info *oi1 = OCFS2_I(inode1); + struct ocfs2_inode_info *oi2 = OCFS2_I(inode2); + struct buffer_head **tmpbh; + struct inode *tmpinode; + + mlog_entry("(inode1 = %llu, inode2 = %llu)\n", + (unsigned long long)oi1->ip_blkno, + (unsigned long long)oi2->ip_blkno); + + if (*bh1) + *bh1 = NULL; + if (*bh2) + *bh2 = NULL; + + /* we always want to lock the one with the lower lockid first. */ + if (oi1->ip_blkno != oi2->ip_blkno) { + if (oi1->ip_blkno < oi2->ip_blkno) { + /* switch id1 and id2 around */ + mlog(0, "switching them around...\n"); + tmpbh = bh2; + bh2 = bh1; + bh1 = tmpbh; + + tmpinode = inode2; + inode2 = inode1; + inode1 = tmpinode; + } + /* lock id2 */ + status = ocfs2_inode_lock(inode2, bh2, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto bail; + } + } + + /* lock id1 */ + status = ocfs2_inode_lock(inode1, bh1, 1); + if (status < 0) { + /* + * An error return must mean that no cluster locks + * were held on function exit. 
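+ * + * (The lower-blkno-first ordering above is what rules out an ABBA + * deadlock: two concurrent renames locking the same pair of inodes + * still acquire them in the same global order.)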
+ */ + if (oi1->ip_blkno != oi2->ip_blkno) + ocfs2_inode_unlock(inode2, 1); + + if (status != -ENOENT) + mlog_errno(status); + } + +bail: + mlog_exit(status); + return status; +} + +static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2) +{ + ocfs2_inode_unlock(inode1, 1); + + if (inode1 != inode2) + ocfs2_inode_unlock(inode2, 1); +} + +static int ocfs2_rename(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry) +{ + int status = 0, rename_lock = 0, parents_locked = 0; + int old_child_locked = 0, new_child_locked = 0; + struct inode *old_inode = old_dentry->d_inode; + struct inode *new_inode = new_dentry->d_inode; + struct inode *orphan_dir = NULL; + struct ocfs2_dinode *newfe = NULL; + char orphan_name[OCFS2_ORPHAN_NAMELEN + 1]; + struct buffer_head *orphan_entry_bh = NULL; + struct buffer_head *newfe_bh = NULL; + struct buffer_head *old_inode_bh = NULL; + struct buffer_head *insert_entry_bh = NULL; + struct ocfs2_super *osb = NULL; + u64 newfe_blkno, old_de_ino; + handle_t *handle = NULL; + struct buffer_head *old_dir_bh = NULL; + struct buffer_head *new_dir_bh = NULL; + struct ocfs2_dir_entry *old_inode_dot_dot_de = NULL, *old_de = NULL, + *new_de = NULL; + struct buffer_head *new_de_bh = NULL, *old_de_bh = NULL; // bhs for above + struct buffer_head *old_inode_de_bh = NULL; // if old_dentry is a dir, + // this is the 1st dirent bh + nlink_t old_dir_nlink = old_dir->i_nlink; + struct ocfs2_dinode *old_di; + + /* At some point it might be nice to break this function up a + * bit. */ + + mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p, from='%.*s' to='%.*s')\n", + old_dir, old_dentry, new_dir, new_dentry, + old_dentry->d_name.len, old_dentry->d_name.name, + new_dentry->d_name.len, new_dentry->d_name.name); + + osb = OCFS2_SB(old_dir->i_sb); + + if (new_inode) { + if (!igrab(new_inode)) + BUG(); + } + + /* Assume a directory hierarchy thusly: + * a/b/c + * a/d + * a,b,c, and d are all directories. + * + * from cwd of 'a' on both nodes: + * node1: mv b/c d + * node2: mv d b/c + * + * And that's why, just like the VFS, we need a file system + * rename lock. */ + if (old_dir != new_dir && S_ISDIR(old_inode->i_mode)) { + status = ocfs2_rename_lock(osb); + if (status < 0) { + mlog_errno(status); + goto bail; + } + rename_lock = 1; + } + + /* if old and new are the same, this'll just do one lock. */ + status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, + &new_dir_bh, new_dir); + if (status < 0) { + mlog_errno(status); + goto bail; + } + parents_locked = 1; + + /* make sure both dirs have bhs + * get an extra ref on old_dir_bh if old==new */ + if (!new_dir_bh) { + if (old_dir_bh) { + new_dir_bh = old_dir_bh; + get_bh(new_dir_bh); + } else { + mlog(ML_ERROR, "no old_dir_bh!\n"); + status = -EIO; + goto bail; + } + } + + /* + * Aside from allowing a meta data update, the locking here + * also ensures that the downconvert thread on other nodes + * won't have to concurrently downconvert the inode and the + * dentry locks. 
+ */ + status = ocfs2_inode_lock(old_inode, &old_inode_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto bail; + } + old_child_locked = 1; + + status = ocfs2_remote_dentry_delete(old_dentry); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (S_ISDIR(old_inode->i_mode)) { + u64 old_inode_parent; + + status = ocfs2_find_files_on_disk("..", 2, &old_inode_parent, + old_inode, &old_inode_de_bh, + &old_inode_dot_dot_de); + if (status) { + status = -EIO; + goto bail; + } + + if (old_inode_parent != OCFS2_I(old_dir)->ip_blkno) { + status = -EIO; + goto bail; + } + + if (!new_inode && new_dir != old_dir && + new_dir->i_nlink >= OCFS2_LINK_MAX) { + status = -EMLINK; + goto bail; + } + } + + status = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, + old_dentry->d_name.len, + &old_de_ino); + if (status) { + status = -ENOENT; + goto bail; + } + + /* + * Check for inode number is _not_ due to possible IO errors. + * We might rmdir the source, keep it as pwd of some process + * and merrily kill the link to whatever was created under the + * same name. Goodbye sticky bit ;-< + */ + if (old_de_ino != OCFS2_I(old_inode)->ip_blkno) { + status = -ENOENT; + goto bail; + } + + /* check if the target already exists (in which case we need + * to delete it */ + status = ocfs2_find_files_on_disk(new_dentry->d_name.name, + new_dentry->d_name.len, + &newfe_blkno, new_dir, &new_de_bh, + &new_de); + /* The only error we allow here is -ENOENT because the new + * file not existing is perfectly valid. */ + if ((status < 0) && (status != -ENOENT)) { + /* If we cannot find the file specified we should just */ + /* return the error... */ + mlog_errno(status); + goto bail; + } + + if (!new_de && new_inode) { + /* + * Target was unlinked by another node while we were + * waiting to get to ocfs2_rename(). There isn't + * anything we can do here to help the situation, so + * bubble up the appropriate error. + */ + status = -ENOENT; + goto bail; + } + + /* In case we need to overwrite an existing file, we blow it + * away first */ + if (new_de) { + /* VFS didn't think there existed an inode here, but + * someone else in the cluster must have raced our + * rename to create one. Today we error cleanly, in + * the future we should consider calling iget to build + * a new struct inode for this entry. */ + if (!new_inode) { + status = -EACCES; + + mlog(0, "We found an inode for name %.*s but VFS " + "didn't give us one.\n", new_dentry->d_name.len, + new_dentry->d_name.name); + goto bail; + } + + if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) { + status = -EACCES; + + mlog(0, "Inode %llu and dir %llu disagree. flags = %x\n", + (unsigned long long)OCFS2_I(new_inode)->ip_blkno, + (unsigned long long)newfe_blkno, + OCFS2_I(new_inode)->ip_flags); + goto bail; + } + + status = ocfs2_inode_lock(new_inode, &newfe_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + goto bail; + } + new_child_locked = 1; + + status = ocfs2_remote_dentry_delete(new_dentry); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + newfe = (struct ocfs2_dinode *) newfe_bh->b_data; + + mlog(0, "aha rename over existing... new_de=%p new_blkno=%llu " + "newfebh=%p bhblocknr=%llu\n", new_de, + (unsigned long long)newfe_blkno, newfe_bh, newfe_bh ? 
+ (unsigned long long)newfe_bh->b_blocknr : 0ULL); + + if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) { + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, + new_inode, + orphan_name, + &orphan_entry_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + } else { + BUG_ON(new_dentry->d_parent->d_inode != new_dir); + + status = ocfs2_check_dir_for_entry(new_dir, + new_dentry->d_name.name, + new_dentry->d_name.len); + if (status) + goto bail; + + status = ocfs2_prepare_dir_for_insert(osb, new_dir, new_dir_bh, + new_dentry->d_name.name, + new_dentry->d_name.len, + &insert_entry_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_RENAME_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto bail; + } + + if (new_de) { + if (S_ISDIR(new_inode->i_mode)) { + if (!ocfs2_empty_dir(new_inode) || + new_inode->i_nlink != 2) { + status = -ENOTEMPTY; + goto bail; + } + } + status = ocfs2_journal_access(handle, new_inode, newfe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (S_ISDIR(new_inode->i_mode) || + (newfe->i_links_count == cpu_to_le16(1))){ + status = ocfs2_orphan_add(osb, handle, new_inode, + newfe, orphan_name, + orphan_entry_bh, orphan_dir); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + /* change the dirent to point to the correct inode */ + status = ocfs2_update_entry(new_dir, handle, new_de_bh, + new_de, old_inode); + if (status < 0) { + mlog_errno(status); + goto bail; + } + new_dir->i_version++; + + if (S_ISDIR(new_inode->i_mode)) + newfe->i_links_count = 0; + else + le16_add_cpu(&newfe->i_links_count, -1); + + status = ocfs2_journal_dirty(handle, newfe_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } else { + /* if the name was not found in new_dir, add it now */ + status = ocfs2_add_entry(handle, new_dentry, old_inode, + OCFS2_I(old_inode)->ip_blkno, + new_dir_bh, insert_entry_bh); + } + + old_inode->i_ctime = CURRENT_TIME; + mark_inode_dirty(old_inode); + + status = ocfs2_journal_access(handle, old_inode, old_inode_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status >= 0) { + old_di = (struct ocfs2_dinode *) old_inode_bh->b_data; + + old_di->i_ctime = cpu_to_le64(old_inode->i_ctime.tv_sec); + old_di->i_ctime_nsec = cpu_to_le32(old_inode->i_ctime.tv_nsec); + + status = ocfs2_journal_dirty(handle, old_inode_bh); + if (status < 0) + mlog_errno(status); + } else + mlog_errno(status); + + /* + * Now that the name has been added to new_dir, remove the old name. + * + * We don't keep any directory entry context around until now + * because the insert might have changed the type of directory + * we're dealing with. 
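+ * + * (For instance, ocfs2_add_entry() may have grown an inline-data + * directory into an extent-based one, which would stale any dirent + * pointers taken before the insert.)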
+ */ + old_de_bh = ocfs2_find_entry(old_dentry->d_name.name, + old_dentry->d_name.len, + old_dir, &old_de); + if (!old_de_bh) { + status = -EIO; + goto bail; + } + + status = ocfs2_delete_entry(handle, old_dir, old_de, old_de_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (new_inode) { + new_inode->i_nlink--; + new_inode->i_ctime = CURRENT_TIME; + } + old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME; + if (old_inode_de_bh) { + status = ocfs2_update_entry(old_inode, handle, old_inode_de_bh, + old_inode_dot_dot_de, new_dir); + old_dir->i_nlink--; + if (new_inode) { + new_inode->i_nlink--; + } else { + inc_nlink(new_dir); + mark_inode_dirty(new_dir); + } + } + mark_inode_dirty(old_dir); + ocfs2_mark_inode_dirty(handle, old_dir, old_dir_bh); + if (new_inode) { + mark_inode_dirty(new_inode); + ocfs2_mark_inode_dirty(handle, new_inode, newfe_bh); + } + + if (old_dir != new_dir) { + /* Keep the same times on both directories.*/ + new_dir->i_ctime = new_dir->i_mtime = old_dir->i_ctime; + + /* + * This will also pick up the i_nlink change from the + * block above. + */ + ocfs2_mark_inode_dirty(handle, new_dir, new_dir_bh); + } + + if (old_dir_nlink != old_dir->i_nlink) { + if (!old_dir_bh) { + mlog(ML_ERROR, "need to change nlink for old dir " + "%llu from %d to %d but bh is NULL!\n", + (unsigned long long)OCFS2_I(old_dir)->ip_blkno, + (int)old_dir_nlink, old_dir->i_nlink); + } else { + struct ocfs2_dinode *fe; + status = ocfs2_journal_access(handle, old_dir, + old_dir_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + fe = (struct ocfs2_dinode *) old_dir_bh->b_data; + fe->i_links_count = cpu_to_le16(old_dir->i_nlink); + status = ocfs2_journal_dirty(handle, old_dir_bh); + } + } + + ocfs2_dentry_move(old_dentry, new_dentry, old_dir, new_dir); + status = 0; +bail: + if (rename_lock) + ocfs2_rename_unlock(osb); + + if (handle) + ocfs2_commit_trans(osb, handle); + + if (parents_locked) + ocfs2_double_unlock(old_dir, new_dir); + + if (old_child_locked) + ocfs2_inode_unlock(old_inode, 1); + + if (new_child_locked) + ocfs2_inode_unlock(new_inode, 1); + + if (orphan_dir) { + /* This was locked for us in ocfs2_prepare_orphan_dir() */ + ocfs2_inode_unlock(orphan_dir, 1); + mutex_unlock(&orphan_dir->i_mutex); + iput(orphan_dir); + } + + if (new_inode) + sync_mapping_buffers(old_inode->i_mapping); + + if (new_inode) + iput(new_inode); + brelse(newfe_bh); + brelse(old_inode_bh); + brelse(old_dir_bh); + brelse(new_dir_bh); + brelse(new_de_bh); + brelse(old_de_bh); + brelse(old_inode_de_bh); + brelse(orphan_entry_bh); + brelse(insert_entry_bh); + + mlog_exit(status); + + return status; +} + +/* + * we expect i_size = strlen(symname). Copy symname into the file + * data, including the null terminator. + */ +static int ocfs2_create_symlink_data(struct ocfs2_super *osb, + handle_t *handle, + struct inode *inode, + const char *symname) +{ + struct buffer_head **bhs = NULL; + const char *c; + struct super_block *sb = osb->sb; + u64 p_blkno, p_blocks; + int virtual, blocks, status, i, bytes_left; + + bytes_left = i_size_read(inode) + 1; + /* we can't trust i_blocks because we're actually going to + * write i_size + 1 bytes. */ + blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; + + mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n", + (unsigned long long)inode->i_blocks, + i_size_read(inode), blocks); + + /* Sanity check -- make sure we're going to fit. 
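+ * (E.g. with a 4k blocksize and i_size == 4096, bytes_left is 4097 + * because of the NUL, so blocks == 2 and the allocation must cover + * two blocks even though strlen() alone fits in one.)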
*/ + if (bytes_left > + ocfs2_clusters_to_bytes(sb, OCFS2_I(inode)->ip_clusters)) { + status = -EIO; + mlog_errno(status); + goto bail; + } + + bhs = kcalloc(blocks, sizeof(struct buffer_head *), GFP_KERNEL); + if (!bhs) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + status = ocfs2_extent_map_get_blocks(inode, 0, &p_blkno, &p_blocks, + NULL); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + /* links can never be larger than one cluster so we know this + * is all going to be contiguous, but do a sanity check + * anyway. */ + if ((p_blocks << sb->s_blocksize_bits) < bytes_left) { + status = -EIO; + mlog_errno(status); + goto bail; + } + + virtual = 0; + while(bytes_left > 0) { + c = &symname[virtual * sb->s_blocksize]; + + bhs[virtual] = sb_getblk(sb, p_blkno); + if (!bhs[virtual]) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + ocfs2_set_new_buffer_uptodate(inode, bhs[virtual]); + + status = ocfs2_journal_access(handle, inode, bhs[virtual], + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + memset(bhs[virtual]->b_data, 0, sb->s_blocksize); + + memcpy(bhs[virtual]->b_data, c, + (bytes_left > sb->s_blocksize) ? sb->s_blocksize : + bytes_left); + + status = ocfs2_journal_dirty(handle, bhs[virtual]); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + virtual++; + p_blkno++; + bytes_left -= sb->s_blocksize; + } + + status = 0; +bail: + + if (bhs) { + for(i = 0; i < blocks; i++) + brelse(bhs[i]); + kfree(bhs); + } + + mlog_exit(status); + return status; +} + +static int ocfs2_symlink(struct inode *dir, + struct dentry *dentry, + const char *symname) +{ + int status, l, credits; + u64 newsize; + struct ocfs2_super *osb = NULL; + struct inode *inode = NULL; + struct super_block *sb; + struct buffer_head *new_fe_bh = NULL; + struct buffer_head *de_bh = NULL; + struct buffer_head *parent_fe_bh = NULL; + struct ocfs2_dinode *fe = NULL; + struct ocfs2_dinode *dirfe; + handle_t *handle = NULL; + struct ocfs2_alloc_context *inode_ac = NULL; + struct ocfs2_alloc_context *data_ac = NULL; + + mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, + dentry, symname, dentry->d_name.len, dentry->d_name.name); + + sb = dir->i_sb; + osb = OCFS2_SB(sb); + + l = strlen(symname) + 1; + + credits = ocfs2_calc_symlink_credits(sb); + + /* lock the parent directory */ + status = ocfs2_inode_lock(dir, &parent_fe_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + return status; + } + + dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data; + if (!dirfe->i_links_count) { + /* can't make a file in a deleted directory. */ + status = -ENOENT; + goto bail; + } + + status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name, + dentry->d_name.len); + if (status) + goto bail; + + status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh, + dentry->d_name.name, + dentry->d_name.len, &de_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_reserve_new_inode(osb, &inode_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + /* don't reserve bitmap space for fast symlinks. 
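+ * (A "fast" symlink -- l <= ocfs2_fast_symlink_chars(sb) -- keeps its + * target inside the dinode's id2.i_symlink area, see the memcpy() + * below, so it never owns a data cluster.)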
*/ + if (l > ocfs2_fast_symlink_chars(sb)) { + status = ocfs2_reserve_clusters(osb, 1, &data_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + } + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto bail; + } + + status = ocfs2_mknod_locked(osb, dir, dentry, + S_IFLNK | S_IRWXUGO, 0, + &new_fe_bh, parent_fe_bh, handle, + &inode, inode_ac); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + fe = (struct ocfs2_dinode *) new_fe_bh->b_data; + inode->i_rdev = 0; + newsize = l - 1; + if (l > ocfs2_fast_symlink_chars(sb)) { + u32 offset = 0; + + inode->i_op = &ocfs2_symlink_inode_operations; + status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0, + new_fe_bh, + handle, data_ac, NULL, + NULL); + if (status < 0) { + if (status != -ENOSPC && status != -EINTR) { + mlog(ML_ERROR, + "Failed to extend file to %llu\n", + (unsigned long long)newsize); + mlog_errno(status); + status = -ENOSPC; + } + goto bail; + } + i_size_write(inode, newsize); + inode->i_blocks = ocfs2_inode_sector_count(inode); + } else { + inode->i_op = &ocfs2_fast_symlink_inode_operations; + memcpy((char *) fe->id2.i_symlink, symname, l); + i_size_write(inode, newsize); + inode->i_blocks = 0; + } + + status = ocfs2_mark_inode_dirty(handle, inode, new_fe_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (!ocfs2_inode_is_fast_symlink(inode)) { + status = ocfs2_create_symlink_data(osb, handle, inode, + symname); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + + status = ocfs2_add_entry(handle, dentry, inode, + le64_to_cpu(fe->i_blkno), parent_fe_bh, + de_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno); + if (status) { + mlog_errno(status); + goto bail; + } + + insert_inode_hash(inode); + dentry->d_op = &ocfs2_dentry_ops; + d_instantiate(dentry, inode); +bail: + if (handle) + ocfs2_commit_trans(osb, handle); + + ocfs2_inode_unlock(dir, 1); + + brelse(new_fe_bh); + brelse(parent_fe_bh); + brelse(de_bh); + if (inode_ac) + ocfs2_free_alloc_context(inode_ac); + if (data_ac) + ocfs2_free_alloc_context(data_ac); + if ((status < 0) && inode) + iput(inode); + + mlog_exit(status); + + return status; +} + +static int ocfs2_blkno_stringify(u64 blkno, char *name) +{ + int status, namelen; + + mlog_entry_void(); + + namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016llx", + (long long)blkno); + if (namelen <= 0) { + if (namelen) + status = namelen; + else + status = -EINVAL; + mlog_errno(status); + goto bail; + } + if (namelen != OCFS2_ORPHAN_NAMELEN) { + status = -EINVAL; + mlog_errno(status); + goto bail; + } + + mlog(0, "built filename '%s' for orphan dir (len=%d)\n", name, + namelen); + + status = 0; +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, + struct inode **ret_orphan_dir, + struct inode *inode, + char *name, + struct buffer_head **de_bh) +{ + struct inode *orphan_dir_inode; + struct buffer_head *orphan_dir_bh = NULL; + int status = 0; + + status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); + if (status < 0) { + mlog_errno(status); + return status; + } + + orphan_dir_inode = ocfs2_get_system_file_inode(osb, + ORPHAN_DIR_SYSTEM_INODE, + osb->slot_num); + if (!orphan_dir_inode) { + status = -ENOENT; + mlog_errno(status); + return status; + } + + mutex_lock(&orphan_dir_inode->i_mutex); + + 
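/* + * Lock ordering: the orphan dir's i_mutex is taken before its + * cluster lock; ocfs2_unlink() and ocfs2_rename() drop them in the + * reverse order after the transaction commits. + */ +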
status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, + orphan_dir_bh, name, + OCFS2_ORPHAN_NAMELEN, de_bh); + if (status < 0) { + ocfs2_inode_unlock(orphan_dir_inode, 1); + + mlog_errno(status); + goto leave; + } + + *ret_orphan_dir = orphan_dir_inode; + +leave: + if (status) { + mutex_unlock(&orphan_dir_inode->i_mutex); + iput(orphan_dir_inode); + } + + brelse(orphan_dir_bh); + + mlog_exit(status); + return status; +} + +static int ocfs2_orphan_add(struct ocfs2_super *osb, + handle_t *handle, + struct inode *inode, + struct ocfs2_dinode *fe, + char *name, + struct buffer_head *de_bh, + struct inode *orphan_dir_inode) +{ + struct buffer_head *orphan_dir_bh = NULL; + int status = 0; + struct ocfs2_dinode *orphan_fe; + + mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino); + + status = ocfs2_read_block(orphan_dir_inode, + OCFS2_I(orphan_dir_inode)->ip_blkno, + &orphan_dir_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_journal_access(handle, orphan_dir_inode, orphan_dir_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + /* we're a cluster, and nlink can change on disk from + * underneath us... */ + orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data; + if (S_ISDIR(inode->i_mode)) + le16_add_cpu(&orphan_fe->i_links_count, 1); + orphan_dir_inode->i_nlink = le16_to_cpu(orphan_fe->i_links_count); + + status = ocfs2_journal_dirty(handle, orphan_dir_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = __ocfs2_add_entry(handle, orphan_dir_inode, name, + OCFS2_ORPHAN_NAMELEN, inode, + OCFS2_I(inode)->ip_blkno, + orphan_dir_bh, de_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL); + + /* Record which orphan dir our inode now resides + * in. delete_inode will use this to determine which orphan + * dir to lock. */ + fe->i_orphaned_slot = cpu_to_le16(osb->slot_num); + + mlog(0, "Inode %llu orphaned in slot %d\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); + +leave: + brelse(orphan_dir_bh); + + mlog_exit(status); + return status; +} + +/* unlike orphan_add, we expect the orphan dir to already be locked here. 
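+ * The entry name is rebuilt from the inode's block number (e.g. block + * 4097 becomes the 16-character name "0000000000001001"), so callers + * need only the inode and the locked orphan dir.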
*/ +int ocfs2_orphan_del(struct ocfs2_super *osb, + handle_t *handle, + struct inode *orphan_dir_inode, + struct inode *inode, + struct buffer_head *orphan_dir_bh) +{ + char name[OCFS2_ORPHAN_NAMELEN + 1]; + struct ocfs2_dinode *orphan_fe; + int status = 0; + struct buffer_head *target_de_bh = NULL; + struct ocfs2_dir_entry *target_de = NULL; + + mlog_entry_void(); + + status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + mlog(0, "removing '%s' from orphan dir %llu (namelen=%d)\n", + name, (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, + OCFS2_ORPHAN_NAMELEN); + + /* find its spot in the orphan directory */ + target_de_bh = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, + orphan_dir_inode, &target_de); + if (!target_de_bh) { + status = -ENOENT; + mlog_errno(status); + goto leave; + } + + /* remove it from the orphan directory */ + status = ocfs2_delete_entry(handle, orphan_dir_inode, target_de, + target_de_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_journal_access(handle, orphan_dir_inode, orphan_dir_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + /* do the i_nlink dance! :) */ + orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data; + if (S_ISDIR(inode->i_mode)) + le16_add_cpu(&orphan_fe->i_links_count, -1); + orphan_dir_inode->i_nlink = le16_to_cpu(orphan_fe->i_links_count); + + status = ocfs2_journal_dirty(handle, orphan_dir_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + +leave: + brelse(target_de_bh); + + mlog_exit(status); + return status; +} + +const struct inode_operations ocfs2_dir_iops = { + .create = ocfs2_create, + .lookup = ocfs2_lookup, + .link = ocfs2_link, + .unlink = ocfs2_unlink, + .rmdir = ocfs2_unlink, + .symlink = ocfs2_symlink, + .mkdir = ocfs2_mkdir, + .mknod = ocfs2_mknod, + .rename = ocfs2_rename, + .setattr = ocfs2_setattr, + .getattr = ocfs2_getattr, + .permission = ocfs2_permission, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = ocfs2_listxattr, + .removexattr = generic_removexattr, +}; diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h new file mode 100644 index 0000000..688aef6 --- /dev/null +++ b/fs/ocfs2/namei.h @@ -0,0 +1,39 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * namei.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA.
+ */ + +#ifndef OCFS2_NAMEI_H +#define OCFS2_NAMEI_H + +extern const struct inode_operations ocfs2_dir_iops; + +struct dentry *ocfs2_get_parent(struct dentry *child); + +int ocfs2_orphan_del(struct ocfs2_super *osb, + handle_t *handle, + struct inode *orphan_dir_inode, + struct inode *inode, + struct buffer_head *orphan_dir_bh); + +#endif /* OCFS2_NAMEI_H */ diff --git a/fs/ocfs2/ocfs1_fs_compat.h b/fs/ocfs2/ocfs1_fs_compat.h new file mode 100644 index 0000000..dfb313b --- /dev/null +++ b/fs/ocfs2/ocfs1_fs_compat.h @@ -0,0 +1,109 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs1_fs_compat.h + * + * OCFS1 volume header definitions. OCFS2 creates valid but unmountable + * OCFS1 volume headers on the first two sectors of an OCFS2 volume. + * This allows an OCFS1 volume to see the partition and cleanly fail to + * mount it. + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef _OCFS1_FS_COMPAT_H +#define _OCFS1_FS_COMPAT_H + +#define OCFS1_MAX_VOL_SIGNATURE_LEN 128 +#define OCFS1_MAX_MOUNT_POINT_LEN 128 +#define OCFS1_MAX_VOL_ID_LENGTH 16 +#define OCFS1_MAX_VOL_LABEL_LEN 64 +#define OCFS1_MAX_CLUSTER_NAME_LEN 64 + +#define OCFS1_MAJOR_VERSION (2) +#define OCFS1_MINOR_VERSION (0) +#define OCFS1_VOLUME_SIGNATURE "OracleCFS" + +/* + * OCFS1 superblock. Lives at sector 0. + */ +struct ocfs1_vol_disk_hdr +{ +/*00*/ __u32 minor_version; + __u32 major_version; +/*08*/ __u8 signature[OCFS1_MAX_VOL_SIGNATURE_LEN]; +/*88*/ __u8 mount_point[OCFS1_MAX_MOUNT_POINT_LEN]; +/*108*/ __u64 serial_num; +/*110*/ __u64 device_size; + __u64 start_off; +/*120*/ __u64 bitmap_off; + __u64 publ_off; +/*130*/ __u64 vote_off; + __u64 root_bitmap_off; +/*140*/ __u64 data_start_off; + __u64 root_bitmap_size; +/*150*/ __u64 root_off; + __u64 root_size; +/*160*/ __u64 cluster_size; + __u64 num_nodes; +/*170*/ __u64 num_clusters; + __u64 dir_node_size; +/*180*/ __u64 file_node_size; + __u64 internal_off; +/*190*/ __u64 node_cfg_off; + __u64 node_cfg_size; +/*1A0*/ __u64 new_cfg_off; + __u32 prot_bits; + __s32 excl_mount; +/*1B0*/ +}; + + +struct ocfs1_disk_lock +{ +/*00*/ __u32 curr_master; + __u8 file_lock; + __u8 compat_pad[3]; /* Not in original definition. Used to + make the already existing alignment + explicit */ + __u64 last_write_time; +/*10*/ __u64 last_read_time; + __u32 writer_node_num; + __u32 reader_node_num; +/*20*/ __u64 oin_node_map; + __u64 dlock_seq_num; +/*30*/ +}; + +/* + * OCFS1 volume label. Lives at sector 1. 
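+ * + * (The hex annotations -- 00, 30, 70, ... -- in these structs give + * byte offsets from the start of the structure.)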
+ */ +struct ocfs1_vol_label +{ +/*00*/ struct ocfs1_disk_lock disk_lock; +/*30*/ __u8 label[OCFS1_MAX_VOL_LABEL_LEN]; +/*70*/ __u16 label_len; +/*72*/ __u8 vol_id[OCFS1_MAX_VOL_ID_LENGTH]; +/*82*/ __u16 vol_id_len; +/*84*/ __u8 cluster_name[OCFS1_MAX_CLUSTER_NAME_LEN]; +/*A4*/ __u16 cluster_name_len; +/*A6*/ +}; + + +#endif /* _OCFS1_FS_COMPAT_H */ + diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h new file mode 100644 index 0000000..3fed9e3 --- /dev/null +++ b/fs/ocfs2/ocfs2.h @@ -0,0 +1,636 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs2.h + * + * Defines macros and structures used in OCFS2 + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_H +#define OCFS2_H + +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/workqueue.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#ifndef CONFIG_OCFS2_COMPAT_JBD +# include <linux/jbd2.h> +#else +# include <linux/jbd.h> +# include "ocfs2_jbd_compat.h" +#endif + +/* For union ocfs2_dlm_lksb */ +#include "stackglue.h" + +#include "ocfs2_fs.h" +#include "ocfs2_lockid.h" + +/* Most user visible OCFS2 inodes will have very few pieces of + * metadata, but larger files (including bitmaps, etc) must be taken + * into account when designing an access scheme. We allow a small + * amount of inlined blocks to be stored on an array and grow the + * structure into a rb tree when necessary. */ +#define OCFS2_INODE_MAX_CACHE_ARRAY 2 + +struct ocfs2_caching_info { + unsigned int ci_num_cached; + union { + sector_t ci_array[OCFS2_INODE_MAX_CACHE_ARRAY]; + struct rb_root ci_tree; + } ci_cache; +}; + +/* this limits us to 256 nodes + * if we need more, we can do a kmalloc for the map */ +#define OCFS2_NODE_MAP_MAX_NODES 256 +struct ocfs2_node_map { + u16 num_nodes; + unsigned long map[BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES)]; +}; + +enum ocfs2_ast_action { + OCFS2_AST_INVALID = 0, + OCFS2_AST_ATTACH, + OCFS2_AST_CONVERT, + OCFS2_AST_DOWNCONVERT, +}; + +/* actions for an unlockast function to take. */ +enum ocfs2_unlock_action { + OCFS2_UNLOCK_INVALID = 0, + OCFS2_UNLOCK_CANCEL_CONVERT, + OCFS2_UNLOCK_DROP_LOCK, +}; + +/* ocfs2_lock_res->l_flags flags. 
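+ * + * (ocfs2_do_flock() in locks.c, for example, tests OCFS2_LOCK_ATTACHED + * together with l_level to decide whether a flock is already held at + * some level.)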
*/ +#define OCFS2_LOCK_ATTACHED (0x00000001) /* we have initialized + * the lvb */ +#define OCFS2_LOCK_BUSY (0x00000002) /* we are currently in + * dlm_lock */ +#define OCFS2_LOCK_BLOCKED (0x00000004) /* blocked waiting to + * downconvert*/ +#define OCFS2_LOCK_LOCAL (0x00000008) /* newly created inode */ +#define OCFS2_LOCK_NEEDS_REFRESH (0x00000010) +#define OCFS2_LOCK_REFRESHING (0x00000020) +#define OCFS2_LOCK_INITIALIZED (0x00000040) /* track initialization + * for shutdown paths */ +#define OCFS2_LOCK_FREEING (0x00000080) /* help dlmglue track + * when to skip queueing + * a lock because it's + * about to be + * dropped. */ +#define OCFS2_LOCK_QUEUED (0x00000100) /* queued for downconvert */ +#define OCFS2_LOCK_NOCACHE (0x00000200) /* don't use a holder count */ +#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a + call to dlm_lock. Only + exists with BUSY set. */ + +struct ocfs2_lock_res_ops; + +typedef void (*ocfs2_lock_callback)(int status, unsigned long data); + +struct ocfs2_lock_res { + void *l_priv; + struct ocfs2_lock_res_ops *l_ops; + spinlock_t l_lock; + + struct list_head l_blocked_list; + struct list_head l_mask_waiters; + + enum ocfs2_lock_type l_type; + unsigned long l_flags; + char l_name[OCFS2_LOCK_ID_MAX_LEN]; + int l_level; + unsigned int l_ro_holders; + unsigned int l_ex_holders; + union ocfs2_dlm_lksb l_lksb; + + /* used from AST/BAST funcs. */ + enum ocfs2_ast_action l_action; + enum ocfs2_unlock_action l_unlock_action; + int l_requested; + int l_blocking; + unsigned int l_pending_gen; + + wait_queue_head_t l_event; + + struct list_head l_debug_list; + +#ifdef CONFIG_OCFS2_FS_STATS + unsigned long long l_lock_num_prmode; /* PR acquires */ + unsigned long long l_lock_num_exmode; /* EX acquires */ + unsigned int l_lock_num_prmode_failed; /* Failed PR gets */ + unsigned int l_lock_num_exmode_failed; /* Failed EX gets */ + unsigned long long l_lock_total_prmode; /* Tot wait for PR */ + unsigned long long l_lock_total_exmode; /* Tot wait for EX */ + unsigned int l_lock_max_prmode; /* Max wait for PR */ + unsigned int l_lock_max_exmode; /* Max wait for EX */ + unsigned int l_lock_refresh; /* Disk refreshes */ +#endif +}; + +struct ocfs2_dlm_debug { + struct kref d_refcnt; + struct dentry *d_locking_state; + struct list_head d_lockres_tracking; +}; + +enum ocfs2_vol_state +{ + VOLUME_INIT = 0, + VOLUME_MOUNTED, + VOLUME_DISMOUNTED, + VOLUME_DISABLED +}; + +struct ocfs2_alloc_stats +{ + atomic_t moves; + atomic_t local_data; + atomic_t bitmap_data; + atomic_t bg_allocs; + atomic_t bg_extends; +}; + +enum ocfs2_local_alloc_state +{ + OCFS2_LA_UNUSED = 0, /* Local alloc will never be used for + * this mountpoint. */ + OCFS2_LA_ENABLED, /* Local alloc is in use. */ + OCFS2_LA_THROTTLED, /* Local alloc is in use, but number + * of bits has been reduced. */ + OCFS2_LA_DISABLED /* Local alloc has temporarily been + * disabled. 
*/ +}; + +enum ocfs2_mount_options +{ + OCFS2_MOUNT_HB_LOCAL = 1 << 0, /* Heartbeat started in local mode */ + OCFS2_MOUNT_BARRIER = 1 << 1, /* Use block barriers */ + OCFS2_MOUNT_NOINTR = 1 << 2, /* Don't catch signals */ + OCFS2_MOUNT_ERRORS_PANIC = 1 << 3, /* Panic on errors */ + OCFS2_MOUNT_DATA_WRITEBACK = 1 << 4, /* No data ordering */ + OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */ + OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */ + OCFS2_MOUNT_INODE64 = 1 << 7, /* Allow inode numbers > 2^32 */ +}; + +#define OCFS2_OSB_SOFT_RO 0x0001 +#define OCFS2_OSB_HARD_RO 0x0002 +#define OCFS2_OSB_ERROR_FS 0x0004 +#define OCFS2_DEFAULT_ATIME_QUANTUM 60 + +struct ocfs2_journal; +struct ocfs2_slot_info; +struct ocfs2_recovery_map; +struct ocfs2_super +{ + struct task_struct *commit_task; + struct super_block *sb; + struct inode *root_inode; + struct inode *sys_root_inode; + struct inode *system_inodes[NUM_SYSTEM_INODES]; + + struct ocfs2_slot_info *slot_info; + + u32 *slot_recovery_generations; + + spinlock_t node_map_lock; + + u64 root_blkno; + u64 system_dir_blkno; + u64 bitmap_blkno; + u32 bitmap_cpg; + u8 *uuid; + char *uuid_str; + u32 uuid_hash; + u8 *vol_label; + u64 first_cluster_group_blkno; + u32 fs_generation; + + u32 s_feature_compat; + u32 s_feature_incompat; + u32 s_feature_ro_compat; + + /* Protects s_next_generation, osb_flags and s_inode_steal_slot. + * Could protect more on osb as it's very short lived. + */ + spinlock_t osb_lock; + u32 s_next_generation; + unsigned long osb_flags; + s16 s_inode_steal_slot; + atomic_t s_num_inodes_stolen; + + unsigned long s_mount_opt; + unsigned int s_atime_quantum; + + unsigned int max_slots; + unsigned int node_num; + int slot_num; + int preferred_slot; + int s_sectsize_bits; + int s_clustersize; + int s_clustersize_bits; + unsigned int s_xattr_inline_size; + + atomic_t vol_state; + struct mutex recovery_lock; + struct ocfs2_recovery_map *recovery_map; + struct task_struct *recovery_thread_task; + int disable_recovery; + wait_queue_head_t checkpoint_event; + atomic_t needs_checkpoint; + struct ocfs2_journal *journal; + unsigned long osb_commit_interval; + + struct delayed_work la_enable_wq; + + /* + * Must hold local alloc i_mutex and osb->osb_lock to change + * local_alloc_bits. Reads can be done under either lock. + */ + unsigned int local_alloc_bits; + unsigned int local_alloc_default_bits; + + enum ocfs2_local_alloc_state local_alloc_state; /* protected + * by osb_lock */ + + struct buffer_head *local_alloc_bh; + + u64 la_last_gd; + +#ifdef CONFIG_OCFS2_FS_STATS + struct dentry *local_alloc_debug; + char *local_alloc_debug_buf; +#endif + + /* Next two fields are for local node slot recovery during + * mount. */ + int dirty; + struct ocfs2_dinode *local_alloc_copy; + + struct ocfs2_alloc_stats alloc_stats; + char dev_str[20]; /* "major,minor" of the device */ + + char osb_cluster_stack[OCFS2_STACK_LABEL_LEN + 1]; + struct ocfs2_cluster_connection *cconn; + struct ocfs2_lock_res osb_super_lockres; + struct ocfs2_lock_res osb_rename_lockres; + struct ocfs2_dlm_debug *osb_dlm_debug; + + struct dentry *osb_debug_root; + + wait_queue_head_t recovery_event; + + spinlock_t dc_task_lock; + struct task_struct *dc_task; + wait_queue_head_t dc_event; + unsigned long dc_wake_sequence; + unsigned long dc_work_sequence; + + /* + * Any thread can add locks to the list, but the downconvert + * thread is the only one allowed to remove locks. 
Any change + * to this rule requires updating + * ocfs2_downconvert_thread_do_work(). + */ + struct list_head blocked_lock_list; + unsigned long blocked_lock_count; + + wait_queue_head_t osb_mount_event; + + /* Truncate log info */ + struct inode *osb_tl_inode; + struct buffer_head *osb_tl_bh; + struct delayed_work osb_truncate_log_wq; + + struct ocfs2_node_map osb_recovering_orphan_dirs; + unsigned int *osb_orphan_wipes; + wait_queue_head_t osb_wipe_event; +}; + +#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) + +static inline int ocfs2_should_order_data(struct inode *inode) +{ + if (!S_ISREG(inode->i_mode)) + return 0; + if (OCFS2_SB(inode->i_sb)->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) + return 0; + return 1; +} + +static inline int ocfs2_sparse_alloc(struct ocfs2_super *osb) +{ + if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC) + return 1; + return 0; +} + +static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) +{ + /* + * Support for sparse files is a pre-requisite + */ + if (!ocfs2_sparse_alloc(osb)) + return 0; + + if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_UNWRITTEN) + return 1; + return 0; +} + +static inline int ocfs2_supports_inline_data(struct ocfs2_super *osb) +{ + if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INLINE_DATA) + return 1; + return 0; +} + +static inline int ocfs2_supports_xattr(struct ocfs2_super *osb) +{ + if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR) + return 1; + return 0; +} + +/* set / clear functions because cluster events can make these happen + * in parallel so we want the transitions to be atomic. this also + * means that any future flags osb_flags must be protected by spinlock + * too! */ +static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb, + unsigned long flag) +{ + spin_lock(&osb->osb_lock); + osb->osb_flags |= flag; + spin_unlock(&osb->osb_lock); +} + +static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb, + int hard) +{ + spin_lock(&osb->osb_lock); + osb->osb_flags &= ~(OCFS2_OSB_SOFT_RO|OCFS2_OSB_HARD_RO); + if (hard) + osb->osb_flags |= OCFS2_OSB_HARD_RO; + else + osb->osb_flags |= OCFS2_OSB_SOFT_RO; + spin_unlock(&osb->osb_lock); +} + +static inline int ocfs2_is_hard_readonly(struct ocfs2_super *osb) +{ + int ret; + + spin_lock(&osb->osb_lock); + ret = osb->osb_flags & OCFS2_OSB_HARD_RO; + spin_unlock(&osb->osb_lock); + + return ret; +} + +static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb) +{ + int ret; + + spin_lock(&osb->osb_lock); + ret = osb->osb_flags & OCFS2_OSB_SOFT_RO; + spin_unlock(&osb->osb_lock); + + return ret; +} + +static inline int ocfs2_userspace_stack(struct ocfs2_super *osb) +{ + return (osb->s_feature_incompat & + OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK); +} + +static inline int ocfs2_mount_local(struct ocfs2_super *osb) +{ + return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT); +} + +static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) +{ + return (osb->s_feature_incompat & + OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP); +} + + +#define OCFS2_IS_VALID_DINODE(ptr) \ + (!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE)) + +#define OCFS2_RO_ON_INVALID_DINODE(__sb, __di) do { \ + typeof(__di) ____di = (__di); \ + ocfs2_error((__sb), \ + "Dinode # %llu has bad signature %.*s", \ + (unsigned long long)le64_to_cpu((____di)->i_blkno), 7, \ + (____di)->i_signature); \ +} while (0) + +#define OCFS2_IS_VALID_EXTENT_BLOCK(ptr) \ + (!strcmp((ptr)->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE)) + 
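+/*
+ * Usage sketch (illustrative): each IS_VALID macro is meant to pair
+ * with its RO_ON_INVALID counterpart when validating freshly read
+ * metadata, as ocfs2_group_extend() in resize.c does for dinodes:
+ *
+ *      if (!OCFS2_IS_VALID_DINODE(di)) {
+ *              OCFS2_RO_ON_INVALID_DINODE(sb, di);
+ *              return -EIO;
+ *      }
+ */
+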
+#define OCFS2_RO_ON_INVALID_EXTENT_BLOCK(__sb, __eb) do { \ + typeof(__eb) ____eb = (__eb); \ + ocfs2_error((__sb), \ + "Extent Block # %llu has bad signature %.*s", \ + (unsigned long long)le64_to_cpu((____eb)->h_blkno), 7, \ + (____eb)->h_signature); \ +} while (0) + +#define OCFS2_IS_VALID_GROUP_DESC(ptr) \ + (!strcmp((ptr)->bg_signature, OCFS2_GROUP_DESC_SIGNATURE)) + +#define OCFS2_RO_ON_INVALID_GROUP_DESC(__sb, __gd) do { \ + typeof(__gd) ____gd = (__gd); \ + ocfs2_error((__sb), \ + "Group Descriptor # %llu has bad signature %.*s", \ + (unsigned long long)le64_to_cpu((____gd)->bg_blkno), 7, \ + (____gd)->bg_signature); \ +} while (0) + +#define OCFS2_IS_VALID_XATTR_BLOCK(ptr) \ + (!strcmp((ptr)->xb_signature, OCFS2_XATTR_BLOCK_SIGNATURE)) + +static inline unsigned long ino_from_blkno(struct super_block *sb, + u64 blkno) +{ + return (unsigned long)(blkno & (u64)ULONG_MAX); +} + +static inline u64 ocfs2_clusters_to_blocks(struct super_block *sb, + u32 clusters) +{ + int c_to_b_bits = OCFS2_SB(sb)->s_clustersize_bits - + sb->s_blocksize_bits; + + return (u64)clusters << c_to_b_bits; +} + +static inline u32 ocfs2_blocks_to_clusters(struct super_block *sb, + u64 blocks) +{ + int b_to_c_bits = OCFS2_SB(sb)->s_clustersize_bits - + sb->s_blocksize_bits; + + return (u32)(blocks >> b_to_c_bits); +} + +static inline unsigned int ocfs2_clusters_for_bytes(struct super_block *sb, + u64 bytes) +{ + int cl_bits = OCFS2_SB(sb)->s_clustersize_bits; + unsigned int clusters; + + bytes += OCFS2_SB(sb)->s_clustersize - 1; + /* OCFS2 just cannot have enough clusters to overflow this */ + clusters = (unsigned int)(bytes >> cl_bits); + + return clusters; +} + +static inline u64 ocfs2_blocks_for_bytes(struct super_block *sb, + u64 bytes) +{ + bytes += sb->s_blocksize - 1; + return bytes >> sb->s_blocksize_bits; +} + +static inline u64 ocfs2_clusters_to_bytes(struct super_block *sb, + u32 clusters) +{ + return (u64)clusters << OCFS2_SB(sb)->s_clustersize_bits; +} + +static inline u64 ocfs2_align_bytes_to_clusters(struct super_block *sb, + u64 bytes) +{ + int cl_bits = OCFS2_SB(sb)->s_clustersize_bits; + unsigned int clusters; + + clusters = ocfs2_clusters_for_bytes(sb, bytes); + return (u64)clusters << cl_bits; +} + +static inline u64 ocfs2_align_bytes_to_blocks(struct super_block *sb, + u64 bytes) +{ + u64 blocks; + + blocks = ocfs2_blocks_for_bytes(sb, bytes); + return blocks << sb->s_blocksize_bits; +} + +static inline unsigned long ocfs2_align_bytes_to_sectors(u64 bytes) +{ + return (unsigned long)((bytes + 511) >> 9); +} + +static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb, + unsigned long pg_index) +{ + u32 clusters = pg_index; + unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; + + if (unlikely(PAGE_CACHE_SHIFT > cbits)) + clusters = pg_index << (PAGE_CACHE_SHIFT - cbits); + else if (PAGE_CACHE_SHIFT < cbits) + clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT); + + return clusters; +} + +/* + * Find the 1st page index which covers the given clusters. 
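+ *
+ * Worked example (editorial aside, assuming 4K pages and 8K clusters,
+ * i.e. PAGE_CACHE_SHIFT == 12 and cbits == 13): cluster 3 starts at
+ * byte 24576, so the first page index covering it is
+ * 3 << (13 - 12) == 6.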
+ */ +static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb, + u32 clusters) +{ + unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; + pgoff_t index = clusters; + + if (PAGE_CACHE_SHIFT > cbits) { + index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits); + } else if (PAGE_CACHE_SHIFT < cbits) { + index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT); + } + + return index; +} + +static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb) +{ + unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; + unsigned int pages_per_cluster = 1; + + if (PAGE_CACHE_SHIFT < cbits) + pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT); + + return pages_per_cluster; +} + +static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb, + unsigned int megs) +{ + BUILD_BUG_ON(OCFS2_MAX_CLUSTERSIZE > 1048576); + + return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits); +} + +static inline void ocfs2_init_inode_steal_slot(struct ocfs2_super *osb) +{ + spin_lock(&osb->osb_lock); + osb->s_inode_steal_slot = OCFS2_INVALID_SLOT; + spin_unlock(&osb->osb_lock); + atomic_set(&osb->s_num_inodes_stolen, 0); +} + +static inline void ocfs2_set_inode_steal_slot(struct ocfs2_super *osb, + s16 slot) +{ + spin_lock(&osb->osb_lock); + osb->s_inode_steal_slot = slot; + spin_unlock(&osb->osb_lock); +} + +static inline s16 ocfs2_get_inode_steal_slot(struct ocfs2_super *osb) +{ + s16 slot; + + spin_lock(&osb->osb_lock); + slot = osb->s_inode_steal_slot; + spin_unlock(&osb->osb_lock); + + return slot; +} + +#define ocfs2_set_bit ext2_set_bit +#define ocfs2_clear_bit ext2_clear_bit +#define ocfs2_test_bit ext2_test_bit +#define ocfs2_find_next_zero_bit ext2_find_next_zero_bit +#endif /* OCFS2_H */ + diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h new file mode 100644 index 0000000..5e0c0d0 --- /dev/null +++ b/fs/ocfs2/ocfs2_fs.h @@ -0,0 +1,1131 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs2_fs.h + * + * On-disk structures for OCFS2. + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License, version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef _OCFS2_FS_H +#define _OCFS2_FS_H + +/* Version */ +#define OCFS2_MAJOR_REV_LEVEL 0 +#define OCFS2_MINOR_REV_LEVEL 90 + +/* + * An OCFS2 volume starts this way: + * Sector 0: Valid ocfs1_vol_disk_hdr that cleanly fails to mount OCFS. + * Sector 1: Valid ocfs1_vol_label that cleanly fails to mount OCFS. + * Block OCFS2_SUPER_BLOCK_BLKNO: OCFS2 superblock. + * + * All other structures are found from the superblock information. + * + * OCFS2_SUPER_BLOCK_BLKNO is in blocks, not sectors. eg, for a + * blocksize of 2K, it is 4096 bytes into disk. + */ +#define OCFS2_SUPER_BLOCK_BLKNO 2 + +/* + * Cluster size limits. The maximum is kept arbitrarily at 1 MB, and could + * grow if needed. 
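+ *
+ * (Editorial arithmetic for scale: with the 32-bit cluster counts used
+ * on disk, the 4K minimum addresses 2^32 * 4KB = 16TB, while 1MB
+ * clusters cover 4PB.)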
+ */
+#define OCFS2_MIN_CLUSTERSIZE           4096
+#define OCFS2_MAX_CLUSTERSIZE           1048576
+
+/*
+ * Blocks cannot be bigger than clusters, so the maximum blocksize is the
+ * minimum cluster size.
+ */
+#define OCFS2_MIN_BLOCKSIZE             512
+#define OCFS2_MAX_BLOCKSIZE             OCFS2_MIN_CLUSTERSIZE
+
+/* Filesystem magic number */
+#define OCFS2_SUPER_MAGIC               0x7461636f
+
+/* Object signatures */
+#define OCFS2_SUPER_BLOCK_SIGNATURE     "OCFSV2"
+#define OCFS2_INODE_SIGNATURE           "INODE01"
+#define OCFS2_EXTENT_BLOCK_SIGNATURE    "EXBLK01"
+#define OCFS2_GROUP_DESC_SIGNATURE      "GROUP01"
+#define OCFS2_XATTR_BLOCK_SIGNATURE     "XATTR01"
+
+/* Compatibility flags */
+#define OCFS2_HAS_COMPAT_FEATURE(sb,mask)                       \
+        ( OCFS2_SB(sb)->s_feature_compat & (mask) )
+#define OCFS2_HAS_RO_COMPAT_FEATURE(sb,mask)                    \
+        ( OCFS2_SB(sb)->s_feature_ro_compat & (mask) )
+#define OCFS2_HAS_INCOMPAT_FEATURE(sb,mask)                     \
+        ( OCFS2_SB(sb)->s_feature_incompat & (mask) )
+#define OCFS2_SET_COMPAT_FEATURE(sb,mask)                       \
+        OCFS2_SB(sb)->s_feature_compat |= (mask)
+#define OCFS2_SET_RO_COMPAT_FEATURE(sb,mask)                    \
+        OCFS2_SB(sb)->s_feature_ro_compat |= (mask)
+#define OCFS2_SET_INCOMPAT_FEATURE(sb,mask)                     \
+        OCFS2_SB(sb)->s_feature_incompat |= (mask)
+#define OCFS2_CLEAR_COMPAT_FEATURE(sb,mask)                     \
+        OCFS2_SB(sb)->s_feature_compat &= ~(mask)
+#define OCFS2_CLEAR_RO_COMPAT_FEATURE(sb,mask)                  \
+        OCFS2_SB(sb)->s_feature_ro_compat &= ~(mask)
+#define OCFS2_CLEAR_INCOMPAT_FEATURE(sb,mask)                   \
+        OCFS2_SB(sb)->s_feature_incompat &= ~(mask)
+
+#define OCFS2_FEATURE_COMPAT_SUPP       (OCFS2_FEATURE_COMPAT_BACKUP_SB \
+                                         | OCFS2_FEATURE_COMPAT_JBD2_SB)
+#define OCFS2_FEATURE_INCOMPAT_SUPP     (OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \
+                                         | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \
+                                         | OCFS2_FEATURE_INCOMPAT_INLINE_DATA \
+                                         | OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP \
+                                         | OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK \
+                                         | OCFS2_FEATURE_INCOMPAT_XATTR)
+#define OCFS2_FEATURE_RO_COMPAT_SUPP    OCFS2_FEATURE_RO_COMPAT_UNWRITTEN
+
+/*
+ * Heartbeat-only devices are missing journals and other files.  The
+ * filesystem driver can't load them, but the library can.  Never put
+ * this in OCFS2_FEATURE_INCOMPAT_SUPP, *ever*.
+ */
+#define OCFS2_FEATURE_INCOMPAT_HEARTBEAT_DEV    0x0002
+
+/*
+ * tunefs sets this incompat flag before starting the resize and clears it
+ * at the end. This flag protects users from inadvertently mounting the fs
+ * after an aborted run without fsck-ing.
+ */
+#define OCFS2_FEATURE_INCOMPAT_RESIZE_INPROG    0x0004
+
+/* Used to denote a non-clustered volume */
+#define OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT      0x0008
+
+/* Support for sparse allocation in b-trees */
+#define OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC     0x0010
+
+/*
+ * Tunefs sets this incompat flag before starting an operation which
+ * would require cleanup on abort.  This is done to protect users from
+ * inadvertently mounting the fs after an aborted run without
+ * fsck-ing.
+ *
+ * s_tunefs_flags on the super block describes precisely which
+ * operations were in progress.
+ */
+#define OCFS2_FEATURE_INCOMPAT_TUNEFS_INPROG    0x0020
+
+/* Support for data packed into inode blocks */
+#define OCFS2_FEATURE_INCOMPAT_INLINE_DATA      0x0040
+
+/*
+ * Support for alternate, userspace cluster stacks.  If set, the superblock
+ * field s_cluster_info contains a tag for the alternate stack in use as
+ * well as the name of the cluster being joined.
+ * mount.ocfs2 must pass in a matching stack name.
+ *
+ * If not set, the classic stack will be used.  This is compatible with
+ * all older versions.
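+ *
+ * (Illustrative sketch; it assumes the classic in-kernel stack's
+ * label is "o2cb": a mount request whose stack label does not match
+ * s_cluster_info.ci_stack must be rejected before joining.)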
+ */ +#define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK 0x0080 + +/* Support for the extended slot map */ +#define OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP 0x100 + +/* Support for extended attributes */ +#define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200 + +/* + * backup superblock flag is used to indicate that this volume + * has backup superblocks. + */ +#define OCFS2_FEATURE_COMPAT_BACKUP_SB 0x0001 + +/* + * The filesystem will correctly handle journal feature bits. + */ +#define OCFS2_FEATURE_COMPAT_JBD2_SB 0x0002 + +/* + * Unwritten extents support. + */ +#define OCFS2_FEATURE_RO_COMPAT_UNWRITTEN 0x0001 + +/* The byte offset of the first backup block will be 1G. + * The following will be 4G, 16G, 64G, 256G and 1T. + */ +#define OCFS2_BACKUP_SB_START 1 << 30 + +/* the max backup superblock nums */ +#define OCFS2_MAX_BACKUP_SUPERBLOCKS 6 + +/* + * Flags on ocfs2_super_block.s_tunefs_flags + */ +#define OCFS2_TUNEFS_INPROG_REMOVE_SLOT 0x0001 /* Removing slots */ + +/* + * Flags on ocfs2_dinode.i_flags + */ +#define OCFS2_VALID_FL (0x00000001) /* Inode is valid */ +#define OCFS2_UNUSED2_FL (0x00000002) +#define OCFS2_ORPHANED_FL (0x00000004) /* On the orphan list */ +#define OCFS2_UNUSED3_FL (0x00000008) +/* System inode flags */ +#define OCFS2_SYSTEM_FL (0x00000010) /* System inode */ +#define OCFS2_SUPER_BLOCK_FL (0x00000020) /* Super block */ +#define OCFS2_LOCAL_ALLOC_FL (0x00000040) /* Slot local alloc bitmap */ +#define OCFS2_BITMAP_FL (0x00000080) /* Allocation bitmap */ +#define OCFS2_JOURNAL_FL (0x00000100) /* Slot local journal */ +#define OCFS2_HEARTBEAT_FL (0x00000200) /* Heartbeat area */ +#define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */ +#define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */ + +/* + * Flags on ocfs2_dinode.i_dyn_features + * + * These can change much more often than i_flags. When adding flags, + * keep in mind that i_dyn_features is only 16 bits wide. + */ +#define OCFS2_INLINE_DATA_FL (0x0001) /* Data stored in inode block */ +#define OCFS2_HAS_XATTR_FL (0x0002) +#define OCFS2_INLINE_XATTR_FL (0x0004) +#define OCFS2_INDEXED_DIR_FL (0x0008) + +/* Inode attributes, keep in sync with EXT2 */ +#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ +#define OCFS2_UNRM_FL (0x00000002) /* Undelete */ +#define OCFS2_COMPR_FL (0x00000004) /* Compress file */ +#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ +#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ +#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ +#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ +#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ +#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ + +#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ +#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ + +/* + * Extent record flags (e_node.leaf.flags) + */ +#define OCFS2_EXT_UNWRITTEN (0x01) /* Extent is allocated but + * unwritten */ + +/* + * ioctl commands + */ +#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) +#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) +#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) +#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) + +/* + * Space reservation / allocation / free ioctls and argument structure + * are designed to be compatible with XFS. + * + * ALLOCSP* and FREESP* are not and will never be supported, but are + * included here for completeness. 
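+ *
+ * Userspace usage sketch (editorial; the values are examples only):
+ *
+ *      struct ocfs2_space_resv sr = {
+ *              .l_whence = 0,            (SEEK_SET)
+ *              .l_start  = 0,
+ *              .l_len    = 1024 * 1024,  (reserve 1MB)
+ *      };
+ *      ioctl(fd, OCFS2_IOC_RESVSP64, &sr);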
+ */ +struct ocfs2_space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserve area */ +}; + +#define OCFS2_IOC_ALLOCSP _IOW ('X', 10, struct ocfs2_space_resv) +#define OCFS2_IOC_FREESP _IOW ('X', 11, struct ocfs2_space_resv) +#define OCFS2_IOC_RESVSP _IOW ('X', 40, struct ocfs2_space_resv) +#define OCFS2_IOC_UNRESVSP _IOW ('X', 41, struct ocfs2_space_resv) +#define OCFS2_IOC_ALLOCSP64 _IOW ('X', 36, struct ocfs2_space_resv) +#define OCFS2_IOC_FREESP64 _IOW ('X', 37, struct ocfs2_space_resv) +#define OCFS2_IOC_RESVSP64 _IOW ('X', 42, struct ocfs2_space_resv) +#define OCFS2_IOC_UNRESVSP64 _IOW ('X', 43, struct ocfs2_space_resv) + +/* Used to pass group descriptor data when online resize is done */ +struct ocfs2_new_group_input { + __u64 group; /* Group descriptor's blkno. */ + __u32 clusters; /* Total number of clusters in this group */ + __u32 frees; /* Total free clusters in this group */ + __u16 chain; /* Chain for this group */ + __u16 reserved1; + __u32 reserved2; +}; + +#define OCFS2_IOC_GROUP_EXTEND _IOW('o', 1, int) +#define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input) +#define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input) + +/* + * Journal Flags (ocfs2_dinode.id1.journal1.i_flags) + */ +#define OCFS2_JOURNAL_DIRTY_FL (0x00000001) /* Journal needs recovery */ + +/* + * superblock s_state flags + */ +#define OCFS2_ERROR_FS (0x00000001) /* FS saw errors */ + +/* Limit of space in ocfs2_dir_entry */ +#define OCFS2_MAX_FILENAME_LEN 255 + +/* Maximum slots on an ocfs2 file system */ +#define OCFS2_MAX_SLOTS 255 + +/* Slot map indicator for an empty slot */ +#define OCFS2_INVALID_SLOT -1 + +#define OCFS2_VOL_UUID_LEN 16 +#define OCFS2_MAX_VOL_LABEL_LEN 64 + +/* The alternate, userspace stack fields */ +#define OCFS2_STACK_LABEL_LEN 4 +#define OCFS2_CLUSTER_NAME_LEN 16 + +/* Journal limits (in bytes) */ +#define OCFS2_MIN_JOURNAL_SIZE (4 * 1024 * 1024) + +/* + * Default local alloc size (in megabytes) + * + * The value chosen should be such that most allocations, including new + * block groups, use local alloc. + */ +#define OCFS2_DEFAULT_LOCAL_ALLOC_SIZE 8 + +/* + * Inline extended attribute size (in bytes) + * The value chosen should be aligned to 16 byte boundaries. 
+ */
+#define OCFS2_MIN_XATTR_INLINE_SIZE     256
+
+struct ocfs2_system_inode_info {
+        char    *si_name;
+        int     si_iflags;
+        int     si_mode;
+};
+
+/* System file index */
+enum {
+        BAD_BLOCK_SYSTEM_INODE = 0,
+        GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+        SLOT_MAP_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
+        HEARTBEAT_SYSTEM_INODE,
+        GLOBAL_BITMAP_SYSTEM_INODE,
+#define OCFS2_LAST_GLOBAL_SYSTEM_INODE GLOBAL_BITMAP_SYSTEM_INODE
+        ORPHAN_DIR_SYSTEM_INODE,
+        EXTENT_ALLOC_SYSTEM_INODE,
+        INODE_ALLOC_SYSTEM_INODE,
+        JOURNAL_SYSTEM_INODE,
+        LOCAL_ALLOC_SYSTEM_INODE,
+        TRUNCATE_LOG_SYSTEM_INODE,
+        NUM_SYSTEM_INODES
+};
+
+static struct ocfs2_system_inode_info ocfs2_system_inodes[NUM_SYSTEM_INODES] = {
+        /* Global system inodes (single copy) */
+        /* The first two are only used from userspace mkfs/tunefs */
+        [BAD_BLOCK_SYSTEM_INODE]                = { "bad_blocks", 0, S_IFREG | 0644 },
+        [GLOBAL_INODE_ALLOC_SYSTEM_INODE]       = { "global_inode_alloc", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 },
+
+        /* These are used by the running filesystem */
+        [SLOT_MAP_SYSTEM_INODE]                 = { "slot_map", 0, S_IFREG | 0644 },
+        [HEARTBEAT_SYSTEM_INODE]                = { "heartbeat", OCFS2_HEARTBEAT_FL, S_IFREG | 0644 },
+        [GLOBAL_BITMAP_SYSTEM_INODE]            = { "global_bitmap", 0, S_IFREG | 0644 },
+
+        /* Slot-specific system inodes (one copy per slot) */
+        [ORPHAN_DIR_SYSTEM_INODE]               = { "orphan_dir:%04d", 0, S_IFDIR | 0755 },
+        [EXTENT_ALLOC_SYSTEM_INODE]             = { "extent_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 },
+        [INODE_ALLOC_SYSTEM_INODE]              = { "inode_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 },
+        [JOURNAL_SYSTEM_INODE]                  = { "journal:%04d", OCFS2_JOURNAL_FL, S_IFREG | 0644 },
+        [LOCAL_ALLOC_SYSTEM_INODE]              = { "local_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_LOCAL_ALLOC_FL, S_IFREG | 0644 },
+        [TRUNCATE_LOG_SYSTEM_INODE]             = { "truncate_log:%04d", OCFS2_DEALLOC_FL, S_IFREG | 0644 }
+};
+
+/* Parameter passed from mount.ocfs2 to module */
+#define OCFS2_HB_NONE                   "heartbeat=none"
+#define OCFS2_HB_LOCAL                  "heartbeat=local"
+
+/*
+ * OCFS2 directory file types.  Only the low 3 bits are used.  The
+ * other bits are reserved for now.
+ */
+#define OCFS2_FT_UNKNOWN        0
+#define OCFS2_FT_REG_FILE       1
+#define OCFS2_FT_DIR            2
+#define OCFS2_FT_CHRDEV         3
+#define OCFS2_FT_BLKDEV         4
+#define OCFS2_FT_FIFO           5
+#define OCFS2_FT_SOCK           6
+#define OCFS2_FT_SYMLINK        7
+
+#define OCFS2_FT_MAX            8
+
+/*
+ * OCFS2_DIR_PAD defines the directory entry boundaries.
+ *
+ * NOTE: It must be a multiple of 4
+ */
+#define OCFS2_DIR_PAD                   4
+#define OCFS2_DIR_ROUND                 (OCFS2_DIR_PAD - 1)
+#define OCFS2_DIR_MEMBER_LEN            offsetof(struct ocfs2_dir_entry, name)
+#define OCFS2_DIR_REC_LEN(name_len)     (((name_len) + OCFS2_DIR_MEMBER_LEN + \
+                                          OCFS2_DIR_ROUND) & \
+                                         ~OCFS2_DIR_ROUND)
+
+#define OCFS2_LINK_MAX          32000
+
+#define S_SHIFT                 12
+static unsigned char ocfs2_type_by_mode[S_IFMT >> S_SHIFT] = {
+        [S_IFREG >> S_SHIFT]    = OCFS2_FT_REG_FILE,
+        [S_IFDIR >> S_SHIFT]    = OCFS2_FT_DIR,
+        [S_IFCHR >> S_SHIFT]    = OCFS2_FT_CHRDEV,
+        [S_IFBLK >> S_SHIFT]    = OCFS2_FT_BLKDEV,
+        [S_IFIFO >> S_SHIFT]    = OCFS2_FT_FIFO,
+        [S_IFSOCK >> S_SHIFT]   = OCFS2_FT_SOCK,
+        [S_IFLNK >> S_SHIFT]    = OCFS2_FT_SYMLINK,
+};
+
+
+/*
+ * Convenience casts
+ */
+#define OCFS2_RAW_SB(dinode)            (&((dinode)->id2.i_super))
+
+/*
+ * On disk extent record for OCFS2
+ * It describes a range of clusters on disk.
+ *
+ * Length fields are divided into interior and leaf node versions.
+ * This leaves room for a flags field (OCFS2_EXT_*) in the leaf nodes.
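+ *
+ * (Layout note, editorial: bytes 4-7 of the 16-byte record hold
+ * e_int_clusters in an interior node, but e_leaf_clusters, e_reserved1
+ * and e_flags in a leaf, so leaves can mark OCFS2_EXT_UNWRITTEN
+ * without growing the record.)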
+ */ +struct ocfs2_extent_rec { +/*00*/ __le32 e_cpos; /* Offset into the file, in clusters */ + union { + __le32 e_int_clusters; /* Clusters covered by all children */ + struct { + __le16 e_leaf_clusters; /* Clusters covered by this + extent */ + __u8 e_reserved1; + __u8 e_flags; /* Extent flags */ + }; + }; + __le64 e_blkno; /* Physical disk offset, in blocks */ +/*10*/ +}; + +struct ocfs2_chain_rec { + __le32 c_free; /* Number of free bits in this chain. */ + __le32 c_total; /* Number of total bits in this chain */ + __le64 c_blkno; /* Physical disk offset (blocks) of 1st group */ +}; + +struct ocfs2_truncate_rec { + __le32 t_start; /* 1st cluster in this log */ + __le32 t_clusters; /* Number of total clusters covered */ +}; + +/* + * On disk extent list for OCFS2 (node in the tree). Note that this + * is contained inside ocfs2_dinode or ocfs2_extent_block, so the + * offsets are relative to ocfs2_dinode.id2.i_list or + * ocfs2_extent_block.h_list, respectively. + */ +struct ocfs2_extent_list { +/*00*/ __le16 l_tree_depth; /* Extent tree depth from this + point. 0 means data extents + hang directly off this + header (a leaf) + NOTE: The high 8 bits cannot be + used - tree_depth is never that big. + */ + __le16 l_count; /* Number of extent records */ + __le16 l_next_free_rec; /* Next unused extent slot */ + __le16 l_reserved1; + __le64 l_reserved2; /* Pad to + sizeof(ocfs2_extent_rec) */ +/*10*/ struct ocfs2_extent_rec l_recs[0]; /* Extent records */ +}; + +/* + * On disk allocation chain list for OCFS2. Note that this is + * contained inside ocfs2_dinode, so the offsets are relative to + * ocfs2_dinode.id2.i_chain. + */ +struct ocfs2_chain_list { +/*00*/ __le16 cl_cpg; /* Clusters per Block Group */ + __le16 cl_bpc; /* Bits per cluster */ + __le16 cl_count; /* Total chains in this list */ + __le16 cl_next_free_rec; /* Next unused chain slot */ + __le64 cl_reserved1; +/*10*/ struct ocfs2_chain_rec cl_recs[0]; /* Chain records */ +}; + +/* + * On disk deallocation log for OCFS2. Note that this is + * contained inside ocfs2_dinode, so the offsets are relative to + * ocfs2_dinode.id2.i_dealloc. + */ +struct ocfs2_truncate_log { +/*00*/ __le16 tl_count; /* Total records in this log */ + __le16 tl_used; /* Number of records in use */ + __le32 tl_reserved1; +/*08*/ struct ocfs2_truncate_rec tl_recs[0]; /* Truncate records */ +}; + +/* + * On disk extent block (indirect block) for OCFS2 + */ +struct ocfs2_extent_block +{ +/*00*/ __u8 h_signature[8]; /* Signature for verification */ + __le64 h_reserved1; +/*10*/ __le16 h_suballoc_slot; /* Slot suballocator this + extent_header belongs to */ + __le16 h_suballoc_bit; /* Bit offset in suballocator + block group */ + __le32 h_fs_generation; /* Must match super block */ + __le64 h_blkno; /* Offset on disk, in blocks */ +/*20*/ __le64 h_reserved3; + __le64 h_next_leaf_blk; /* Offset on disk, in blocks, + of next leaf header pointing + to data */ +/*30*/ struct ocfs2_extent_list h_list; /* Extent record list */ +/* Actual on-disk size is one block */ +}; + +/* + * On disk slot map for OCFS2. This defines the contents of the "slot_map" + * system file. A slot is valid if it contains a node number >= 0. The + * value -1 (0xFFFF) is OCFS2_INVALID_SLOT. This marks a slot empty. + */ +struct ocfs2_slot_map { +/*00*/ __le16 sm_slots[0]; +/* + * Actual on-disk size is one block. OCFS2_MAX_SLOTS is 255, + * 255 * sizeof(__le16) == 512B, within the 512B block minimum blocksize. 
+ */ +}; + +struct ocfs2_extended_slot { +/*00*/ __u8 es_valid; + __u8 es_reserved1[3]; + __le32 es_node_num; +/*10*/ +}; + +/* + * The extended slot map, used when OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP + * is set. It separates out the valid marker from the node number, and + * has room to grow. Unlike the old slot map, this format is defined by + * i_size. + */ +struct ocfs2_slot_map_extended { +/*00*/ struct ocfs2_extended_slot se_slots[0]; +/* + * Actual size is i_size of the slot_map system file. It should + * match s_max_slots * sizeof(struct ocfs2_extended_slot) + */ +}; + +struct ocfs2_cluster_info { +/*00*/ __u8 ci_stack[OCFS2_STACK_LABEL_LEN]; + __le32 ci_reserved; +/*08*/ __u8 ci_cluster[OCFS2_CLUSTER_NAME_LEN]; +/*18*/ +}; + +/* + * On disk superblock for OCFS2 + * Note that it is contained inside an ocfs2_dinode, so all offsets + * are relative to the start of ocfs2_dinode.id2. + */ +struct ocfs2_super_block { +/*00*/ __le16 s_major_rev_level; + __le16 s_minor_rev_level; + __le16 s_mnt_count; + __le16 s_max_mnt_count; + __le16 s_state; /* File system state */ + __le16 s_errors; /* Behaviour when detecting errors */ + __le32 s_checkinterval; /* Max time between checks */ +/*10*/ __le64 s_lastcheck; /* Time of last check */ + __le32 s_creator_os; /* OS */ + __le32 s_feature_compat; /* Compatible feature set */ +/*20*/ __le32 s_feature_incompat; /* Incompatible feature set */ + __le32 s_feature_ro_compat; /* Readonly-compatible feature set */ + __le64 s_root_blkno; /* Offset, in blocks, of root directory + dinode */ +/*30*/ __le64 s_system_dir_blkno; /* Offset, in blocks, of system + directory dinode */ + __le32 s_blocksize_bits; /* Blocksize for this fs */ + __le32 s_clustersize_bits; /* Clustersize for this fs */ +/*40*/ __le16 s_max_slots; /* Max number of simultaneous mounts + before tunefs required */ + __le16 s_tunefs_flag; + __le32 s_uuid_hash; /* hash value of uuid */ + __le64 s_first_cluster_group; /* Block offset of 1st cluster + * group header */ +/*50*/ __u8 s_label[OCFS2_MAX_VOL_LABEL_LEN]; /* Label for mounting, etc. */ +/*90*/ __u8 s_uuid[OCFS2_VOL_UUID_LEN]; /* 128-bit uuid */ +/*A0*/ struct ocfs2_cluster_info s_cluster_info; /* Selected userspace + stack. Only valid + with INCOMPAT flag. */ +/*B8*/ __le16 s_xattr_inline_size; /* extended attribute inline size + for this fs*/ + __le16 s_reserved0; + __le32 s_reserved1; +/*C0*/ __le64 s_reserved2[16]; /* Fill out superblock */ +/*140*/ + + /* + * NOTE: As stated above, all offsets are relative to + * ocfs2_dinode.id2, which is at 0xC0 in the inode. + * 0xC0 + 0x140 = 0x200 or 512 bytes. A superblock must fit within + * our smallest blocksize, which is 512 bytes. To ensure this, + * we reserve the space in s_reserved2. Anything past s_reserved2 + * will not be available on the smallest blocksize. + */ +}; + +/* + * Local allocation bitmap for OCFS2 slots + * Note that it exists inside an ocfs2_dinode, so all offsets are + * relative to the start of ocfs2_dinode.id2. + */ +struct ocfs2_local_alloc +{ +/*00*/ __le32 la_bm_off; /* Starting bit offset in main bitmap */ + __le16 la_size; /* Size of included bitmap, in bytes */ + __le16 la_reserved1; + __le64 la_reserved2; +/*10*/ __u8 la_bitmap[0]; +}; + +/* + * Data-in-inode header. This is only used if i_dyn_features has + * OCFS2_INLINE_DATA_FL set. 
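+ *
+ * (Editorial example: with 4K blocks, id_data starts 0xC8 (200) bytes
+ * into the dinode, leaving 4096 - 200 = 3896 bytes for inline file
+ * data; see ocfs2_max_inline_data() below.)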
+ */
+struct ocfs2_inline_data
+{
+/*00*/  __le16  id_count;       /* Number of bytes that can be used
+                                 * for data, starting at id_data */
+        __le16  id_reserved0;
+        __le32  id_reserved1;
+        __u8    id_data[0];     /* Start of user data */
+};
+
+/*
+ * On disk inode for OCFS2
+ */
+struct ocfs2_dinode {
+/*00*/  __u8 i_signature[8];            /* Signature for validation */
+        __le32 i_generation;            /* Generation number */
+        __le16 i_suballoc_slot;         /* Slot suballocator this inode
+                                           belongs to */
+        __le16 i_suballoc_bit;          /* Bit offset in suballocator
+                                           block group */
+/*10*/  __le16 i_reserved0;
+        __le16 i_xattr_inline_size;
+        __le32 i_clusters;              /* Cluster count */
+        __le32 i_uid;                   /* Owner UID */
+        __le32 i_gid;                   /* Owning GID */
+/*20*/  __le64 i_size;                  /* Size in bytes */
+        __le16 i_mode;                  /* File mode */
+        __le16 i_links_count;           /* Links count */
+        __le32 i_flags;                 /* File flags */
+/*30*/  __le64 i_atime;                 /* Access time */
+        __le64 i_ctime;                 /* Change time */
+/*40*/  __le64 i_mtime;                 /* Modification time */
+        __le64 i_dtime;                 /* Deletion time */
+/*50*/  __le64 i_blkno;                 /* Offset on disk, in blocks */
+        __le64 i_last_eb_blk;           /* Pointer to last extent
+                                           block */
+/*60*/  __le32 i_fs_generation;         /* Generation per fs-instance */
+        __le32 i_atime_nsec;
+        __le32 i_ctime_nsec;
+        __le32 i_mtime_nsec;
+/*70*/  __le32 i_attr;
+        __le16 i_orphaned_slot;         /* Only valid when OCFS2_ORPHANED_FL
+                                           was set in i_flags */
+        __le16 i_dyn_features;
+        __le64 i_xattr_loc;
+/*80*/  __le64 i_reserved2[7];
+/*B8*/  union {
+                __le64 i_pad1;          /* Generic way to refer to this
+                                           64bit union */
+                struct {
+                        __le64 i_rdev;  /* Device number */
+                } dev1;
+                struct {                /* Info for bitmap system
+                                           inodes */
+                        __le32 i_used;  /* Bits (ie, clusters) used  */
+                        __le32 i_total; /* Total bits (clusters)
+                                           available */
+                } bitmap1;
+                struct {                /* Info for journal system
+                                           inodes */
+                        __le32 ij_flags;        /* Mounted, version, etc. */
+                        __le32 ij_recovery_generation; /* Incremented when the
+                                                          journal is recovered
+                                                          after an unclean
+                                                          shutdown */
+                } journal1;
+        } id1;                          /* Inode type dependent 1 */
+/*C0*/  union {
+                struct ocfs2_super_block        i_super;
+                struct ocfs2_local_alloc        i_lab;
+                struct ocfs2_chain_list         i_chain;
+                struct ocfs2_extent_list        i_list;
+                struct ocfs2_truncate_log       i_dealloc;
+                struct ocfs2_inline_data        i_data;
+                __u8                            i_symlink[0];
+        } id2;
+/* Actual on-disk size is one block */
+};
+
+/*
+ * On-disk directory entry structure for OCFS2
+ *
+ * Packed as this structure could be accessed unaligned on 64-bit platforms
+ */
+struct ocfs2_dir_entry {
+/*00*/  __le64   inode;                 /* Inode number */
+        __le16   rec_len;               /* Directory entry length */
+        __u8     name_len;              /* Name length */
+        __u8     file_type;
+/*0C*/  char     name[OCFS2_MAX_FILENAME_LEN]; /* File name */
+/* Actual on-disk length specified by rec_len */
+} __attribute__ ((packed));
+
+/*
+ * On disk allocator group structure for OCFS2
+ */
+struct ocfs2_group_desc
+{
+/*00*/  __u8     bg_signature[8];       /* Signature for validation */
+        __le16   bg_size;               /* Size of included bitmap in
+                                           bytes. */
+        __le16   bg_bits;               /* Bits represented by this
+                                           group. */
+        __le16   bg_free_bits_count;    /* Free bits count */
+        __le16   bg_chain;              /* What chain I am in. */
+/*10*/  __le32   bg_generation;
+        __le32   bg_reserved1;
+        __le64   bg_next_group;         /* Next group in my list, in
+                                           blocks */
+/*20*/  __le64   bg_parent_dinode;      /* dinode which owns me, in
+                                           blocks */
+        __le64   bg_blkno;              /* Offset on disk, in blocks */
+/*30*/  __le64   bg_reserved2[2];
+/*40*/  __u8     bg_bitmap[0];
+};
+
+/*
+ * On disk extended attribute structure for OCFS2.
+ */
+
+/*
+ * ocfs2_xattr_entry indicates one extended attribute.
+ *
+ * Note that it can be stored in inode, one block or one xattr bucket.
+ */
+struct ocfs2_xattr_entry {
+        __le32  xe_name_hash;    /* hash value of xattr prefix+suffix. */
+        __le16  xe_name_offset;  /* byte offset from the 1st entry in the
+                                    local xattr storage (inode, xattr block or
+                                    xattr bucket). */
+        __u8    xe_name_len;     /* xattr name len, doesn't include prefix. */
+        __u8    xe_type;         /* the low 7 bits indicate the name prefix
+                                  * type and the highest bit indicates whether
+                                  * the EA is stored in the local storage. */
+        __le64  xe_value_size;   /* real xattr value length. */
+};
+
+/*
+ * On disk structure for xattr header.
+ *
+ * One ocfs2_xattr_header describes how many ocfs2_xattr_entry records
+ * are in the local xattr storage.
+ */
+struct ocfs2_xattr_header {
+        __le16  xh_count;                       /* contains the count of how
+                                                   many records are in the
+                                                   local xattr storage. */
+        __le16  xh_free_start;                  /* current offset for storing
+                                                   xattr. */
+        __le16  xh_name_value_len;              /* total length of name/value
+                                                   length in this bucket. */
+        __le16  xh_num_buckets;                 /* Number of xattr buckets
+                                                   in this extent record,
+                                                   only valid in the first
+                                                   bucket. */
+        __le64  xh_csum;
+        struct ocfs2_xattr_entry xh_entries[0]; /* xattr entry list. */
+};
+
+/*
+ * On disk structure for xattr value root.
+ *
+ * When an xattr's value is large enough, it is stored in an external
+ * b-tree like file data.  The xattr value root points to this structure.
+ */
+struct ocfs2_xattr_value_root {
+/*00*/  __le32  xr_clusters;              /* clusters covered by xattr value. */
+        __le32  xr_reserved0;
+        __le64  xr_last_eb_blk;           /* Pointer to last extent block */
+/*10*/  struct ocfs2_extent_list xr_list; /* Extent record list */
+};
+
+/*
+ * On disk structure for xattr tree root.
+ *
+ * It is used when there are too many extended attributes for one file.  These
+ * attributes will be organized and stored in an indexed b-tree.
+ */
+struct ocfs2_xattr_tree_root {
+/*00*/  __le32  xt_clusters;              /* clusters covered by xattr. */
+        __le32  xt_reserved0;
+        __le64  xt_last_eb_blk;           /* Pointer to last extent block */
+/*10*/  struct ocfs2_extent_list xt_list; /* Extent record list */
+};
+
+#define OCFS2_XATTR_INDEXED     0x1
+#define OCFS2_HASH_SHIFT        5
+#define OCFS2_XATTR_ROUND       3
+#define OCFS2_XATTR_SIZE(size)  (((size) + OCFS2_XATTR_ROUND) & \
+                                 ~(OCFS2_XATTR_ROUND))
+
+#define OCFS2_XATTR_BUCKET_SIZE                 4096
+#define OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET       (OCFS2_XATTR_BUCKET_SIZE \
+                                                 / OCFS2_MIN_BLOCKSIZE)
+
+/*
+ * On disk structure for xattr block.
+ */
+struct ocfs2_xattr_block {
+/*00*/  __u8    xb_signature[8];     /* Signature for verification */
+        __le16  xb_suballoc_slot;    /* Slot suballocator this
+                                        block belongs to. */
+        __le16  xb_suballoc_bit;     /* Bit offset in suballocator
+                                        block group */
+        __le32  xb_fs_generation;    /* Must match super block */
+/*10*/  __le64  xb_blkno;            /* Offset on disk, in blocks */
+        __le64  xb_csum;
+/*20*/  __le16  xb_flags;            /* Indicates whether this block contains
+                                        real xattr or a xattr tree. */
+        __le16  xb_reserved0;
+        __le32  xb_reserved1;
+        __le64  xb_reserved2;
+/*30*/  union {
+                struct ocfs2_xattr_header xb_header; /* xattr header if this
+                                                        block contains xattr */
+                struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this
+                                                        block contains xattr
+                                                        tree.
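+                                                        (xb_flags above, via
+                                                        OCFS2_XATTR_INDEXED,
+                                                        selects which union
+                                                        member is live.)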
*/ + } xb_attrs; +}; + +#define OCFS2_XATTR_ENTRY_LOCAL 0x80 +#define OCFS2_XATTR_TYPE_MASK 0x7F +static inline void ocfs2_xattr_set_local(struct ocfs2_xattr_entry *xe, + int local) +{ + if (local) + xe->xe_type |= OCFS2_XATTR_ENTRY_LOCAL; + else + xe->xe_type &= ~OCFS2_XATTR_ENTRY_LOCAL; +} + +static inline int ocfs2_xattr_is_local(struct ocfs2_xattr_entry *xe) +{ + return xe->xe_type & OCFS2_XATTR_ENTRY_LOCAL; +} + +static inline void ocfs2_xattr_set_type(struct ocfs2_xattr_entry *xe, int type) +{ + xe->xe_type |= type & OCFS2_XATTR_TYPE_MASK; +} + +static inline int ocfs2_xattr_get_type(struct ocfs2_xattr_entry *xe) +{ + return xe->xe_type & OCFS2_XATTR_TYPE_MASK; +} + +#ifdef __KERNEL__ +static inline int ocfs2_fast_symlink_chars(struct super_block *sb) +{ + return sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_symlink); +} + +static inline int ocfs2_max_inline_data(struct super_block *sb) +{ + return sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data); +} + +static inline int ocfs2_max_inline_data_with_xattr(struct super_block *sb, + struct ocfs2_dinode *di) +{ + unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); + + if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) + return sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data) - + xattrsize; + else + return sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_data.id_data); +} + +static inline int ocfs2_extent_recs_per_inode(struct super_block *sb) +{ + int size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} + +static inline int ocfs2_extent_recs_per_inode_with_xattr( + struct super_block *sb, + struct ocfs2_dinode *di) +{ + int size; + unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size); + + if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL) + size = sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_list.l_recs) - + xattrsize; + else + size = sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} + +static inline int ocfs2_chain_recs_per_inode(struct super_block *sb) +{ + int size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_chain.cl_recs); + + return size / sizeof(struct ocfs2_chain_rec); +} + +static inline u16 ocfs2_extent_recs_per_eb(struct super_block *sb) +{ + int size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_extent_block, h_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} + +static inline u16 ocfs2_local_alloc_size(struct super_block *sb) +{ + u16 size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap); + + return size; +} + +static inline int ocfs2_group_bitmap_size(struct super_block *sb) +{ + int size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_group_desc, bg_bitmap); + + return size; +} + +static inline int ocfs2_truncate_recs_per_inode(struct super_block *sb) +{ + int size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_dinode, id2.i_dealloc.tl_recs); + + return size / sizeof(struct ocfs2_truncate_rec); +} + +static inline u64 ocfs2_backup_super_blkno(struct super_block *sb, int index) +{ + u64 offset = OCFS2_BACKUP_SB_START; + + if (index >= 0 && index < OCFS2_MAX_BACKUP_SUPERBLOCKS) { + offset <<= (2 * index); + offset >>= sb->s_blocksize_bits; + return offset; + } + + return 0; + +} + +static inline u16 ocfs2_xattr_recs_per_xb(struct super_block *sb) +{ + int size; + + 
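+        /* room left in the block after the tree root's record-array header */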
size = sb->s_blocksize - + offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_root.xt_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} +#else +static inline int ocfs2_fast_symlink_chars(int blocksize) +{ + return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); +} + +static inline int ocfs2_max_inline_data(int blocksize) +{ + return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); +} + +static inline int ocfs2_extent_recs_per_inode(int blocksize) +{ + int size; + + size = blocksize - + offsetof(struct ocfs2_dinode, id2.i_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} + +static inline int ocfs2_chain_recs_per_inode(int blocksize) +{ + int size; + + size = blocksize - + offsetof(struct ocfs2_dinode, id2.i_chain.cl_recs); + + return size / sizeof(struct ocfs2_chain_rec); +} + +static inline int ocfs2_extent_recs_per_eb(int blocksize) +{ + int size; + + size = blocksize - + offsetof(struct ocfs2_extent_block, h_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} + +static inline int ocfs2_local_alloc_size(int blocksize) +{ + int size; + + size = blocksize - + offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap); + + return size; +} + +static inline int ocfs2_group_bitmap_size(int blocksize) +{ + int size; + + size = blocksize - + offsetof(struct ocfs2_group_desc, bg_bitmap); + + return size; +} + +static inline int ocfs2_truncate_recs_per_inode(int blocksize) +{ + int size; + + size = blocksize - + offsetof(struct ocfs2_dinode, id2.i_dealloc.tl_recs); + + return size / sizeof(struct ocfs2_truncate_rec); +} + +static inline uint64_t ocfs2_backup_super_blkno(int blocksize, int index) +{ + uint64_t offset = OCFS2_BACKUP_SB_START; + + if (index >= 0 && index < OCFS2_MAX_BACKUP_SUPERBLOCKS) { + offset <<= (2 * index); + offset /= blocksize; + return offset; + } + + return 0; +} + +static inline int ocfs2_xattr_recs_per_xb(int blocksize) +{ + int size; + + size = blocksize - + offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_root.xt_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} +#endif /* __KERNEL__ */ + + +static inline int ocfs2_system_inode_is_global(int type) +{ + return ((type >= 0) && + (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE)); +} + +static inline int ocfs2_sprintf_system_inode_name(char *buf, int len, + int type, int slot) +{ + int chars; + + /* + * Global system inodes can only have one copy. Everything + * after OCFS2_LAST_GLOBAL_SYSTEM_INODE in the system inode + * list has a copy per slot. + */ + if (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE) + chars = snprintf(buf, len, "%s", + ocfs2_system_inodes[type].si_name); + else + chars = snprintf(buf, len, + ocfs2_system_inodes[type].si_name, + slot); + + return chars; +} + +static inline void ocfs2_set_de_type(struct ocfs2_dir_entry *de, + umode_t mode) +{ + de->file_type = ocfs2_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; +} + +#endif /* _OCFS2_FS_H */ + diff --git a/fs/ocfs2/ocfs2_jbd_compat.h b/fs/ocfs2/ocfs2_jbd_compat.h new file mode 100644 index 0000000..b91c78f --- /dev/null +++ b/fs/ocfs2/ocfs2_jbd_compat.h @@ -0,0 +1,82 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs2_jbd_compat.h + * + * Compatibility defines for JBD. + * + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef OCFS2_JBD_COMPAT_H +#define OCFS2_JBD_COMPAT_H + +#ifndef CONFIG_OCFS2_COMPAT_JBD +# error Should not have been included +#endif + +struct jbd2_inode { + unsigned int dummy; +}; + +#define JBD2_BARRIER JFS_BARRIER +#define JBD2_DEFAULT_MAX_COMMIT_AGE JBD_DEFAULT_MAX_COMMIT_AGE + +#define jbd2_journal_ack_err journal_ack_err +#define jbd2_journal_clear_err journal_clear_err +#define jbd2_journal_destroy journal_destroy +#define jbd2_journal_dirty_metadata journal_dirty_metadata +#define jbd2_journal_errno journal_errno +#define jbd2_journal_extend journal_extend +#define jbd2_journal_flush journal_flush +#define jbd2_journal_force_commit journal_force_commit +#define jbd2_journal_get_write_access journal_get_write_access +#define jbd2_journal_get_undo_access journal_get_undo_access +#define jbd2_journal_init_inode journal_init_inode +#define jbd2_journal_invalidatepage journal_invalidatepage +#define jbd2_journal_load journal_load +#define jbd2_journal_lock_updates journal_lock_updates +#define jbd2_journal_restart journal_restart +#define jbd2_journal_start journal_start +#define jbd2_journal_start_commit journal_start_commit +#define jbd2_journal_stop journal_stop +#define jbd2_journal_try_to_free_buffers journal_try_to_free_buffers +#define jbd2_journal_unlock_updates journal_unlock_updates +#define jbd2_journal_wipe journal_wipe +#define jbd2_log_wait_commit log_wait_commit + +static inline int jbd2_journal_file_inode(handle_t *handle, + struct jbd2_inode *inode) +{ + return 0; +} + +static inline int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, + loff_t new_size) +{ + return 0; +} + +static inline void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, + struct inode *inode) +{ + return; +} + +static inline void jbd2_journal_release_jbd_inode(journal_t *journal, + struct jbd2_inode *jinode) +{ + return; +} + + +#endif /* OCFS2_JBD_COMPAT_H */ diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h new file mode 100644 index 0000000..82c200f --- /dev/null +++ b/fs/ocfs2/ocfs2_lockid.h @@ -0,0 +1,108 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ocfs2_lockid.h + * + * Defines OCFS2 lockid bits. + * + * Copyright (C) 2002, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */
+
+#ifndef OCFS2_LOCKID_H
+#define OCFS2_LOCKID_H
+
+/* lock ids are made up in the following manner:
+ * name[0]     --> type
+ * name[1-6]   --> 6 pad characters, reserved for now
+ * name[7-22]  --> block number, expressed in hex as 16 chars
+ * name[23-30] --> i_generation, expressed in hex as 8 chars
+ * name[31]    --> '\0' */
+#define OCFS2_LOCK_ID_MAX_LEN  32
+#define OCFS2_LOCK_ID_PAD "000000"
+
+#define OCFS2_DENTRY_LOCK_INO_START 18
+
+enum ocfs2_lock_type {
+        OCFS2_LOCK_TYPE_META = 0,
+        OCFS2_LOCK_TYPE_DATA,
+        OCFS2_LOCK_TYPE_SUPER,
+        OCFS2_LOCK_TYPE_RENAME,
+        OCFS2_LOCK_TYPE_RW,
+        OCFS2_LOCK_TYPE_DENTRY,
+        OCFS2_LOCK_TYPE_OPEN,
+        OCFS2_LOCK_TYPE_FLOCK,
+        OCFS2_NUM_LOCK_TYPES
+};
+
+static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type)
+{
+        char c;
+        switch (type) {
+                case OCFS2_LOCK_TYPE_META:
+                        c = 'M';
+                        break;
+                case OCFS2_LOCK_TYPE_DATA:
+                        c = 'D';
+                        break;
+                case OCFS2_LOCK_TYPE_SUPER:
+                        c = 'S';
+                        break;
+                case OCFS2_LOCK_TYPE_RENAME:
+                        c = 'R';
+                        break;
+                case OCFS2_LOCK_TYPE_RW:
+                        c = 'W';
+                        break;
+                case OCFS2_LOCK_TYPE_DENTRY:
+                        c = 'N';
+                        break;
+                case OCFS2_LOCK_TYPE_OPEN:
+                        c = 'O';
+                        break;
+                case OCFS2_LOCK_TYPE_FLOCK:
+                        c = 'F';
+                        break;
+                default:
+                        c = '\0';
+        }
+
+        return c;
+}
+
+static char *ocfs2_lock_type_strings[] = {
+        [OCFS2_LOCK_TYPE_META] = "Meta",
+        [OCFS2_LOCK_TYPE_DATA] = "Data",
+        [OCFS2_LOCK_TYPE_SUPER] = "Super",
+        [OCFS2_LOCK_TYPE_RENAME] = "Rename",
+        /* Need to differentiate from [R]ename... serializing writes is the
+         * important job it does, anyway. */
+        [OCFS2_LOCK_TYPE_RW] = "Write/Read",
+        [OCFS2_LOCK_TYPE_DENTRY] = "Dentry",
+        [OCFS2_LOCK_TYPE_OPEN] = "Open",
+        [OCFS2_LOCK_TYPE_FLOCK] = "Flock",
+};
+
+static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
+{
+#ifdef __KERNEL__
+        BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
+#endif
+        return ocfs2_lock_type_strings[type];
+}
+
+#endif  /* OCFS2_LOCKID_H */
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
new file mode 100644
index 0000000..82d5eea
--- /dev/null
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -0,0 +1,30 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_lockingver.h
+ *
+ * Defines OCFS2 Locking version values.
+ *
+ * Copyright (C) 2008 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_LOCKINGVER_H
+#define OCFS2_LOCKINGVER_H
+
+/*
+ * The protocol version for ocfs2 cluster locking.  See dlmglue.c for
+ * more details.
+ */
+#define OCFS2_LOCKING_PROTOCOL_MAJOR 1
+#define OCFS2_LOCKING_PROTOCOL_MINOR 0
+
+#endif  /* OCFS2_LOCKINGVER_H */
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
new file mode 100644
index 0000000..ffd48db
--- /dev/null
+++ b/fs/ocfs2/resize.c
@@ -0,0 +1,633 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * resize.c
+ *
+ * volume resize.
+ * Inspired by ext3/resize.c.
+ *
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+
+#define MLOG_MASK_PREFIX ML_DISK_ALLOC
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "inode.h"
+#include "journal.h"
+#include "super.h"
+#include "sysfile.h"
+#include "uptodate.h"
+
+#include "buffer_head_io.h"
+#include "suballoc.h"
+#include "resize.h"
+
+/*
+ * Check whether new backup superblocks exist in the last group.  If
+ * there are any, mark or clear them in the bitmap.
+ *
+ * Return how many backups we find in the last group.
+ */
+static u16 ocfs2_calc_new_backup_super(struct inode *inode,
+                                       struct ocfs2_group_desc *gd,
+                                       int new_clusters,
+                                       u32 first_new_cluster,
+                                       u16 cl_cpg,
+                                       int set)
+{
+        int i;
+        u16 backups = 0;
+        u32 cluster;
+        u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);
+
+        for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
+                blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
+                cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
+
+                gd_blkno = ocfs2_which_cluster_group(inode, cluster);
+                if (gd_blkno < lgd_blkno)
+                        continue;
+                else if (gd_blkno > lgd_blkno)
+                        break;
+
+                if (set)
+                        ocfs2_set_bit(cluster % cl_cpg,
+                                      (unsigned long *)gd->bg_bitmap);
+                else
+                        ocfs2_clear_bit(cluster % cl_cpg,
+                                        (unsigned long *)gd->bg_bitmap);
+                backups++;
+        }
+
+        mlog_exit_void();
+        return backups;
+}
+
+static int ocfs2_update_last_group_and_inode(handle_t *handle,
+                                             struct inode *bm_inode,
+                                             struct buffer_head *bm_bh,
+                                             struct buffer_head *group_bh,
+                                             u32 first_new_cluster,
+                                             int new_clusters)
+{
+        int ret = 0;
+        struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
+        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
+        struct ocfs2_chain_list *cl = &fe->id2.i_chain;
+        struct ocfs2_chain_rec *cr;
+        struct ocfs2_group_desc *group;
+        u16 chain, num_bits, backups = 0;
+        u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
+        u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
+
+        mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n",
+                   new_clusters, first_new_cluster);
+
+        ret = ocfs2_journal_access(handle, bm_inode, group_bh,
+                                   OCFS2_JOURNAL_ACCESS_WRITE);
+        if (ret < 0) {
+                mlog_errno(ret);
+                goto out;
+        }
+
+        group = (struct ocfs2_group_desc *)group_bh->b_data;
+
+        /* update the group first. */
+        num_bits = new_clusters * cl_bpc;
+        le16_add_cpu(&group->bg_bits, num_bits);
+        le16_add_cpu(&group->bg_free_bits_count, num_bits);
+
+        /*
+         * check whether any new backup superblocks exist in
+         * this group and update the group bitmap accordingly.
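+         * (Backup superblocks sit at the fixed 1G/4G/16G/64G/256G/1T
+         * offsets defined by OCFS2_BACKUP_SB_START, so at most a few
+         * of them can land inside the group being extended.)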
+ */ + if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, + OCFS2_FEATURE_COMPAT_BACKUP_SB)) { + backups = ocfs2_calc_new_backup_super(bm_inode, + group, + new_clusters, + first_new_cluster, + cl_cpg, 1); + le16_add_cpu(&group->bg_free_bits_count, -1 * backups); + } + + ret = ocfs2_journal_dirty(handle, group_bh); + if (ret < 0) { + mlog_errno(ret); + goto out_rollback; + } + + /* update the inode accordingly. */ + ret = ocfs2_journal_access(handle, bm_inode, bm_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out_rollback; + } + + chain = le16_to_cpu(group->bg_chain); + cr = (&cl->cl_recs[chain]); + le32_add_cpu(&cr->c_total, num_bits); + le32_add_cpu(&cr->c_free, num_bits); + le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits); + le32_add_cpu(&fe->i_clusters, new_clusters); + + if (backups) { + le32_add_cpu(&cr->c_free, -1 * backups); + le32_add_cpu(&fe->id1.bitmap1.i_used, backups); + } + + spin_lock(&OCFS2_I(bm_inode)->ip_lock); + OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters); + le64_add_cpu(&fe->i_size, new_clusters << osb->s_clustersize_bits); + spin_unlock(&OCFS2_I(bm_inode)->ip_lock); + i_size_write(bm_inode, le64_to_cpu(fe->i_size)); + + ocfs2_journal_dirty(handle, bm_bh); + +out_rollback: + if (ret < 0) { + ocfs2_calc_new_backup_super(bm_inode, + group, + new_clusters, + first_new_cluster, + cl_cpg, 0); + le16_add_cpu(&group->bg_free_bits_count, backups); + le16_add_cpu(&group->bg_bits, -1 * num_bits); + le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits); + } +out: + mlog_exit(ret); + return ret; +} + +static int update_backups(struct inode * inode, u32 clusters, char *data) +{ + int i, ret = 0; + u32 cluster; + u64 blkno; + struct buffer_head *backup = NULL; + struct ocfs2_dinode *backup_di = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + /* calculate the real backups we need to update. */ + for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) { + blkno = ocfs2_backup_super_blkno(inode->i_sb, i); + cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno); + if (cluster > clusters) + break; + + ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup); + if (ret < 0) { + mlog_errno(ret); + break; + } + + memcpy(backup->b_data, data, inode->i_sb->s_blocksize); + + backup_di = (struct ocfs2_dinode *)backup->b_data; + backup_di->i_blkno = cpu_to_le64(blkno); + + ret = ocfs2_write_super_or_backup(osb, backup); + brelse(backup); + backup = NULL; + if (ret < 0) { + mlog_errno(ret); + break; + } + } + + return ret; +} + +static void ocfs2_update_super_and_backups(struct inode *inode, + int new_clusters) +{ + int ret; + u32 clusters = 0; + struct buffer_head *super_bh = NULL; + struct ocfs2_dinode *super_di = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + /* + * update the superblock last. + * It doesn't matter if the write failed. + */ + ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1, + &super_bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + super_di = (struct ocfs2_dinode *)super_bh->b_data; + le32_add_cpu(&super_di->i_clusters, new_clusters); + clusters = le32_to_cpu(super_di->i_clusters); + + ret = ocfs2_write_super_or_backup(osb, super_bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB)) + ret = update_backups(inode, clusters, super_bh->b_data); + +out: + brelse(super_bh); + if (ret) + printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s" + " during fs resize. 
This condition is not fatal," + " but fsck.ocfs2 should be run to fix it\n", + osb->dev_str); + return; +} + +/* + * Extend the filesystem to the new number of clusters specified. This entry + * point is only used to extend the current filesystem to the end of the last + * existing group. + */ +int ocfs2_group_extend(struct inode * inode, int new_clusters) +{ + int ret; + handle_t *handle; + struct buffer_head *main_bm_bh = NULL; + struct buffer_head *group_bh = NULL; + struct inode *main_bm_inode = NULL; + struct ocfs2_dinode *fe = NULL; + struct ocfs2_group_desc *group = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u16 cl_bpc; + u32 first_new_cluster; + u64 lgd_blkno; + + mlog_entry_void(); + + if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + return -EROFS; + + if (new_clusters < 0) + return -EINVAL; + else if (new_clusters == 0) + return 0; + + main_bm_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (!main_bm_inode) { + ret = -EINVAL; + mlog_errno(ret); + goto out; + } + + mutex_lock(&main_bm_inode->i_mutex); + + ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); + if (ret < 0) { + mlog_errno(ret); + goto out_mutex; + } + + fe = (struct ocfs2_dinode *)main_bm_bh->b_data; + + if (le16_to_cpu(fe->id2.i_chain.cl_cpg) != + ocfs2_group_bitmap_size(osb->sb) * 8) { + mlog(ML_ERROR, "The disk is too old and small. " + "Force to do offline resize."); + ret = -EINVAL; + goto out_unlock; + } + + if (!OCFS2_IS_VALID_DINODE(fe)) { + OCFS2_RO_ON_INVALID_DINODE(main_bm_inode->i_sb, fe); + ret = -EIO; + goto out_unlock; + } + + first_new_cluster = le32_to_cpu(fe->i_clusters); + lgd_blkno = ocfs2_which_cluster_group(main_bm_inode, + first_new_cluster - 1); + + ret = ocfs2_read_block(main_bm_inode, lgd_blkno, &group_bh); + if (ret < 0) { + mlog_errno(ret); + goto out_unlock; + } + + group = (struct ocfs2_group_desc *)group_bh->b_data; + + ret = ocfs2_check_group_descriptor(inode->i_sb, fe, group); + if (ret) { + mlog_errno(ret); + goto out_unlock; + } + + cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc); + if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters > + le16_to_cpu(fe->id2.i_chain.cl_cpg)) { + ret = -EINVAL; + goto out_unlock; + } + + mlog(0, "extend the last group at %llu, new clusters = %d\n", + (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters); + + handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS); + if (IS_ERR(handle)) { + mlog_errno(PTR_ERR(handle)); + ret = -EINVAL; + goto out_unlock; + } + + /* update the last group descriptor and inode. 
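+	 * The superblock and its backups are rewritten outside this
+	 * transaction by ocfs2_update_super_and_backups(); as the warning
+	 * in that helper notes, a lost write there is non-fatal and
+	 * fsck.ocfs2 can repair it.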
*/ + ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode, + main_bm_bh, group_bh, + first_new_cluster, + new_clusters); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ocfs2_update_super_and_backups(main_bm_inode, new_clusters); + +out_commit: + ocfs2_commit_trans(osb, handle); +out_unlock: + brelse(group_bh); + brelse(main_bm_bh); + + ocfs2_inode_unlock(main_bm_inode, 1); + +out_mutex: + mutex_unlock(&main_bm_inode->i_mutex); + iput(main_bm_inode); + +out: + mlog_exit_void(); + return ret; +} + +static int ocfs2_check_new_group(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_new_group_input *input, + struct buffer_head *group_bh) +{ + int ret; + struct ocfs2_group_desc *gd; + u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc); + unsigned int max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * + le16_to_cpu(di->id2.i_chain.cl_bpc); + + + gd = (struct ocfs2_group_desc *)group_bh->b_data; + + ret = -EIO; + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) + mlog(ML_ERROR, "Group descriptor # %llu isn't valid.\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno)); + else if (di->i_blkno != gd->bg_parent_dinode) + mlog(ML_ERROR, "Group descriptor # %llu has bad parent " + "pointer (%llu, expected %llu)\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), + (unsigned long long)le64_to_cpu(di->i_blkno)); + else if (le16_to_cpu(gd->bg_bits) > max_bits) + mlog(ML_ERROR, "Group descriptor # %llu has bit count of %u\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits)); + else if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) + mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but " + "claims that %u are free\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + le16_to_cpu(gd->bg_free_bits_count)); + else if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) + mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but " + "max bitmap bits of %u\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + 8 * le16_to_cpu(gd->bg_size)); + else if (le16_to_cpu(gd->bg_chain) != input->chain) + mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u " + "while input has %u set.\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_chain), input->chain); + else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc) + mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but " + "input has %u clusters set\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), input->clusters); + else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc) + mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u " + "but it should have %u set\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + input->frees * cl_bpc); + else + ret = 0; + + return ret; +} + +static int ocfs2_verify_group_and_input(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_new_group_input *input, + struct buffer_head *group_bh) +{ + u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count); + u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg); + u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec); + u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group); + u32 total_clusters = le32_to_cpu(di->i_clusters); + int ret = -EINVAL; + + if (cluster < total_clusters) + mlog(ML_ERROR, "add a group which is in the current volume.\n"); + else if 
(input->chain >= cl_count) + mlog(ML_ERROR, "input chain exceeds the limit.\n"); + else if (next_free != cl_count && next_free != input->chain) + mlog(ML_ERROR, + "the add group should be in chain %u\n", next_free); + else if (total_clusters + input->clusters < total_clusters) + mlog(ML_ERROR, "add group's clusters overflow.\n"); + else if (input->clusters > cl_cpg) + mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n"); + else if (input->frees > input->clusters) + mlog(ML_ERROR, "the free cluster exceeds the total clusters\n"); + else if (total_clusters % cl_cpg != 0) + mlog(ML_ERROR, + "the last group isn't full. Use group extend first.\n"); + else if (input->group != ocfs2_which_cluster_group(inode, cluster)) + mlog(ML_ERROR, "group blkno is invalid\n"); + else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh))) + mlog(ML_ERROR, "group descriptor check failed.\n"); + else + ret = 0; + + return ret; +} + +/* Add a new group descriptor to global_bitmap. */ +int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) +{ + int ret; + handle_t *handle; + struct buffer_head *main_bm_bh = NULL; + struct inode *main_bm_inode = NULL; + struct ocfs2_dinode *fe = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *group_bh = NULL; + struct ocfs2_group_desc *group = NULL; + struct ocfs2_chain_list *cl; + struct ocfs2_chain_rec *cr; + u16 cl_bpc; + + mlog_entry_void(); + + if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) + return -EROFS; + + main_bm_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (!main_bm_inode) { + ret = -EINVAL; + mlog_errno(ret); + goto out; + } + + mutex_lock(&main_bm_inode->i_mutex); + + ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); + if (ret < 0) { + mlog_errno(ret); + goto out_mutex; + } + + fe = (struct ocfs2_dinode *)main_bm_bh->b_data; + + if (le16_to_cpu(fe->id2.i_chain.cl_cpg) != + ocfs2_group_bitmap_size(osb->sb) * 8) { + mlog(ML_ERROR, "The disk is too old and small." 
+ " Force to do offline resize."); + ret = -EINVAL; + goto out_unlock; + } + + ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh); + if (ret < 0) { + mlog(ML_ERROR, "Can't read the group descriptor # %llu " + "from the device.", (unsigned long long)input->group); + goto out_unlock; + } + + ocfs2_set_new_buffer_uptodate(inode, group_bh); + + ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh); + if (ret) { + mlog_errno(ret); + goto out_unlock; + } + + mlog(0, "Add a new group %llu in chain = %u, length = %u\n", + (unsigned long long)input->group, input->chain, input->clusters); + + handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS); + if (IS_ERR(handle)) { + mlog_errno(PTR_ERR(handle)); + ret = -EINVAL; + goto out_unlock; + } + + cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc); + cl = &fe->id2.i_chain; + cr = &cl->cl_recs[input->chain]; + + ret = ocfs2_journal_access(handle, main_bm_inode, group_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + group = (struct ocfs2_group_desc *)group_bh->b_data; + group->bg_next_group = cr->c_blkno; + + ret = ocfs2_journal_dirty(handle, group_bh); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_journal_access(handle, main_bm_inode, main_bm_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) { + le16_add_cpu(&cl->cl_next_free_rec, 1); + memset(cr, 0, sizeof(struct ocfs2_chain_rec)); + } + + cr->c_blkno = cpu_to_le64(input->group); + le32_add_cpu(&cr->c_total, input->clusters * cl_bpc); + le32_add_cpu(&cr->c_free, input->frees * cl_bpc); + + le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters *cl_bpc); + le32_add_cpu(&fe->id1.bitmap1.i_used, + (input->clusters - input->frees) * cl_bpc); + le32_add_cpu(&fe->i_clusters, input->clusters); + + ocfs2_journal_dirty(handle, main_bm_bh); + + spin_lock(&OCFS2_I(main_bm_inode)->ip_lock); + OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters); + le64_add_cpu(&fe->i_size, input->clusters << osb->s_clustersize_bits); + spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock); + i_size_write(main_bm_inode, le64_to_cpu(fe->i_size)); + + ocfs2_update_super_and_backups(main_bm_inode, input->clusters); + +out_commit: + ocfs2_commit_trans(osb, handle); +out_unlock: + brelse(group_bh); + brelse(main_bm_bh); + + ocfs2_inode_unlock(main_bm_inode, 1); + +out_mutex: + mutex_unlock(&main_bm_inode->i_mutex); + iput(main_bm_inode); + +out: + mlog_exit_void(); + return ret; +} diff --git a/fs/ocfs2/resize.h b/fs/ocfs2/resize.h new file mode 100644 index 0000000..f38841a --- /dev/null +++ b/fs/ocfs2/resize.h @@ -0,0 +1,32 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * resize.h + * + * Function prototypes + * + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_RESIZE_H +#define OCFS2_RESIZE_H + +int ocfs2_group_extend(struct inode * inode, int new_clusters); +int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input); + +#endif /* OCFS2_RESIZE_H */ diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c new file mode 100644 index 0000000..bdda2d8 --- /dev/null +++ b/fs/ocfs2/slot_map.c @@ -0,0 +1,544 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * slot_map.c + * + * + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> + +#define MLOG_MASK_PREFIX ML_SUPER +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "dlmglue.h" +#include "extent_map.h" +#include "heartbeat.h" +#include "inode.h" +#include "slot_map.h" +#include "super.h" +#include "sysfile.h" + +#include "buffer_head_io.h" + + +struct ocfs2_slot { + int sl_valid; + unsigned int sl_node_num; +}; + +struct ocfs2_slot_info { + int si_extended; + int si_slots_per_block; + struct inode *si_inode; + unsigned int si_blocks; + struct buffer_head **si_bh; + unsigned int si_num_slots; + struct ocfs2_slot *si_slots; +}; + + +static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si, + unsigned int node_num); + +static void ocfs2_invalidate_slot(struct ocfs2_slot_info *si, + int slot_num) +{ + BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots)); + si->si_slots[slot_num].sl_valid = 0; +} + +static void ocfs2_set_slot(struct ocfs2_slot_info *si, + int slot_num, unsigned int node_num) +{ + BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots)); + + si->si_slots[slot_num].sl_valid = 1; + si->si_slots[slot_num].sl_node_num = node_num; +} + +/* This version is for the extended slot map */ +static void ocfs2_update_slot_info_extended(struct ocfs2_slot_info *si) +{ + int b, i, slotno; + struct ocfs2_slot_map_extended *se; + + slotno = 0; + for (b = 0; b < si->si_blocks; b++) { + se = (struct ocfs2_slot_map_extended *)si->si_bh[b]->b_data; + for (i = 0; + (i < si->si_slots_per_block) && + (slotno < si->si_num_slots); + i++, slotno++) { + if (se->se_slots[i].es_valid) + ocfs2_set_slot(si, slotno, + le32_to_cpu(se->se_slots[i].es_node_num)); + else + ocfs2_invalidate_slot(si, slotno); + } + } +} + +/* + * Post the slot information on disk into our slot_info struct. + * Must be protected by osb_lock. 
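+ *
+ * (The two on-disk layouts parsed here differ only in their per-slot
+ *  records; roughly:
+ *
+ *	old:       __le16 sm_slots[]   -- node number, or
+ *	                                  OCFS2_INVALID_SLOT if empty
+ *	extended:  struct ocfs2_extended_slot { es_valid, es_node_num }
+ *
+ *  as consumed by ocfs2_update_slot_info_extended() above and
+ *  ocfs2_update_slot_info_old() below.)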
+ */
+static void ocfs2_update_slot_info_old(struct ocfs2_slot_info *si)
+{
+	int i;
+	struct ocfs2_slot_map *sm;
+
+	sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data;
+
+	for (i = 0; i < si->si_num_slots; i++) {
+		if (le16_to_cpu(sm->sm_slots[i]) == (u16)OCFS2_INVALID_SLOT)
+			ocfs2_invalidate_slot(si, i);
+		else
+			ocfs2_set_slot(si, i, le16_to_cpu(sm->sm_slots[i]));
+	}
+}
+
+static void ocfs2_update_slot_info(struct ocfs2_slot_info *si)
+{
+	/*
+	 * The slot data will have been refreshed when ocfs2_super_lock
+	 * was taken.
+	 */
+	if (si->si_extended)
+		ocfs2_update_slot_info_extended(si);
+	else
+		ocfs2_update_slot_info_old(si);
+}
+
+int ocfs2_refresh_slot_info(struct ocfs2_super *osb)
+{
+	int ret;
+	struct ocfs2_slot_info *si = osb->slot_info;
+
+	if (si == NULL)
+		return 0;
+
+	BUG_ON(si->si_blocks == 0);
+	BUG_ON(si->si_bh == NULL);
+
+	mlog(0, "Refreshing slot map, reading %u block(s)\n",
+	     si->si_blocks);
+
+	/*
+	 * We pass -1 as blocknr because we expect all of si->si_bh to
+	 * be !NULL. Thus, ocfs2_read_blocks() will ignore blocknr. If
+	 * this is not true, the read of -1 (UINT64_MAX) will fail.
+	 */
+	ret = ocfs2_read_blocks(si->si_inode, -1, si->si_blocks, si->si_bh,
+				OCFS2_BH_IGNORE_CACHE);
+	if (ret == 0) {
+		spin_lock(&osb->osb_lock);
+		ocfs2_update_slot_info(si);
+		spin_unlock(&osb->osb_lock);
+	}
+
+	return ret;
+}
+
+/* post our slot info into its destination bh and write it
+ * out. */
+static void ocfs2_update_disk_slot_extended(struct ocfs2_slot_info *si,
+					    int slot_num,
+					    struct buffer_head **bh)
+{
+	int blkind = slot_num / si->si_slots_per_block;
+	int slotno = slot_num % si->si_slots_per_block;
+	struct ocfs2_slot_map_extended *se;
+
+	BUG_ON(blkind >= si->si_blocks);
+
+	se = (struct ocfs2_slot_map_extended *)si->si_bh[blkind]->b_data;
+	se->se_slots[slotno].es_valid = si->si_slots[slot_num].sl_valid;
+	if (si->si_slots[slot_num].sl_valid)
+		se->se_slots[slotno].es_node_num =
+			cpu_to_le32(si->si_slots[slot_num].sl_node_num);
+	*bh = si->si_bh[blkind];
+}
+
+static void ocfs2_update_disk_slot_old(struct ocfs2_slot_info *si,
+				       int slot_num,
+				       struct buffer_head **bh)
+{
+	int i;
+	struct ocfs2_slot_map *sm;
+
+	sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data;
+	for (i = 0; i < si->si_num_slots; i++) {
+		if (si->si_slots[i].sl_valid)
+			sm->sm_slots[i] =
+				cpu_to_le16(si->si_slots[i].sl_node_num);
+		else
+			sm->sm_slots[i] = cpu_to_le16(OCFS2_INVALID_SLOT);
+	}
+	*bh = si->si_bh[0];
+}
+
+static int ocfs2_update_disk_slot(struct ocfs2_super *osb,
+				  struct ocfs2_slot_info *si,
+				  int slot_num)
+{
+	int status;
+	struct buffer_head *bh;
+
+	spin_lock(&osb->osb_lock);
+	if (si->si_extended)
+		ocfs2_update_disk_slot_extended(si, slot_num, &bh);
+	else
+		ocfs2_update_disk_slot_old(si, slot_num, &bh);
+	spin_unlock(&osb->osb_lock);
+
+	status = ocfs2_write_block(osb, bh, si->si_inode);
+	if (status < 0)
+		mlog_errno(status);
+
+	return status;
+}
+
+/*
+ * Calculate how many bytes are needed by the slot map. Returns
+ * an error if the slot map file is too small.
+ */
+static int ocfs2_slot_map_physical_size(struct ocfs2_super *osb,
+					struct inode *inode,
+					unsigned long long *bytes)
+{
+	unsigned long long bytes_needed;
+
+	if (ocfs2_uses_extended_slot_map(osb)) {
+		bytes_needed = osb->max_slots *
+			sizeof(struct ocfs2_extended_slot);
+	} else {
+		bytes_needed = osb->max_slots * sizeof(__le16);
+	}
+	if (bytes_needed > i_size_read(inode)) {
+		mlog(ML_ERROR,
+		     "Slot map file is too small!
(size %llu, needed %llu)\n", + i_size_read(inode), bytes_needed); + return -ENOSPC; + } + + *bytes = bytes_needed; + return 0; +} + +/* try to find global node in the slot info. Returns -ENOENT + * if nothing is found. */ +static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si, + unsigned int node_num) +{ + int i, ret = -ENOENT; + + for(i = 0; i < si->si_num_slots; i++) { + if (si->si_slots[i].sl_valid && + (node_num == si->si_slots[i].sl_node_num)) { + ret = i; + break; + } + } + + return ret; +} + +static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si, + int preferred) +{ + int i, ret = -ENOSPC; + + if ((preferred >= 0) && (preferred < si->si_num_slots)) { + if (!si->si_slots[preferred].sl_valid) { + ret = preferred; + goto out; + } + } + + for(i = 0; i < si->si_num_slots; i++) { + if (!si->si_slots[i].sl_valid) { + ret = i; + break; + } + } +out: + return ret; +} + +int ocfs2_node_num_to_slot(struct ocfs2_super *osb, unsigned int node_num) +{ + int slot; + struct ocfs2_slot_info *si = osb->slot_info; + + spin_lock(&osb->osb_lock); + slot = __ocfs2_node_num_to_slot(si, node_num); + spin_unlock(&osb->osb_lock); + + return slot; +} + +int ocfs2_slot_to_node_num_locked(struct ocfs2_super *osb, int slot_num, + unsigned int *node_num) +{ + struct ocfs2_slot_info *si = osb->slot_info; + + assert_spin_locked(&osb->osb_lock); + + BUG_ON(slot_num < 0); + BUG_ON(slot_num > osb->max_slots); + + if (!si->si_slots[slot_num].sl_valid) + return -ENOENT; + + *node_num = si->si_slots[slot_num].sl_node_num; + return 0; +} + +static void __ocfs2_free_slot_info(struct ocfs2_slot_info *si) +{ + unsigned int i; + + if (si == NULL) + return; + + if (si->si_inode) + iput(si->si_inode); + if (si->si_bh) { + for (i = 0; i < si->si_blocks; i++) { + if (si->si_bh[i]) { + brelse(si->si_bh[i]); + si->si_bh[i] = NULL; + } + } + kfree(si->si_bh); + } + + kfree(si); +} + +int ocfs2_clear_slot(struct ocfs2_super *osb, int slot_num) +{ + struct ocfs2_slot_info *si = osb->slot_info; + + if (si == NULL) + return 0; + + spin_lock(&osb->osb_lock); + ocfs2_invalidate_slot(si, slot_num); + spin_unlock(&osb->osb_lock); + + return ocfs2_update_disk_slot(osb, osb->slot_info, slot_num); +} + +static int ocfs2_map_slot_buffers(struct ocfs2_super *osb, + struct ocfs2_slot_info *si) +{ + int status = 0; + u64 blkno; + unsigned long long blocks, bytes; + unsigned int i; + struct buffer_head *bh; + + status = ocfs2_slot_map_physical_size(osb, si->si_inode, &bytes); + if (status) + goto bail; + + blocks = ocfs2_blocks_for_bytes(si->si_inode->i_sb, bytes); + BUG_ON(blocks > UINT_MAX); + si->si_blocks = blocks; + if (!si->si_blocks) + goto bail; + + if (si->si_extended) + si->si_slots_per_block = + (osb->sb->s_blocksize / + sizeof(struct ocfs2_extended_slot)); + else + si->si_slots_per_block = osb->sb->s_blocksize / sizeof(__le16); + + /* The size checks above should ensure this */ + BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks); + + mlog(0, "Slot map needs %u buffers for %llu bytes\n", + si->si_blocks, bytes); + + si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks, + GFP_KERNEL); + if (!si->si_bh) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + for (i = 0; i < si->si_blocks; i++) { + status = ocfs2_extent_map_get_blocks(si->si_inode, i, + &blkno, NULL, NULL); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + mlog(0, "Reading slot map block %u at %llu\n", i, + (unsigned long long)blkno); + + bh = NULL; /* Acquire a fresh bh */ + status = 
ocfs2_read_blocks(si->si_inode, blkno, 1, &bh,
+					   OCFS2_BH_IGNORE_CACHE);
+		if (status < 0) {
+			mlog_errno(status);
+			goto bail;
+		}
+
+		si->si_bh[i] = bh;
+	}
+
+bail:
+	return status;
+}
+
+int ocfs2_init_slot_info(struct ocfs2_super *osb)
+{
+	int status;
+	struct inode *inode = NULL;
+	struct ocfs2_slot_info *si;
+
+	si = kzalloc(sizeof(struct ocfs2_slot_info) +
+		     (sizeof(struct ocfs2_slot) * osb->max_slots),
+		     GFP_KERNEL);
+	if (!si) {
+		status = -ENOMEM;
+		mlog_errno(status);
+		goto bail;
+	}
+
+	si->si_extended = ocfs2_uses_extended_slot_map(osb);
+	si->si_num_slots = osb->max_slots;
+	si->si_slots = (struct ocfs2_slot *)((char *)si +
+					     sizeof(struct ocfs2_slot_info));
+
+	inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE,
+					    OCFS2_INVALID_SLOT);
+	if (!inode) {
+		status = -EINVAL;
+		mlog_errno(status);
+		goto bail;
+	}
+
+	si->si_inode = inode;
+	status = ocfs2_map_slot_buffers(osb, si);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	osb->slot_info = (struct ocfs2_slot_info *)si;
+bail:
+	if (status < 0 && si)
+		__ocfs2_free_slot_info(si);
+
+	return status;
+}
+
+void ocfs2_free_slot_info(struct ocfs2_super *osb)
+{
+	struct ocfs2_slot_info *si = osb->slot_info;
+
+	osb->slot_info = NULL;
+	__ocfs2_free_slot_info(si);
+}
+
+int ocfs2_find_slot(struct ocfs2_super *osb)
+{
+	int status;
+	int slot;
+	struct ocfs2_slot_info *si;
+
+	mlog_entry_void();
+
+	si = osb->slot_info;
+
+	spin_lock(&osb->osb_lock);
+	ocfs2_update_slot_info(si);
+
+	/* search for ourselves first and take the slot if it already
+	 * exists. Perhaps we need to mark this in a variable for our
+	 * own journal recovery? Possibly not, though we certainly
+	 * need to warn the user */
+	slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+	if (slot < 0) {
+		/* if no slot yet, then just take the first available
+		 * one. */
+		slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
+		if (slot < 0) {
+			spin_unlock(&osb->osb_lock);
+			mlog(ML_ERROR, "no free slots available!\n");
+			status = -EINVAL;
+			goto bail;
+		}
+	} else
+		mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
+		     slot);
+
+	ocfs2_set_slot(si, slot, osb->node_num);
+	osb->slot_num = slot;
+	spin_unlock(&osb->osb_lock);
+
+	mlog(0, "taking node slot %d\n", osb->slot_num);
+
+	status = ocfs2_update_disk_slot(osb, si, osb->slot_num);
+	if (status < 0)
+		mlog_errno(status);
+
+bail:
+	mlog_exit(status);
+	return status;
+}
+
+void ocfs2_put_slot(struct ocfs2_super *osb)
+{
+	int status, slot_num;
+	struct ocfs2_slot_info *si = osb->slot_info;
+
+	if (!si)
+		return;
+
+	spin_lock(&osb->osb_lock);
+	ocfs2_update_slot_info(si);
+
+	slot_num = osb->slot_num;
+	ocfs2_invalidate_slot(si, osb->slot_num);
+	osb->slot_num = OCFS2_INVALID_SLOT;
+	spin_unlock(&osb->osb_lock);
+
+	status = ocfs2_update_disk_slot(osb, si, slot_num);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+bail:
+	ocfs2_free_slot_info(osb);
+}
+
diff --git a/fs/ocfs2/slot_map.h b/fs/ocfs2/slot_map.h
new file mode 100644
index 0000000..601c95f
--- /dev/null
+++ b/fs/ocfs2/slot_map.h
@@ -0,0 +1,44 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * slot_map.h
+ *
+ * Slot map function prototypes.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + + +#ifndef SLOTMAP_H +#define SLOTMAP_H + +int ocfs2_init_slot_info(struct ocfs2_super *osb); +void ocfs2_free_slot_info(struct ocfs2_super *osb); + +int ocfs2_find_slot(struct ocfs2_super *osb); +void ocfs2_put_slot(struct ocfs2_super *osb); + +int ocfs2_refresh_slot_info(struct ocfs2_super *osb); + +int ocfs2_node_num_to_slot(struct ocfs2_super *osb, unsigned int node_num); +int ocfs2_slot_to_node_num_locked(struct ocfs2_super *osb, int slot_num, + unsigned int *node_num); + +int ocfs2_clear_slot(struct ocfs2_super *osb, int slot_num); + +#endif diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c new file mode 100644 index 0000000..fcd120f --- /dev/null +++ b/fs/ocfs2/stack_o2cb.c @@ -0,0 +1,381 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * stack_o2cb.c + * + * Code which interfaces ocfs2 with the o2cb stack. + * + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include <linux/crc32.h> +#include <linux/module.h> + +/* Needed for AOP_TRUNCATED_PAGE in mlog_errno() */ +#include <linux/fs.h> + +#include "cluster/masklog.h" +#include "cluster/nodemanager.h" +#include "cluster/heartbeat.h" + +#include "stackglue.h" + +struct o2dlm_private { + struct dlm_eviction_cb op_eviction_cb; +}; + +static struct ocfs2_stack_plugin o2cb_stack; + +/* These should be identical */ +#if (DLM_LOCK_IV != LKM_IVMODE) +# error Lock modes do not match +#endif +#if (DLM_LOCK_NL != LKM_NLMODE) +# error Lock modes do not match +#endif +#if (DLM_LOCK_CR != LKM_CRMODE) +# error Lock modes do not match +#endif +#if (DLM_LOCK_CW != LKM_CWMODE) +# error Lock modes do not match +#endif +#if (DLM_LOCK_PR != LKM_PRMODE) +# error Lock modes do not match +#endif +#if (DLM_LOCK_PW != LKM_PWMODE) +# error Lock modes do not match +#endif +#if (DLM_LOCK_EX != LKM_EXMODE) +# error Lock modes do not match +#endif +static inline int mode_to_o2dlm(int mode) +{ + BUG_ON(mode > LKM_MAXMODE); + + return mode; +} + +#define map_flag(_generic, _o2dlm) \ + if (flags & (_generic)) { \ + flags &= ~(_generic); \ + o2dlm_flags |= (_o2dlm); \ + } +static int flags_to_o2dlm(u32 flags) +{ + int o2dlm_flags = 0; + + map_flag(DLM_LKF_NOQUEUE, LKM_NOQUEUE); + map_flag(DLM_LKF_CANCEL, LKM_CANCEL); + map_flag(DLM_LKF_CONVERT, LKM_CONVERT); + map_flag(DLM_LKF_VALBLK, LKM_VALBLK); + map_flag(DLM_LKF_IVVALBLK, LKM_INVVALBLK); + map_flag(DLM_LKF_ORPHAN, LKM_ORPHAN); + map_flag(DLM_LKF_FORCEUNLOCK, LKM_FORCE); + map_flag(DLM_LKF_TIMEOUT, LKM_TIMEOUT); + map_flag(DLM_LKF_LOCAL, LKM_LOCAL); + + /* map_flag() should have cleared every flag passed in */ + BUG_ON(flags != 0); + + return o2dlm_flags; +} +#undef map_flag + +/* + * Map an o2dlm status to standard errno values. + * + * o2dlm only uses a handful of these, and returns even fewer to the + * caller. Still, we try to assign sane values to each error. + * + * The following value pairs have special meanings to dlmglue, thus + * the right hand side needs to stay unique - never duplicate the + * mapping elsewhere in the table! 
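+ * For instance, dlmglue detects a failed trylock by checking for
+ * -EAGAIN, so DLM_NOTQUEUED must stay the only status mapping to it;
+ * a caller sketch (illustrative only):
+ *
+ *	ret = dlm_status_to_errno(status);
+ *	if (ret == -EAGAIN)
+ *		... the NOQUEUE request lost; back off and retry ...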
+ *
+ * DLM_NORMAL:		0
+ * DLM_NOTQUEUED:	-EAGAIN
+ * DLM_CANCELGRANT:	-EBUSY
+ * DLM_CANCEL:		-DLM_ECANCEL
+ */
+/* Keep in sync with dlmapi.h */
+static int status_map[] = {
+	[DLM_NORMAL]			= 0,		/* Success */
+	[DLM_GRANTED]			= -EINVAL,
+	[DLM_DENIED]			= -EACCES,
+	[DLM_DENIED_NOLOCKS]		= -EACCES,
+	[DLM_WORKING]			= -EACCES,
+	[DLM_BLOCKED]			= -EINVAL,
+	[DLM_BLOCKED_ORPHAN]		= -EINVAL,
+	[DLM_DENIED_GRACE_PERIOD]	= -EACCES,
+	[DLM_SYSERR]			= -ENOMEM,	/* It is what it is */
+	[DLM_NOSUPPORT]			= -EPROTO,
+	[DLM_CANCELGRANT]		= -EBUSY,	/* Cancel after grant */
+	[DLM_IVLOCKID]			= -EINVAL,
+	[DLM_SYNC]			= -EINVAL,
+	[DLM_BADTYPE]			= -EINVAL,
+	[DLM_BADRESOURCE]		= -EINVAL,
+	[DLM_MAXHANDLES]		= -ENOMEM,
+	[DLM_NOCLINFO]			= -EINVAL,
+	[DLM_NOLOCKMGR]			= -EINVAL,
+	[DLM_NOPURGED]			= -EINVAL,
+	[DLM_BADARGS]			= -EINVAL,
+	[DLM_VOID]			= -EINVAL,
+	[DLM_NOTQUEUED]			= -EAGAIN,	/* Trylock failed */
+	[DLM_IVBUFLEN]			= -EINVAL,
+	[DLM_CVTUNGRANT]		= -EPERM,
+	[DLM_BADPARAM]			= -EINVAL,
+	[DLM_VALNOTVALID]		= -EINVAL,
+	[DLM_REJECTED]			= -EPERM,
+	[DLM_ABORT]			= -EINVAL,
+	[DLM_CANCEL]			= -DLM_ECANCEL,	/* Successful cancel */
+	[DLM_IVRESHANDLE]		= -EINVAL,
+	[DLM_DEADLOCK]			= -EDEADLK,
+	[DLM_DENIED_NOASTS]		= -EINVAL,
+	[DLM_FORWARD]			= -EINVAL,
+	[DLM_TIMEOUT]			= -ETIMEDOUT,
+	[DLM_IVGROUPID]			= -EINVAL,
+	[DLM_VERS_CONFLICT]		= -EOPNOTSUPP,
+	[DLM_BAD_DEVICE_PATH]		= -ENOENT,
+	[DLM_NO_DEVICE_PERMISSION]	= -EPERM,
+	[DLM_NO_CONTROL_DEVICE]		= -ENOENT,
+	[DLM_RECOVERING]		= -ENOTCONN,
+	[DLM_MIGRATING]			= -ERESTART,
+	[DLM_MAXSTATS]			= -EINVAL,
+};
+
+static int dlm_status_to_errno(enum dlm_status status)
+{
+	/* >= here: an index equal to the array size is already out of
+	 * bounds, so it must trip the BUG_ON too. */
+	BUG_ON(status >= (sizeof(status_map) / sizeof(status_map[0])));
+
+	return status_map[status];
+}
+
+static void o2dlm_lock_ast_wrapper(void *astarg)
+{
+	BUG_ON(o2cb_stack.sp_proto == NULL);
+
+	o2cb_stack.sp_proto->lp_lock_ast(astarg);
+}
+
+static void o2dlm_blocking_ast_wrapper(void *astarg, int level)
+{
+	BUG_ON(o2cb_stack.sp_proto == NULL);
+
+	o2cb_stack.sp_proto->lp_blocking_ast(astarg, level);
+}
+
+static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status)
+{
+	int error = dlm_status_to_errno(status);
+
+	BUG_ON(o2cb_stack.sp_proto == NULL);
+
+	/*
+	 * In o2dlm, you can get both the lock_ast() for the lock being
+	 * granted and the unlock_ast() for the CANCEL failing. A
+	 * successful cancel sends DLM_NORMAL here. If the
+	 * lock grant happened before the cancel arrived, you get
+	 * DLM_CANCELGRANT.
+	 *
+	 * There's no need for the double-ast. If we see DLM_CANCELGRANT,
+	 * we just ignore it. We expect the lock_ast() to handle the
+	 * granted lock.
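+	 *
+	 * In table form, what this wrapper does with each status:
+	 *
+	 *	DLM_CANCELGRANT  ->  ignored; lock_ast() already fired
+	 *	DLM_NORMAL       ->  lp_unlock_ast(astarg, 0)
+	 *	anything else    ->  lp_unlock_ast(astarg, mapped errno)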
+ */ + if (status == DLM_CANCELGRANT) + return; + + o2cb_stack.sp_proto->lp_unlock_ast(astarg, error); +} + +static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn, + int mode, + union ocfs2_dlm_lksb *lksb, + u32 flags, + void *name, + unsigned int namelen, + void *astarg) +{ + enum dlm_status status; + int o2dlm_mode = mode_to_o2dlm(mode); + int o2dlm_flags = flags_to_o2dlm(flags); + int ret; + + status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm, + o2dlm_flags, name, namelen, + o2dlm_lock_ast_wrapper, astarg, + o2dlm_blocking_ast_wrapper); + ret = dlm_status_to_errno(status); + return ret; +} + +static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn, + union ocfs2_dlm_lksb *lksb, + u32 flags, + void *astarg) +{ + enum dlm_status status; + int o2dlm_flags = flags_to_o2dlm(flags); + int ret; + + status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm, + o2dlm_flags, o2dlm_unlock_ast_wrapper, astarg); + ret = dlm_status_to_errno(status); + return ret; +} + +static int o2cb_dlm_lock_status(union ocfs2_dlm_lksb *lksb) +{ + return dlm_status_to_errno(lksb->lksb_o2dlm.status); +} + +static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb) +{ + return (void *)(lksb->lksb_o2dlm.lvb); +} + +static void o2cb_dump_lksb(union ocfs2_dlm_lksb *lksb) +{ + dlm_print_one_lock(lksb->lksb_o2dlm.lockid); +} + +/* + * Called from the dlm when it's about to evict a node. This is how the + * classic stack signals node death. + */ +static void o2dlm_eviction_cb(int node_num, void *data) +{ + struct ocfs2_cluster_connection *conn = data; + + mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n", + node_num, conn->cc_namelen, conn->cc_name); + + conn->cc_recovery_handler(node_num, conn->cc_recovery_data); +} + +static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) +{ + int rc = 0; + u32 dlm_key; + struct dlm_ctxt *dlm; + struct o2dlm_private *priv; + struct dlm_protocol_version dlm_version; + + BUG_ON(conn == NULL); + BUG_ON(o2cb_stack.sp_proto == NULL); + + /* for now we only have one cluster/node, make sure we see it + * in the heartbeat universe */ + if (!o2hb_check_local_node_heartbeating()) { + rc = -EINVAL; + goto out; + } + + priv = kzalloc(sizeof(struct o2dlm_private), GFP_KERNEL); + if (!priv) { + rc = -ENOMEM; + goto out_free; + } + + /* This just fills the structure in. It is safe to pass conn. */ + dlm_setup_eviction_cb(&priv->op_eviction_cb, o2dlm_eviction_cb, + conn); + + conn->cc_private = priv; + + /* used by the dlm code to make message headers unique, each + * node in this domain must agree on this. 
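+	 *
+	 * Agreement comes for free because every node derives the key
+	 * from the shared domain name, e.g. (hypothetical uuid name):
+	 *
+	 *	dlm_key = crc32_le(0, "632A924FDD844190BDA93C0DF6B94899", 32);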
*/ + dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); + dlm_version.pv_major = conn->cc_version.pv_major; + dlm_version.pv_minor = conn->cc_version.pv_minor; + + dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version); + if (IS_ERR(dlm)) { + rc = PTR_ERR(dlm); + mlog_errno(rc); + goto out_free; + } + + conn->cc_version.pv_major = dlm_version.pv_major; + conn->cc_version.pv_minor = dlm_version.pv_minor; + conn->cc_lockspace = dlm; + + dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); + +out_free: + if (rc && conn->cc_private) + kfree(conn->cc_private); + +out: + return rc; +} + +static int o2cb_cluster_disconnect(struct ocfs2_cluster_connection *conn) +{ + struct dlm_ctxt *dlm = conn->cc_lockspace; + struct o2dlm_private *priv = conn->cc_private; + + dlm_unregister_eviction_cb(&priv->op_eviction_cb); + conn->cc_private = NULL; + kfree(priv); + + dlm_unregister_domain(dlm); + conn->cc_lockspace = NULL; + + return 0; +} + +static int o2cb_cluster_this_node(unsigned int *node) +{ + int node_num; + + node_num = o2nm_this_node(); + if (node_num == O2NM_INVALID_NODE_NUM) + return -ENOENT; + + if (node_num >= O2NM_MAX_NODES) + return -EOVERFLOW; + + *node = node_num; + return 0; +} + +static struct ocfs2_stack_operations o2cb_stack_ops = { + .connect = o2cb_cluster_connect, + .disconnect = o2cb_cluster_disconnect, + .this_node = o2cb_cluster_this_node, + .dlm_lock = o2cb_dlm_lock, + .dlm_unlock = o2cb_dlm_unlock, + .lock_status = o2cb_dlm_lock_status, + .lock_lvb = o2cb_dlm_lvb, + .dump_lksb = o2cb_dump_lksb, +}; + +static struct ocfs2_stack_plugin o2cb_stack = { + .sp_name = "o2cb", + .sp_ops = &o2cb_stack_ops, + .sp_owner = THIS_MODULE, +}; + +static int __init o2cb_stack_init(void) +{ + return ocfs2_stack_glue_register(&o2cb_stack); +} + +static void __exit o2cb_stack_exit(void) +{ + ocfs2_stack_glue_unregister(&o2cb_stack); +} + +MODULE_AUTHOR("Oracle"); +MODULE_DESCRIPTION("ocfs2 driver for the classic o2cb stack"); +MODULE_LICENSE("GPL"); +module_init(o2cb_stack_init); +module_exit(o2cb_stack_exit); diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c new file mode 100644 index 0000000..9b76d41 --- /dev/null +++ b/fs/ocfs2/stack_user.c @@ -0,0 +1,912 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * stack_user.c + * + * Code which interfaces ocfs2 with fs/dlm and a userspace stack. + * + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/mutex.h> +#include <linux/smp_lock.h> +#include <linux/reboot.h> +#include <asm/uaccess.h> + +#include "ocfs2.h" /* For struct ocfs2_lock_res */ +#include "stackglue.h" + +#include <linux/dlm_plock.h> + +/* + * The control protocol starts with a handshake. Until the handshake + * is complete, the control device will fail all write(2)s. + * + * The handshake is simple. First, the client reads until EOF. Each line + * of output is a supported protocol tag. All protocol tags are a single + * character followed by a two hex digit version number. 
Currently the
+ * only protocol supported is T01, for "Text-based version 0x01". Next, the
+ * client writes the version they would like to use, including the newline.
+ * Thus, the protocol tag is 'T01\n'. If the version tag written is
+ * unknown, -EINVAL is returned. Once the negotiation is complete, the
+ * client can start sending messages.
+ *
+ * The T01 protocol has three messages. First is the "SETN" message.
+ * It has the following syntax:
+ *
+ *  SETN<space><8-char-hex-nodenum><newline>
+ *
+ * This is 14 characters.
+ *
+ * The "SETN" message must be the first message following the protocol.
+ * It tells ocfs2_control the local node number.
+ *
+ * Next comes the "SETV" message. It has the following syntax:
+ *
+ *  SETV<space><2-char-hex-major><space><2-char-hex-minor><newline>
+ *
+ * This is 11 characters.
+ *
+ * The "SETV" message sets the filesystem locking protocol version as
+ * negotiated by the client. The client negotiates based on the maximum
+ * version advertised in /sys/fs/ocfs2/max_locking_protocol. The major
+ * number from the "SETV" message must match
+ * ocfs2_user_plugin.sp_proto->lp_max_version.pv_major, and the minor number
+ * must be less than or equal to ...->lp_max_version.pv_minor.
+ *
+ * Once this information has been set, mounts will be allowed. From this
+ * point on, the "DOWN" message can be sent for node down notification.
+ * It has the following syntax:
+ *
+ *  DOWN<space><32-char-cap-hex-uuid><space><8-char-hex-nodenum><newline>
+ *
+ * eg:
+ *
+ *  DOWN 632A924FDD844190BDA93C0DF6B94899 00000001\n
+ *
+ * This is 47 characters.
+ */
+
+/*
+ * Whether or not the client has done the handshake.
+ * For now, we have just one protocol version.
+ */
+#define OCFS2_CONTROL_PROTO			"T01\n"
+#define OCFS2_CONTROL_PROTO_LEN			4
+
+/* Handshake states */
+#define OCFS2_CONTROL_HANDSHAKE_INVALID		(0)
+#define OCFS2_CONTROL_HANDSHAKE_READ		(1)
+#define OCFS2_CONTROL_HANDSHAKE_PROTOCOL	(2)
+#define OCFS2_CONTROL_HANDSHAKE_VALID		(3)
+
+/* Messages */
+#define OCFS2_CONTROL_MESSAGE_OP_LEN		4
+#define OCFS2_CONTROL_MESSAGE_SETNODE_OP	"SETN"
+#define OCFS2_CONTROL_MESSAGE_SETNODE_TOTAL_LEN	14
+#define OCFS2_CONTROL_MESSAGE_SETVERSION_OP	"SETV"
+#define OCFS2_CONTROL_MESSAGE_SETVERSION_TOTAL_LEN	11
+#define OCFS2_CONTROL_MESSAGE_DOWN_OP		"DOWN"
+#define OCFS2_CONTROL_MESSAGE_DOWN_TOTAL_LEN	47
+#define OCFS2_TEXT_UUID_LEN			32
+#define OCFS2_CONTROL_MESSAGE_VERNUM_LEN	2
+#define OCFS2_CONTROL_MESSAGE_NODENUM_LEN	8
+
+/*
+ * ocfs2_live_connection is refcounted because the filesystem and
+ * miscdevice sides can detach in different order. Let's just be safe.
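+ *
+ * (Recapping the T01 exchange described above, one full session as the
+ *  control daemon would drive it -- the node number, version and uuid
+ *  are example values only:
+ *
+ *	read(fd, buf, ...);                 returns "T01\n", then 0
+ *	write(fd, "T01\n", 4);              select the T01 protocol
+ *	write(fd, "SETN 00000001\n", 14);   local node number
+ *	write(fd, "SETV 01 00\n", 11);      locking protocol 1.0
+ *	    ... mounts may now proceed; on node death ...
+ *	write(fd, "DOWN 632A924FDD844190BDA93C0DF6B94899 00000001\n", 47);
+ *  )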
+ */
+struct ocfs2_live_connection {
+	struct list_head		oc_list;
+	struct ocfs2_cluster_connection	*oc_conn;
+};
+
+struct ocfs2_control_private {
+	struct list_head op_list;
+	int op_state;
+	int op_this_node;
+	struct ocfs2_protocol_version op_proto;
+};
+
+/* SETN<space><8-char-hex-nodenum><newline> */
+struct ocfs2_control_message_setn {
+	char	tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	char	space;
+	char	nodestr[OCFS2_CONTROL_MESSAGE_NODENUM_LEN];
+	char	newline;
+};
+
+/* SETV<space><2-char-hex-major><space><2-char-hex-minor><newline> */
+struct ocfs2_control_message_setv {
+	char	tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	char	space1;
+	char	major[OCFS2_CONTROL_MESSAGE_VERNUM_LEN];
+	char	space2;
+	char	minor[OCFS2_CONTROL_MESSAGE_VERNUM_LEN];
+	char	newline;
+};
+
+/* DOWN<space><32-char-cap-hex-uuid><space><8-char-hex-nodenum><newline> */
+struct ocfs2_control_message_down {
+	char	tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	char	space1;
+	char	uuid[OCFS2_TEXT_UUID_LEN];
+	char	space2;
+	char	nodestr[OCFS2_CONTROL_MESSAGE_NODENUM_LEN];
+	char	newline;
+};
+
+union ocfs2_control_message {
+	char					tag[OCFS2_CONTROL_MESSAGE_OP_LEN];
+	struct ocfs2_control_message_setn	u_setn;
+	struct ocfs2_control_message_setv	u_setv;
+	struct ocfs2_control_message_down	u_down;
+};
+
+static struct ocfs2_stack_plugin ocfs2_user_plugin;
+
+static atomic_t ocfs2_control_opened;
+static int ocfs2_control_this_node = -1;
+static struct ocfs2_protocol_version running_proto;
+
+static LIST_HEAD(ocfs2_live_connection_list);
+static LIST_HEAD(ocfs2_control_private_list);
+static DEFINE_MUTEX(ocfs2_control_lock);
+
+static inline void ocfs2_control_set_handshake_state(struct file *file,
+						     int state)
+{
+	struct ocfs2_control_private *p = file->private_data;
+	p->op_state = state;
+}
+
+static inline int ocfs2_control_get_handshake_state(struct file *file)
+{
+	struct ocfs2_control_private *p = file->private_data;
+	return p->op_state;
+}
+
+static struct ocfs2_live_connection *ocfs2_connection_find(const char *name)
+{
+	size_t len = strlen(name);
+	struct ocfs2_live_connection *c;
+
+	BUG_ON(!mutex_is_locked(&ocfs2_control_lock));
+
+	list_for_each_entry(c, &ocfs2_live_connection_list, oc_list) {
+		if ((c->oc_conn->cc_namelen == len) &&
+		    !strncmp(c->oc_conn->cc_name, name, len))
+			return c;
+	}
+
+	/* not found; don't hand the stale iteration cursor back */
+	return NULL;
+}
+
+/*
+ * ocfs2_live_connection structures are created underneath the ocfs2
+ * mount path. Since the VFS prevents multiple calls to
+ * fill_super(), we can't get dupes here.
+ */
+static int ocfs2_live_connection_new(struct ocfs2_cluster_connection *conn,
+				     struct ocfs2_live_connection **c_ret)
+{
+	int rc = 0;
+	struct ocfs2_live_connection *c;
+
+	c = kzalloc(sizeof(struct ocfs2_live_connection), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	mutex_lock(&ocfs2_control_lock);
+	c->oc_conn = conn;
+
+	if (atomic_read(&ocfs2_control_opened))
+		list_add(&c->oc_list, &ocfs2_live_connection_list);
+	else {
+		printk(KERN_ERR
+		       "ocfs2: Userspace control daemon is not present\n");
+		rc = -ESRCH;
+	}
+
+	mutex_unlock(&ocfs2_control_lock);
+
+	if (!rc)
+		*c_ret = c;
+	else
+		kfree(c);
+
+	return rc;
+}
+
+/*
+ * This function disconnects the cluster connection from ocfs2_control.
+ * Afterwards, userspace can't affect the cluster connection.
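+ *
+ * Called from user_cluster_disconnect() once the lockspace has been
+ * released, and from the error paths of user_cluster_connect().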
+ */ +static void ocfs2_live_connection_drop(struct ocfs2_live_connection *c) +{ + mutex_lock(&ocfs2_control_lock); + list_del_init(&c->oc_list); + c->oc_conn = NULL; + mutex_unlock(&ocfs2_control_lock); + + kfree(c); +} + +static int ocfs2_control_cfu(void *target, size_t target_len, + const char __user *buf, size_t count) +{ + /* The T01 expects write(2) calls to have exactly one command */ + if ((count != target_len) || + (count > sizeof(union ocfs2_control_message))) + return -EINVAL; + + if (copy_from_user(target, buf, target_len)) + return -EFAULT; + + return 0; +} + +static ssize_t ocfs2_control_validate_protocol(struct file *file, + const char __user *buf, + size_t count) +{ + ssize_t ret; + char kbuf[OCFS2_CONTROL_PROTO_LEN]; + + ret = ocfs2_control_cfu(kbuf, OCFS2_CONTROL_PROTO_LEN, + buf, count); + if (ret) + return ret; + + if (strncmp(kbuf, OCFS2_CONTROL_PROTO, OCFS2_CONTROL_PROTO_LEN)) + return -EINVAL; + + ocfs2_control_set_handshake_state(file, + OCFS2_CONTROL_HANDSHAKE_PROTOCOL); + + return count; +} + +static void ocfs2_control_send_down(const char *uuid, + int nodenum) +{ + struct ocfs2_live_connection *c; + + mutex_lock(&ocfs2_control_lock); + + c = ocfs2_connection_find(uuid); + if (c) { + BUG_ON(c->oc_conn == NULL); + c->oc_conn->cc_recovery_handler(nodenum, + c->oc_conn->cc_recovery_data); + } + + mutex_unlock(&ocfs2_control_lock); +} + +/* + * Called whenever configuration elements are sent to /dev/ocfs2_control. + * If all configuration elements are present, try to set the global + * values. If there is a problem, return an error. Skip any missing + * elements, and only bump ocfs2_control_opened when we have all elements + * and are successful. + */ +static int ocfs2_control_install_private(struct file *file) +{ + int rc = 0; + int set_p = 1; + struct ocfs2_control_private *p = file->private_data; + + BUG_ON(p->op_state != OCFS2_CONTROL_HANDSHAKE_PROTOCOL); + + mutex_lock(&ocfs2_control_lock); + + if (p->op_this_node < 0) { + set_p = 0; + } else if ((ocfs2_control_this_node >= 0) && + (ocfs2_control_this_node != p->op_this_node)) { + rc = -EINVAL; + goto out_unlock; + } + + if (!p->op_proto.pv_major) { + set_p = 0; + } else if (!list_empty(&ocfs2_live_connection_list) && + ((running_proto.pv_major != p->op_proto.pv_major) || + (running_proto.pv_minor != p->op_proto.pv_minor))) { + rc = -EINVAL; + goto out_unlock; + } + + if (set_p) { + ocfs2_control_this_node = p->op_this_node; + running_proto.pv_major = p->op_proto.pv_major; + running_proto.pv_minor = p->op_proto.pv_minor; + } + +out_unlock: + mutex_unlock(&ocfs2_control_lock); + + if (!rc && set_p) { + /* We set the global values successfully */ + atomic_inc(&ocfs2_control_opened); + ocfs2_control_set_handshake_state(file, + OCFS2_CONTROL_HANDSHAKE_VALID); + } + + return rc; +} + +static int ocfs2_control_get_this_node(void) +{ + int rc; + + mutex_lock(&ocfs2_control_lock); + if (ocfs2_control_this_node < 0) + rc = -EINVAL; + else + rc = ocfs2_control_this_node; + mutex_unlock(&ocfs2_control_lock); + + return rc; +} + +static int ocfs2_control_do_setnode_msg(struct file *file, + struct ocfs2_control_message_setn *msg) +{ + long nodenum; + char *ptr = NULL; + struct ocfs2_control_private *p = file->private_data; + + if (ocfs2_control_get_handshake_state(file) != + OCFS2_CONTROL_HANDSHAKE_PROTOCOL) + return -EINVAL; + + if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_SETNODE_OP, + OCFS2_CONTROL_MESSAGE_OP_LEN)) + return -EINVAL; + + if ((msg->space != ' ') || (msg->newline != '\n')) + return -EINVAL; + msg->space = 
msg->newline = '\0'; + + nodenum = simple_strtol(msg->nodestr, &ptr, 16); + if (!ptr || *ptr) + return -EINVAL; + + if ((nodenum == LONG_MIN) || (nodenum == LONG_MAX) || + (nodenum > INT_MAX) || (nodenum < 0)) + return -ERANGE; + p->op_this_node = nodenum; + + return ocfs2_control_install_private(file); +} + +static int ocfs2_control_do_setversion_msg(struct file *file, + struct ocfs2_control_message_setv *msg) + { + long major, minor; + char *ptr = NULL; + struct ocfs2_control_private *p = file->private_data; + struct ocfs2_protocol_version *max = + &ocfs2_user_plugin.sp_proto->lp_max_version; + + if (ocfs2_control_get_handshake_state(file) != + OCFS2_CONTROL_HANDSHAKE_PROTOCOL) + return -EINVAL; + + if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_SETVERSION_OP, + OCFS2_CONTROL_MESSAGE_OP_LEN)) + return -EINVAL; + + if ((msg->space1 != ' ') || (msg->space2 != ' ') || + (msg->newline != '\n')) + return -EINVAL; + msg->space1 = msg->space2 = msg->newline = '\0'; + + major = simple_strtol(msg->major, &ptr, 16); + if (!ptr || *ptr) + return -EINVAL; + minor = simple_strtol(msg->minor, &ptr, 16); + if (!ptr || *ptr) + return -EINVAL; + + /* + * The major must be between 1 and 255, inclusive. The minor + * must be between 0 and 255, inclusive. The version passed in + * must be within the maximum version supported by the filesystem. + */ + if ((major == LONG_MIN) || (major == LONG_MAX) || + (major > (u8)-1) || (major < 1)) + return -ERANGE; + if ((minor == LONG_MIN) || (minor == LONG_MAX) || + (minor > (u8)-1) || (minor < 0)) + return -ERANGE; + if ((major != max->pv_major) || + (minor > max->pv_minor)) + return -EINVAL; + + p->op_proto.pv_major = major; + p->op_proto.pv_minor = minor; + + return ocfs2_control_install_private(file); +} + +static int ocfs2_control_do_down_msg(struct file *file, + struct ocfs2_control_message_down *msg) +{ + long nodenum; + char *p = NULL; + + if (ocfs2_control_get_handshake_state(file) != + OCFS2_CONTROL_HANDSHAKE_VALID) + return -EINVAL; + + if (strncmp(msg->tag, OCFS2_CONTROL_MESSAGE_DOWN_OP, + OCFS2_CONTROL_MESSAGE_OP_LEN)) + return -EINVAL; + + if ((msg->space1 != ' ') || (msg->space2 != ' ') || + (msg->newline != '\n')) + return -EINVAL; + msg->space1 = msg->space2 = msg->newline = '\0'; + + nodenum = simple_strtol(msg->nodestr, &p, 16); + if (!p || *p) + return -EINVAL; + + if ((nodenum == LONG_MIN) || (nodenum == LONG_MAX) || + (nodenum > INT_MAX) || (nodenum < 0)) + return -ERANGE; + + ocfs2_control_send_down(msg->uuid, nodenum); + + return 0; +} + +static ssize_t ocfs2_control_message(struct file *file, + const char __user *buf, + size_t count) +{ + ssize_t ret; + union ocfs2_control_message msg; + + /* Try to catch padding issues */ + WARN_ON(offsetof(struct ocfs2_control_message_down, uuid) != + (sizeof(msg.u_down.tag) + sizeof(msg.u_down.space1))); + + memset(&msg, 0, sizeof(union ocfs2_control_message)); + ret = ocfs2_control_cfu(&msg, count, buf, count); + if (ret) + goto out; + + if ((count == OCFS2_CONTROL_MESSAGE_SETNODE_TOTAL_LEN) && + !strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_SETNODE_OP, + OCFS2_CONTROL_MESSAGE_OP_LEN)) + ret = ocfs2_control_do_setnode_msg(file, &msg.u_setn); + else if ((count == OCFS2_CONTROL_MESSAGE_SETVERSION_TOTAL_LEN) && + !strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_SETVERSION_OP, + OCFS2_CONTROL_MESSAGE_OP_LEN)) + ret = ocfs2_control_do_setversion_msg(file, &msg.u_setv); + else if ((count == OCFS2_CONTROL_MESSAGE_DOWN_TOTAL_LEN) && + !strncmp(msg.tag, OCFS2_CONTROL_MESSAGE_DOWN_OP, + OCFS2_CONTROL_MESSAGE_OP_LEN)) + ret = 
ocfs2_control_do_down_msg(file, &msg.u_down);
+	else
+		ret = -EINVAL;
+
+out:
+	return ret ? ret : count;
+}
+
+static ssize_t ocfs2_control_write(struct file *file,
+				   const char __user *buf,
+				   size_t count,
+				   loff_t *ppos)
+{
+	ssize_t ret;
+
+	switch (ocfs2_control_get_handshake_state(file)) {
+		case OCFS2_CONTROL_HANDSHAKE_INVALID:
+			ret = -EINVAL;
+			break;
+
+		case OCFS2_CONTROL_HANDSHAKE_READ:
+			ret = ocfs2_control_validate_protocol(file, buf,
+							      count);
+			break;
+
+		case OCFS2_CONTROL_HANDSHAKE_PROTOCOL:
+		case OCFS2_CONTROL_HANDSHAKE_VALID:
+			ret = ocfs2_control_message(file, buf, count);
+			break;
+
+		default:
+			BUG();
+			ret = -EIO;
+			break;
+	}
+
+	return ret;
+}
+
+/*
+ * This is a naive version. If we ever have a new protocol, we'll expand
+ * it. Probably using seq_file.
+ */
+static ssize_t ocfs2_control_read(struct file *file,
+				  char __user *buf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	ssize_t ret;
+
+	ret = simple_read_from_buffer(buf, count, ppos,
+			OCFS2_CONTROL_PROTO, OCFS2_CONTROL_PROTO_LEN);
+
+	/* Have we read the whole protocol list? */
+	if (ret > 0 && *ppos >= OCFS2_CONTROL_PROTO_LEN)
+		ocfs2_control_set_handshake_state(file,
+						  OCFS2_CONTROL_HANDSHAKE_READ);
+
+	return ret;
+}
+
+static int ocfs2_control_release(struct inode *inode, struct file *file)
+{
+	struct ocfs2_control_private *p = file->private_data;
+
+	mutex_lock(&ocfs2_control_lock);
+
+	if (ocfs2_control_get_handshake_state(file) !=
+	    OCFS2_CONTROL_HANDSHAKE_VALID)
+		goto out;
+
+	if (atomic_dec_and_test(&ocfs2_control_opened)) {
+		if (!list_empty(&ocfs2_live_connection_list)) {
+			/* XXX: Do bad things! */
+			printk(KERN_ERR
+			       "ocfs2: Unexpected release of ocfs2_control!\n"
+			       "       Loss of cluster connection requires "
+			       "an emergency restart!\n");
+			emergency_restart();
+		}
+		/*
+		 * Last valid close clears the node number and resets
+		 * the locking protocol version
+		 */
+		ocfs2_control_this_node = -1;
+		running_proto.pv_major = 0;
+		running_proto.pv_minor = 0;
+	}
+
+out:
+	list_del_init(&p->op_list);
+	file->private_data = NULL;
+
+	mutex_unlock(&ocfs2_control_lock);
+
+	kfree(p);
+
+	return 0;
+}
+
+static int ocfs2_control_open(struct inode *inode, struct file *file)
+{
+	struct ocfs2_control_private *p;
+
+	p = kzalloc(sizeof(struct ocfs2_control_private), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+	p->op_this_node = -1;
+
+	lock_kernel();
+	mutex_lock(&ocfs2_control_lock);
+	file->private_data = p;
+	list_add(&p->op_list, &ocfs2_control_private_list);
+	mutex_unlock(&ocfs2_control_lock);
+	unlock_kernel();
+
+	return 0;
+}
+
+static const struct file_operations ocfs2_control_fops = {
+	.open    = ocfs2_control_open,
+	.release = ocfs2_control_release,
+	.read    = ocfs2_control_read,
+	.write   = ocfs2_control_write,
+	.owner   = THIS_MODULE,
+};
+
+static struct miscdevice ocfs2_control_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name  = "ocfs2_control",
+	.fops  = &ocfs2_control_fops,
+};
+
+static int ocfs2_control_init(void)
+{
+	int rc;
+
+	atomic_set(&ocfs2_control_opened, 0);
+
+	rc = misc_register(&ocfs2_control_device);
+	if (rc)
+		printk(KERN_ERR
+		       "ocfs2: Unable to register ocfs2_control device "
+		       "(errno %d)\n",
+		       -rc);
+
+	return rc;
+}
+
+static void ocfs2_control_exit(void)
+{
+	int rc;
+
+	rc = misc_deregister(&ocfs2_control_device);
+	if (rc)
+		printk(KERN_ERR
+		       "ocfs2: Unable to deregister ocfs2_control device "
+		       "(errno %d)\n",
+		       -rc);
+}
+
+static struct dlm_lksb *fsdlm_astarg_to_lksb(void *astarg)
+{
+	struct ocfs2_lock_res *res = astarg;
+	return &res->l_lksb.lksb_fsdlm;
+}
+
+static void
fsdlm_lock_ast_wrapper(void *astarg) +{ + struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg); + int status = lksb->sb_status; + + BUG_ON(ocfs2_user_plugin.sp_proto == NULL); + + /* + * For now we're punting on the issue of other non-standard errors + * where we can't tell if the unlock_ast or lock_ast should be called. + * The main "other error" that's possible is EINVAL which means the + * function was called with invalid args, which shouldn't be possible + * since the caller here is under our control. Other non-standard + * errors probably fall into the same category, or otherwise are fatal + * which means we can't carry on anyway. + */ + + if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL) + ocfs2_user_plugin.sp_proto->lp_unlock_ast(astarg, 0); + else + ocfs2_user_plugin.sp_proto->lp_lock_ast(astarg); +} + +static void fsdlm_blocking_ast_wrapper(void *astarg, int level) +{ + BUG_ON(ocfs2_user_plugin.sp_proto == NULL); + + ocfs2_user_plugin.sp_proto->lp_blocking_ast(astarg, level); +} + +static int user_dlm_lock(struct ocfs2_cluster_connection *conn, + int mode, + union ocfs2_dlm_lksb *lksb, + u32 flags, + void *name, + unsigned int namelen, + void *astarg) +{ + int ret; + + if (!lksb->lksb_fsdlm.sb_lvbptr) + lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb + + sizeof(struct dlm_lksb); + + ret = dlm_lock(conn->cc_lockspace, mode, &lksb->lksb_fsdlm, + flags|DLM_LKF_NODLCKWT, name, namelen, 0, + fsdlm_lock_ast_wrapper, astarg, + fsdlm_blocking_ast_wrapper); + return ret; +} + +static int user_dlm_unlock(struct ocfs2_cluster_connection *conn, + union ocfs2_dlm_lksb *lksb, + u32 flags, + void *astarg) +{ + int ret; + + ret = dlm_unlock(conn->cc_lockspace, lksb->lksb_fsdlm.sb_lkid, + flags, &lksb->lksb_fsdlm, astarg); + return ret; +} + +static int user_dlm_lock_status(union ocfs2_dlm_lksb *lksb) +{ + return lksb->lksb_fsdlm.sb_status; +} + +static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb) +{ + if (!lksb->lksb_fsdlm.sb_lvbptr) + lksb->lksb_fsdlm.sb_lvbptr = (char *)lksb + + sizeof(struct dlm_lksb); + return (void *)(lksb->lksb_fsdlm.sb_lvbptr); +} + +static void user_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb) +{ +} + +static int user_plock(struct ocfs2_cluster_connection *conn, + u64 ino, + struct file *file, + int cmd, + struct file_lock *fl) +{ + /* + * This more or less just demuxes the plock request into any + * one of three dlm calls. + * + * Internally, fs/dlm will pass these to a misc device, which + * a userspace daemon will read and write to. + * + * For now, cancel requests (which happen internally only), + * are turned into unlocks. Most of this function taken from + * gfs2_lock. + */ + + if (cmd == F_CANCELLK) { + cmd = F_SETLK; + fl->fl_type = F_UNLCK; + } + + if (IS_GETLK(cmd)) + return dlm_posix_get(conn->cc_lockspace, ino, file, fl); + else if (fl->fl_type == F_UNLCK) + return dlm_posix_unlock(conn->cc_lockspace, ino, file, fl); + else + return dlm_posix_lock(conn->cc_lockspace, ino, file, cmd, fl); +} + +/* + * Compare a requested locking protocol version against the current one. + * + * If the major numbers are different, they are incompatible. + * If the current minor is greater than the request, they are incompatible. + * If the current minor is less than or equal to the request, they are + * compatible, and the requester should run at the current minor version. 
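+ *
+ * For example: existing 1.0 against request 1.0 is compatible as-is;
+ * existing 1.1 against request 1.0 fails (the current minor is greater
+ * than the request); existing 1.0 against request 1.1 succeeds, and
+ * request->pv_minor is rewritten to 0 so the requester runs at the
+ * current minor.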
+ */ +static int fs_protocol_compare(struct ocfs2_protocol_version *existing, + struct ocfs2_protocol_version *request) +{ + if (existing->pv_major != request->pv_major) + return 1; + + if (existing->pv_minor > request->pv_minor) + return 1; + + if (existing->pv_minor < request->pv_minor) + request->pv_minor = existing->pv_minor; + + return 0; +} + +static int user_cluster_connect(struct ocfs2_cluster_connection *conn) +{ + dlm_lockspace_t *fsdlm; + struct ocfs2_live_connection *control; + int rc = 0; + + BUG_ON(conn == NULL); + + rc = ocfs2_live_connection_new(conn, &control); + if (rc) + goto out; + + /* + * running_proto must have been set before we allowed any mounts + * to proceed. + */ + if (fs_protocol_compare(&running_proto, &conn->cc_version)) { + printk(KERN_ERR + "Unable to mount with fs locking protocol version " + "%u.%u because the userspace control daemon has " + "negotiated %u.%u\n", + conn->cc_version.pv_major, conn->cc_version.pv_minor, + running_proto.pv_major, running_proto.pv_minor); + rc = -EPROTO; + ocfs2_live_connection_drop(control); + goto out; + } + + rc = dlm_new_lockspace(conn->cc_name, strlen(conn->cc_name), + &fsdlm, DLM_LSFL_FS, DLM_LVB_LEN); + if (rc) { + ocfs2_live_connection_drop(control); + goto out; + } + + conn->cc_private = control; + conn->cc_lockspace = fsdlm; +out: + return rc; +} + +static int user_cluster_disconnect(struct ocfs2_cluster_connection *conn) +{ + dlm_release_lockspace(conn->cc_lockspace, 2); + conn->cc_lockspace = NULL; + ocfs2_live_connection_drop(conn->cc_private); + conn->cc_private = NULL; + return 0; +} + +static int user_cluster_this_node(unsigned int *this_node) +{ + int rc; + + rc = ocfs2_control_get_this_node(); + if (rc < 0) + return rc; + + *this_node = rc; + return 0; +} + +static struct ocfs2_stack_operations ocfs2_user_plugin_ops = { + .connect = user_cluster_connect, + .disconnect = user_cluster_disconnect, + .this_node = user_cluster_this_node, + .dlm_lock = user_dlm_lock, + .dlm_unlock = user_dlm_unlock, + .lock_status = user_dlm_lock_status, + .lock_lvb = user_dlm_lvb, + .plock = user_plock, + .dump_lksb = user_dlm_dump_lksb, +}; + +static struct ocfs2_stack_plugin ocfs2_user_plugin = { + .sp_name = "user", + .sp_ops = &ocfs2_user_plugin_ops, + .sp_owner = THIS_MODULE, +}; + + +static int __init ocfs2_user_plugin_init(void) +{ + int rc; + + rc = ocfs2_control_init(); + if (!rc) { + rc = ocfs2_stack_glue_register(&ocfs2_user_plugin); + if (rc) + ocfs2_control_exit(); + } + + return rc; +} + +static void __exit ocfs2_user_plugin_exit(void) +{ + ocfs2_stack_glue_unregister(&ocfs2_user_plugin); + ocfs2_control_exit(); +} + +MODULE_AUTHOR("Oracle"); +MODULE_DESCRIPTION("ocfs2 driver for userspace cluster stacks"); +MODULE_LICENSE("GPL"); +module_init(ocfs2_user_plugin_init); +module_exit(ocfs2_user_plugin_exit); diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c new file mode 100644 index 0000000..68b668b --- /dev/null +++ b/fs/ocfs2/stackglue.c @@ -0,0 +1,702 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * stackglue.c + * + * Code which implements an OCFS2 specific interface to underlying + * cluster stacks. + * + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation, version 2. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/kmod.h> +#include <linux/fs.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/sysctl.h> + +#include "ocfs2_fs.h" + +#include "stackglue.h" + +#define OCFS2_STACK_PLUGIN_O2CB "o2cb" +#define OCFS2_STACK_PLUGIN_USER "user" +#define OCFS2_MAX_HB_CTL_PATH 256 + +static struct ocfs2_locking_protocol *lproto; +static DEFINE_SPINLOCK(ocfs2_stack_lock); +static LIST_HEAD(ocfs2_stack_list); +static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1]; +static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; + +/* + * The stack currently in use. If not null, active_stack->sp_count > 0, + * the module is pinned, and the locking protocol cannot be changed. + */ +static struct ocfs2_stack_plugin *active_stack; + +static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) +{ + struct ocfs2_stack_plugin *p; + + assert_spin_locked(&ocfs2_stack_lock); + + list_for_each_entry(p, &ocfs2_stack_list, sp_list) { + if (!strcmp(p->sp_name, name)) + return p; + } + + return NULL; +} + +static int ocfs2_stack_driver_request(const char *stack_name, + const char *plugin_name) +{ + int rc; + struct ocfs2_stack_plugin *p; + + spin_lock(&ocfs2_stack_lock); + + /* + * If the stack passed by the filesystem isn't the selected one, + * we can't continue. + */ + if (strcmp(stack_name, cluster_stack_name)) { + rc = -EBUSY; + goto out; + } + + if (active_stack) { + /* + * If the active stack isn't the one we want, it cannot + * be selected right now. + */ + if (!strcmp(active_stack->sp_name, plugin_name)) + rc = 0; + else + rc = -EBUSY; + goto out; + } + + p = ocfs2_stack_lookup(plugin_name); + if (!p || !try_module_get(p->sp_owner)) { + rc = -ENOENT; + goto out; + } + + active_stack = p; + rc = 0; + +out: + /* If we found it, pin it */ + if (!rc) + active_stack->sp_count++; + + spin_unlock(&ocfs2_stack_lock); + return rc; +} + +/* + * This function looks up the appropriate stack and makes it active. If + * there is no stack, it tries to load it. It will fail if the stack still + * cannot be found. It will also fail if a different stack is in use. + */ +static int ocfs2_stack_driver_get(const char *stack_name) +{ + int rc; + char *plugin_name = OCFS2_STACK_PLUGIN_O2CB; + + /* + * Classic stack does not pass in a stack name. This is + * compatible with older tools as well. 
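+	 *
+	 * So, for example, a NULL or empty name selects "o2cb" below,
+	 * while any other OCFS2_STACK_LABEL_LEN-character label (a
+	 * hypothetical "cman", say) is routed to the "user" plugin.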
+ */ + if (!stack_name || !*stack_name) + stack_name = OCFS2_STACK_PLUGIN_O2CB; + + if (strlen(stack_name) != OCFS2_STACK_LABEL_LEN) { + printk(KERN_ERR + "ocfs2 passed an invalid cluster stack label: \"%s\"\n", + stack_name); + return -EINVAL; + } + + /* Anything that isn't the classic stack is a user stack */ + if (strcmp(stack_name, OCFS2_STACK_PLUGIN_O2CB)) + plugin_name = OCFS2_STACK_PLUGIN_USER; + + rc = ocfs2_stack_driver_request(stack_name, plugin_name); + if (rc == -ENOENT) { + request_module("ocfs2_stack_%s", plugin_name); + rc = ocfs2_stack_driver_request(stack_name, plugin_name); + } + + if (rc == -ENOENT) { + printk(KERN_ERR + "ocfs2: Cluster stack driver \"%s\" cannot be found\n", + plugin_name); + } else if (rc == -EBUSY) { + printk(KERN_ERR + "ocfs2: A different cluster stack is in use\n"); + } + + return rc; +} + +static void ocfs2_stack_driver_put(void) +{ + spin_lock(&ocfs2_stack_lock); + BUG_ON(active_stack == NULL); + BUG_ON(active_stack->sp_count == 0); + + active_stack->sp_count--; + if (!active_stack->sp_count) { + module_put(active_stack->sp_owner); + active_stack = NULL; + } + spin_unlock(&ocfs2_stack_lock); +} + +int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin) +{ + int rc; + + spin_lock(&ocfs2_stack_lock); + if (!ocfs2_stack_lookup(plugin->sp_name)) { + plugin->sp_count = 0; + plugin->sp_proto = lproto; + list_add(&plugin->sp_list, &ocfs2_stack_list); + printk(KERN_INFO "ocfs2: Registered cluster interface %s\n", + plugin->sp_name); + rc = 0; + } else { + printk(KERN_ERR "ocfs2: Stack \"%s\" already registered\n", + plugin->sp_name); + rc = -EEXIST; + } + spin_unlock(&ocfs2_stack_lock); + + return rc; +} +EXPORT_SYMBOL_GPL(ocfs2_stack_glue_register); + +void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin) +{ + struct ocfs2_stack_plugin *p; + + spin_lock(&ocfs2_stack_lock); + p = ocfs2_stack_lookup(plugin->sp_name); + if (p) { + BUG_ON(p != plugin); + BUG_ON(plugin == active_stack); + BUG_ON(plugin->sp_count != 0); + list_del_init(&plugin->sp_list); + printk(KERN_INFO "ocfs2: Unregistered cluster interface %s\n", + plugin->sp_name); + } else { + printk(KERN_ERR "Stack \"%s\" is not registered\n", + plugin->sp_name); + } + spin_unlock(&ocfs2_stack_lock); +} +EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister); + +void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto) +{ + struct ocfs2_stack_plugin *p; + + BUG_ON(proto == NULL); + + spin_lock(&ocfs2_stack_lock); + BUG_ON(active_stack != NULL); + + lproto = proto; + list_for_each_entry(p, &ocfs2_stack_list, sp_list) { + p->sp_proto = lproto; + } + + spin_unlock(&ocfs2_stack_lock); +} +EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_locking_protocol); + + +/* + * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take + * "struct ocfs2_lock_res *astarg" instead of "void *astarg" because the + * underlying stack plugins need to pilfer the lksb off of the lock_res. + * If some other structure needs to be passed as an astarg, the plugins + * will need to be given a different avenue to the lksb. 
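+ *
+ * The fsdlm plugin, for instance, digs its lksb out of the lock
+ * resource it was handed:
+ *
+ *	struct ocfs2_lock_res *res = astarg;
+ *	struct dlm_lksb *lksb = &res->l_lksb.lksb_fsdlm;
+ *
+ * (see fsdlm_astarg_to_lksb() in stack_user.c above).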
+ */ +int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn, + int mode, + union ocfs2_dlm_lksb *lksb, + u32 flags, + void *name, + unsigned int namelen, + struct ocfs2_lock_res *astarg) +{ + BUG_ON(lproto == NULL); + + return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags, + name, namelen, astarg); +} +EXPORT_SYMBOL_GPL(ocfs2_dlm_lock); + +int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn, + union ocfs2_dlm_lksb *lksb, + u32 flags, + struct ocfs2_lock_res *astarg) +{ + BUG_ON(lproto == NULL); + + return active_stack->sp_ops->dlm_unlock(conn, lksb, flags, astarg); +} +EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock); + +int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb) +{ + return active_stack->sp_ops->lock_status(lksb); +} +EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status); + +/* + * Why don't we cast to ocfs2_meta_lvb? The "clean" answer is that we + * don't cast at the glue level. The real answer is that the header + * ordering is nigh impossible. + */ +void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb) +{ + return active_stack->sp_ops->lock_lvb(lksb); +} +EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb); + +void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb) +{ + active_stack->sp_ops->dump_lksb(lksb); +} +EXPORT_SYMBOL_GPL(ocfs2_dlm_dump_lksb); + +int ocfs2_stack_supports_plocks(void) +{ + return active_stack && active_stack->sp_ops->plock; +} +EXPORT_SYMBOL_GPL(ocfs2_stack_supports_plocks); + +/* + * ocfs2_plock() can only be safely called if + * ocfs2_stack_supports_plocks() returned true + */ +int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino, + struct file *file, int cmd, struct file_lock *fl) +{ + WARN_ON_ONCE(active_stack->sp_ops->plock == NULL); + if (active_stack->sp_ops->plock) + return active_stack->sp_ops->plock(conn, ino, file, cmd, fl); + return -EOPNOTSUPP; +} +EXPORT_SYMBOL_GPL(ocfs2_plock); + +int ocfs2_cluster_connect(const char *stack_name, + const char *group, + int grouplen, + void (*recovery_handler)(int node_num, + void *recovery_data), + void *recovery_data, + struct ocfs2_cluster_connection **conn) +{ + int rc = 0; + struct ocfs2_cluster_connection *new_conn; + + BUG_ON(group == NULL); + BUG_ON(conn == NULL); + BUG_ON(recovery_handler == NULL); + + if (grouplen > GROUP_NAME_MAX) { + rc = -EINVAL; + goto out; + } + + new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection), + GFP_KERNEL); + if (!new_conn) { + rc = -ENOMEM; + goto out; + } + + memcpy(new_conn->cc_name, group, grouplen); + new_conn->cc_namelen = grouplen; + new_conn->cc_recovery_handler = recovery_handler; + new_conn->cc_recovery_data = recovery_data; + + /* Start the new connection at our maximum compatibility level */ + new_conn->cc_version = lproto->lp_max_version; + + /* This will pin the stack driver if successful */ + rc = ocfs2_stack_driver_get(stack_name); + if (rc) + goto out_free; + + rc = active_stack->sp_ops->connect(new_conn); + if (rc) { + ocfs2_stack_driver_put(); + goto out_free; + } + + *conn = new_conn; + +out_free: + if (rc) + kfree(new_conn); + +out: + return rc; +} +EXPORT_SYMBOL_GPL(ocfs2_cluster_connect); + +/* If hangup_pending is 0, the stack driver will be dropped */ +int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn, + int hangup_pending) +{ + int ret; + + BUG_ON(conn == NULL); + + ret = active_stack->sp_ops->disconnect(conn); + + /* XXX Should we free it anyway? 
*/ + if (!ret) { + kfree(conn); + if (!hangup_pending) + ocfs2_stack_driver_put(); + } + + return ret; +} +EXPORT_SYMBOL_GPL(ocfs2_cluster_disconnect); + +/* + * Leave the group for this filesystem. This is executed by a userspace + * program (stored in ocfs2_hb_ctl_path). + */ +static void ocfs2_leave_group(const char *group) +{ + int ret; + char *argv[5], *envp[3]; + + argv[0] = ocfs2_hb_ctl_path; + argv[1] = "-K"; + argv[2] = "-u"; + argv[3] = (char *)group; + argv[4] = NULL; + + /* minimal command environment taken from cpu_run_sbin_hotplug */ + envp[0] = "HOME=/"; + envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + envp[2] = NULL; + + ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); + if (ret < 0) { + printk(KERN_ERR + "ocfs2: Error %d running user helper " + "\"%s %s %s %s\"\n", + ret, argv[0], argv[1], argv[2], argv[3]); + } +} + +/* + * Hangup is a required post-umount. ocfs2-tools software expects the + * filesystem to call "ocfs2_hb_ctl" during unmount. This happens + * regardless of whether the DLM got started, so we can't do it + * in ocfs2_cluster_disconnect(). The ocfs2_leave_group() function does + * the actual work. + */ +void ocfs2_cluster_hangup(const char *group, int grouplen) +{ + BUG_ON(group == NULL); + BUG_ON(group[grouplen] != '\0'); + + ocfs2_leave_group(group); + + /* cluster_disconnect() was called with hangup_pending==1 */ + ocfs2_stack_driver_put(); +} +EXPORT_SYMBOL_GPL(ocfs2_cluster_hangup); + +int ocfs2_cluster_this_node(unsigned int *node) +{ + return active_stack->sp_ops->this_node(node); +} +EXPORT_SYMBOL_GPL(ocfs2_cluster_this_node); + + +/* + * Sysfs bits + */ + +static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + ssize_t ret = 0; + + spin_lock(&ocfs2_stack_lock); + if (lproto) + ret = snprintf(buf, PAGE_SIZE, "%u.%u\n", + lproto->lp_max_version.pv_major, + lproto->lp_max_version.pv_minor); + spin_unlock(&ocfs2_stack_lock); + + return ret; +} + +static struct kobj_attribute ocfs2_attr_max_locking_protocol = + __ATTR(max_locking_protocol, S_IFREG | S_IRUGO, + ocfs2_max_locking_protocol_show, NULL); + +static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + ssize_t ret = 0, total = 0, remain = PAGE_SIZE; + struct ocfs2_stack_plugin *p; + + spin_lock(&ocfs2_stack_lock); + list_for_each_entry(p, &ocfs2_stack_list, sp_list) { + ret = snprintf(buf, remain, "%s\n", + p->sp_name); + if (ret < 0) { + total = ret; + break; + } + if (ret == remain) { + /* snprintf() didn't fit */ + total = -E2BIG; + break; + } + total += ret; + remain -= ret; + } + spin_unlock(&ocfs2_stack_lock); + + return total; +} + +static struct kobj_attribute ocfs2_attr_loaded_cluster_plugins = + __ATTR(loaded_cluster_plugins, S_IFREG | S_IRUGO, + ocfs2_loaded_cluster_plugins_show, NULL); + +static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + ssize_t ret = 0; + + spin_lock(&ocfs2_stack_lock); + if (active_stack) { + ret = snprintf(buf, PAGE_SIZE, "%s\n", + active_stack->sp_name); + if (ret == PAGE_SIZE) + ret = -E2BIG; + } + spin_unlock(&ocfs2_stack_lock); + + return ret; +} + +static struct kobj_attribute ocfs2_attr_active_cluster_plugin = + __ATTR(active_cluster_plugin, S_IFREG | S_IRUGO, + ocfs2_active_cluster_plugin_show, NULL); + +static ssize_t ocfs2_cluster_stack_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + ssize_t ret; + 
spin_lock(&ocfs2_stack_lock); + ret = snprintf(buf, PAGE_SIZE, "%s\n", cluster_stack_name); + spin_unlock(&ocfs2_stack_lock); + + return ret; +} + +static ssize_t ocfs2_cluster_stack_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + size_t len = count; + ssize_t ret; + + if (len == 0) + return len; + + if (buf[len - 1] == '\n') + len--; + + if ((len != OCFS2_STACK_LABEL_LEN) || + (strnlen(buf, len) != len)) + return -EINVAL; + + spin_lock(&ocfs2_stack_lock); + if (active_stack) { + if (!strncmp(buf, cluster_stack_name, len)) + ret = count; + else + ret = -EBUSY; + } else { + memcpy(cluster_stack_name, buf, len); + ret = count; + } + spin_unlock(&ocfs2_stack_lock); + + return ret; +} + + +static struct kobj_attribute ocfs2_attr_cluster_stack = + __ATTR(cluster_stack, S_IFREG | S_IRUGO | S_IWUSR, + ocfs2_cluster_stack_show, + ocfs2_cluster_stack_store); + +static struct attribute *ocfs2_attrs[] = { + &ocfs2_attr_max_locking_protocol.attr, + &ocfs2_attr_loaded_cluster_plugins.attr, + &ocfs2_attr_active_cluster_plugin.attr, + &ocfs2_attr_cluster_stack.attr, + NULL, +}; + +static struct attribute_group ocfs2_attr_group = { + .attrs = ocfs2_attrs, +}; + +static struct kset *ocfs2_kset; + +static void ocfs2_sysfs_exit(void) +{ + kset_unregister(ocfs2_kset); +} + +static int ocfs2_sysfs_init(void) +{ + int ret; + + ocfs2_kset = kset_create_and_add("ocfs2", NULL, fs_kobj); + if (!ocfs2_kset) + return -ENOMEM; + + ret = sysfs_create_group(&ocfs2_kset->kobj, &ocfs2_attr_group); + if (ret) + goto error; + + return 0; + +error: + kset_unregister(ocfs2_kset); + return ret; +} + +/* + * Sysctl bits + * + * The sysctl lives at /proc/sys/fs/ocfs2/nm/hb_ctl_path. The 'nm' doesn't + * make as much sense in a multiple cluster stack world, but it's safer + * and easier to preserve the name. + */ + +#define FS_OCFS2_NM 1 + +static ctl_table ocfs2_nm_table[] = { + { + .ctl_name = 1, + .procname = "hb_ctl_path", + .data = ocfs2_hb_ctl_path, + .maxlen = OCFS2_MAX_HB_CTL_PATH, + .mode = 0644, + .proc_handler = &proc_dostring, + .strategy = &sysctl_string, + }, + { .ctl_name = 0 } +}; + +static ctl_table ocfs2_mod_table[] = { + { + .ctl_name = FS_OCFS2_NM, + .procname = "nm", + .data = NULL, + .maxlen = 0, + .mode = 0555, + .child = ocfs2_nm_table + }, + { .ctl_name = 0} +}; + +static ctl_table ocfs2_kern_table[] = { + { + .ctl_name = FS_OCFS2, + .procname = "ocfs2", + .data = NULL, + .maxlen = 0, + .mode = 0555, + .child = ocfs2_mod_table + }, + { .ctl_name = 0} +}; + +static ctl_table ocfs2_root_table[] = { + { + .ctl_name = CTL_FS, + .procname = "fs", + .data = NULL, + .maxlen = 0, + .mode = 0555, + .child = ocfs2_kern_table + }, + { .ctl_name = 0 } +}; + +static struct ctl_table_header *ocfs2_table_header = NULL; + + +/* + * Initialization + */ + +static int __init ocfs2_stack_glue_init(void) +{ + strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); + + ocfs2_table_header = register_sysctl_table(ocfs2_root_table); + if (!ocfs2_table_header) { + printk(KERN_ERR + "ocfs2 stack glue: unable to register sysctl\n"); + return -ENOMEM; /* or something. 
*/
+	}
+
+	return ocfs2_sysfs_init();
+}
+
+static void __exit ocfs2_stack_glue_exit(void)
+{
+	lproto = NULL;
+	ocfs2_sysfs_exit();
+	if (ocfs2_table_header)
+		unregister_sysctl_table(ocfs2_table_header);
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("ocfs2 cluster stack glue layer");
+MODULE_LICENSE("GPL");
+module_init(ocfs2_stack_glue_init);
+module_exit(ocfs2_stack_glue_exit);
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
new file mode 100644
index 0000000..c571af3
--- /dev/null
+++ b/fs/ocfs2/stackglue.h
@@ -0,0 +1,269 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * stackglue.h
+ *
+ * Glue to the underlying cluster stack.
+ *
+ * Copyright (C) 2007 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+
+#ifndef STACKGLUE_H
+#define STACKGLUE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/dlmconstants.h>
+
+#include "dlm/dlmapi.h"
+#include <linux/dlm.h>
+
+/* Needed for plock-related prototypes */
+struct file;
+struct file_lock;
+
+/*
+ * dlmconstants.h does not have a LOCAL flag.  We hope to remove it
+ * some day, but right now we need it.  Let's fake it.  This value is larger
+ * than any flag in dlmconstants.h.
+ */
+#define DLM_LKF_LOCAL		0x00100000
+
+/*
+ * This shadows DLM_LOCKSPACE_LEN in fs/dlm/dlm_internal.h.  That probably
+ * wants to be in a public header.
+ */
+#define GROUP_NAME_MAX		64
+
+
+/*
+ * ocfs2_protocol_version changes when ocfs2 does something different in
+ * its inter-node behavior.  See dlmglue.c for more information.
+ */
+struct ocfs2_protocol_version {
+	u8 pv_major;
+	u8 pv_minor;
+};
+
+/*
+ * The ocfs2_locking_protocol defines the handlers called on ocfs2's behalf.
+ */
+struct ocfs2_locking_protocol {
+	struct ocfs2_protocol_version lp_max_version;
+	void (*lp_lock_ast)(void *astarg);
+	void (*lp_blocking_ast)(void *astarg, int level);
+	void (*lp_unlock_ast)(void *astarg, int error);
+};
+
+
+/*
+ * The dlm_lockstatus struct includes lvb space, but the dlm_lksb struct only
+ * has a pointer to separately allocated lvb space.  This struct exists only to
+ * include in the lksb union to make space for a combined dlm_lksb and lvb.
+ */
+struct fsdlm_lksb_plus_lvb {
+	struct dlm_lksb lksb;
+	char lvb[DLM_LVB_LEN];
+};
+
+/*
+ * A union of all lock status structures.  We define it here so that the
+ * size of the union is known.  Lock status structures are embedded in
+ * ocfs2 inodes.
+ */
+union ocfs2_dlm_lksb {
+	struct dlm_lockstatus lksb_o2dlm;
+	struct dlm_lksb lksb_fsdlm;
+	struct fsdlm_lksb_plus_lvb padding;
+};
+
+/*
+ * A cluster connection.  Mostly opaque to ocfs2, the connection holds
+ * state for the underlying stack.  ocfs2 does use cc_version to determine
+ * locking compatibility.
+ */
+struct ocfs2_cluster_connection {
+	char cc_name[GROUP_NAME_MAX];
+	int cc_namelen;
+	struct ocfs2_protocol_version cc_version;
+	void (*cc_recovery_handler)(int node_num, void *recovery_data);
+	void *cc_recovery_data;
+	void *cc_lockspace;
+	void *cc_private;
+};
+
+/*
+ * Each cluster stack implements the stack operations structure.  Not used
+ * in the ocfs2 code, the stackglue code translates generic cluster calls
+ * into stack operations.
+ */
+struct ocfs2_stack_operations {
+	/*
+	 * The fs code calls ocfs2_cluster_connect() to attach a new
+	 * filesystem to the cluster stack.  The ->connect() op is passed
+	 * an ocfs2_cluster_connection with the name and recovery field
+	 * filled in.
+	 *
+	 * The stack must set up any notification mechanisms and create
+	 * the filesystem lockspace in the DLM.  The lockspace should be
+	 * stored on cc_lockspace.  Any other information can be stored on
+	 * cc_private.
+	 *
+	 * ->connect() must not return until it is guaranteed that
+	 *
+	 *  - Node down notifications for the filesystem will be received
+	 *    and passed to conn->cc_recovery_handler().
+	 *  - Locking requests for the filesystem will be processed.
+	 */
+	int (*connect)(struct ocfs2_cluster_connection *conn);
+
+	/*
+	 * The fs code calls ocfs2_cluster_disconnect() when a filesystem
+	 * no longer needs cluster services.  All DLM locks have been
+	 * dropped, and recovery notification is being ignored by the
+	 * fs code.  The stack must disengage from the DLM and discontinue
+	 * recovery notification.
+	 *
+	 * Once ->disconnect() has returned, the connection structure will
+	 * be freed.  Thus, a stack must not return from ->disconnect()
+	 * until it will no longer reference the conn pointer.
+	 *
+	 * Once this call returns, the stack glue will be dropping this
+	 * connection's reference on the module.
+	 */
+	int (*disconnect)(struct ocfs2_cluster_connection *conn);
+
+	/*
+	 * ->this_node() returns the cluster's unique identifier for the
+	 * local node.
+	 */
+	int (*this_node)(unsigned int *node);
+
+	/*
+	 * Call the underlying dlm lock function.  The ->dlm_lock()
+	 * callback should convert the flags and mode as appropriate.
+	 *
+	 * ast and bast functions are not part of the call because the
+	 * stack will likely want to wrap ast and bast calls before passing
+	 * them to stack->sp_proto.
+	 */
+	int (*dlm_lock)(struct ocfs2_cluster_connection *conn,
+			int mode,
+			union ocfs2_dlm_lksb *lksb,
+			u32 flags,
+			void *name,
+			unsigned int namelen,
+			void *astarg);
+
+	/*
+	 * Call the underlying dlm unlock function.  The ->dlm_unlock()
+	 * function should convert the flags as appropriate.
+	 *
+	 * The unlock ast is not passed, as the stack will want to wrap
+	 * it before calling stack->sp_proto->lp_unlock_ast().
+	 */
+	int (*dlm_unlock)(struct ocfs2_cluster_connection *conn,
+			  union ocfs2_dlm_lksb *lksb,
+			  u32 flags,
+			  void *astarg);
+
+	/*
+	 * Return the status of the current lock status block.  The fs
+	 * code should never dereference the union.  The ->lock_status()
+	 * callback pulls out the stack-specific lksb, converts the status
+	 * to a proper errno, and returns it.
+	 */
+	int (*lock_status)(union ocfs2_dlm_lksb *lksb);
+
+	/*
+	 * Pull the lvb pointer off of the stack-specific lksb.
+	 */
+	void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
+
+	/*
+	 * Cluster-aware posix locks
+	 *
+	 * This is NULL for stacks which do not support posix locks.
+	 */
+	int (*plock)(struct ocfs2_cluster_connection *conn,
+		     u64 ino,
+		     struct file *file,
+		     int cmd,
+		     struct file_lock *fl);
+
+	/*
+	 * This is an optional debugging hook.  If provided, the
+	 * stack can dump debugging information about this lock.
+	 */
+	void (*dump_lksb)(union ocfs2_dlm_lksb *lksb);
+};
+
+/*
+ * Each stack plugin must describe itself by registering a
+ * ocfs2_stack_plugin structure.  This is only seen by stackglue and the
+ * stack driver.
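+ *
+ * A stack driver typically declares the plugin statically and hands it
+ * to the glue at module init time.  A minimal sketch (the "example"
+ * names are illustrative, not a real stack):
+ *
+ *	static struct ocfs2_stack_plugin example_plugin = {
+ *		.sp_name	= "exmpl",
+ *		.sp_ops		= &example_stack_ops,
+ *		.sp_owner	= THIS_MODULE,
+ *	};
+ *
+ *	static int __init example_plugin_init(void)
+ *	{
+ *		return ocfs2_stack_glue_register(&example_plugin);
+ *	}
+ *
+ * ocfs2_stack_glue_unregister() undoes this at module exit, exactly as
+ * the "user" plugin above does.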
+ */ +struct ocfs2_stack_plugin { + char *sp_name; + struct ocfs2_stack_operations *sp_ops; + struct module *sp_owner; + + /* These are managed by the stackglue code. */ + struct list_head sp_list; + unsigned int sp_count; + struct ocfs2_locking_protocol *sp_proto; +}; + + +/* Used by the filesystem */ +int ocfs2_cluster_connect(const char *stack_name, + const char *group, + int grouplen, + void (*recovery_handler)(int node_num, + void *recovery_data), + void *recovery_data, + struct ocfs2_cluster_connection **conn); +int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn, + int hangup_pending); +void ocfs2_cluster_hangup(const char *group, int grouplen); +int ocfs2_cluster_this_node(unsigned int *node); + +struct ocfs2_lock_res; +int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn, + int mode, + union ocfs2_dlm_lksb *lksb, + u32 flags, + void *name, + unsigned int namelen, + struct ocfs2_lock_res *astarg); +int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn, + union ocfs2_dlm_lksb *lksb, + u32 flags, + struct ocfs2_lock_res *astarg); + +int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb); +void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb); +void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb); + +int ocfs2_stack_supports_plocks(void); +int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino, + struct file *file, int cmd, struct file_lock *fl); + +void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto); + + +/* Used by stack plugins */ +int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); +void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); + +#endif /* STACKGLUE_H */ diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c new file mode 100644 index 0000000..c5ff18b --- /dev/null +++ b/fs/ocfs2/suballoc.c @@ -0,0 +1,2029 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * suballoc.c + * + * metadata alloc and free + * Inspired by ext3 block groups. + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> + +#define MLOG_MASK_PREFIX ML_DISK_ALLOC +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dlmglue.h" +#include "inode.h" +#include "journal.h" +#include "localalloc.h" +#include "suballoc.h" +#include "super.h" +#include "sysfile.h" +#include "uptodate.h" + +#include "buffer_head_io.h" + +#define NOT_ALLOC_NEW_GROUP 0 +#define ALLOC_NEW_GROUP 1 + +#define OCFS2_MAX_INODES_TO_STEAL 1024 + +static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg); +static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe); +static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl); +static int ocfs2_block_group_fill(handle_t *handle, + struct inode *alloc_inode, + struct buffer_head *bg_bh, + u64 group_blkno, + u16 my_chain, + struct ocfs2_chain_list *cl); +static int ocfs2_block_group_alloc(struct ocfs2_super *osb, + struct inode *alloc_inode, + struct buffer_head *bh, + u64 max_block); + +static int ocfs2_cluster_group_search(struct inode *inode, + struct buffer_head *group_bh, + u32 bits_wanted, u32 min_bits, + u64 max_block, + u16 *bit_off, u16 *bits_found); +static int ocfs2_block_group_search(struct inode *inode, + struct buffer_head *group_bh, + u32 bits_wanted, u32 min_bits, + u64 max_block, + u16 *bit_off, u16 *bits_found); +static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb, + struct ocfs2_alloc_context *ac, + handle_t *handle, + u32 bits_wanted, + u32 min_bits, + u16 *bit_off, + unsigned int *num_bits, + u64 *bg_blkno); +static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, + int nr); +static inline int ocfs2_block_group_set_bits(handle_t *handle, + struct inode *alloc_inode, + struct ocfs2_group_desc *bg, + struct buffer_head *group_bh, + unsigned int bit_off, + unsigned int num_bits); +static inline int ocfs2_block_group_clear_bits(handle_t *handle, + struct inode *alloc_inode, + struct ocfs2_group_desc *bg, + struct buffer_head *group_bh, + unsigned int bit_off, + unsigned int num_bits); + +static int ocfs2_relink_block_group(handle_t *handle, + struct inode *alloc_inode, + struct buffer_head *fe_bh, + struct buffer_head *bg_bh, + struct buffer_head *prev_bg_bh, + u16 chain); +static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg, + u32 wanted); +static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode, + u64 bg_blkno, + u16 bg_bit_off); +static inline void ocfs2_block_to_cluster_group(struct inode *inode, + u64 data_blkno, + u64 *bg_blkno, + u16 *bg_bit_off); +static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb, + u32 bits_wanted, u64 max_block, + struct ocfs2_alloc_context **ac); + +void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac) +{ + struct inode *inode = ac->ac_inode; + + if (inode) { + if (ac->ac_which != OCFS2_AC_USE_LOCAL) + ocfs2_inode_unlock(inode, 1); + + mutex_unlock(&inode->i_mutex); + + iput(inode); + ac->ac_inode = NULL; + } + brelse(ac->ac_bh); + ac->ac_bh = NULL; +} + +void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac) +{ + ocfs2_free_ac_resource(ac); + kfree(ac); +} + +static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl) +{ + return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc); +} + +/* somewhat more expensive than our other checks, so use sparingly. 
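+ *
+ * The chain walk in ocfs2_search_chain() below, for example, pays this
+ * cost once for every group descriptor it reads off disk.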
*/ +int ocfs2_check_group_descriptor(struct super_block *sb, + struct ocfs2_dinode *di, + struct ocfs2_group_desc *gd) +{ + unsigned int max_bits; + + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd); + return -EIO; + } + + if (di->i_blkno != gd->bg_parent_dinode) { + ocfs2_error(sb, "Group descriptor # %llu has bad parent " + "pointer (%llu, expected %llu)", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + (unsigned long long)le64_to_cpu(gd->bg_parent_dinode), + (unsigned long long)le64_to_cpu(di->i_blkno)); + return -EIO; + } + + max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc); + if (le16_to_cpu(gd->bg_bits) > max_bits) { + ocfs2_error(sb, "Group descriptor # %llu has bit count of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_chain) >= + le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) { + ocfs2_error(sb, "Group descriptor # %llu has bad chain %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_chain)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "claims that %u are free", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + le16_to_cpu(gd->bg_free_bits_count)); + return -EIO; + } + + if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) { + ocfs2_error(sb, "Group descriptor # %llu has bit count %u but " + "max bitmap bits of %u", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + 8 * le16_to_cpu(gd->bg_size)); + return -EIO; + } + + return 0; +} + +static int ocfs2_block_group_fill(handle_t *handle, + struct inode *alloc_inode, + struct buffer_head *bg_bh, + u64 group_blkno, + u16 my_chain, + struct ocfs2_chain_list *cl) +{ + int status = 0; + struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; + struct super_block * sb = alloc_inode->i_sb; + + mlog_entry_void(); + + if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) { + ocfs2_error(alloc_inode->i_sb, "group block (%llu) != " + "b_blocknr (%llu)", + (unsigned long long)group_blkno, + (unsigned long long) bg_bh->b_blocknr); + status = -EIO; + goto bail; + } + + status = ocfs2_journal_access(handle, + alloc_inode, + bg_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + memset(bg, 0, sb->s_blocksize); + strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE); + bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation); + bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb)); + bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl)); + bg->bg_chain = cpu_to_le16(my_chain); + bg->bg_next_group = cl->cl_recs[my_chain].c_blkno; + bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno); + bg->bg_blkno = cpu_to_le64(group_blkno); + /* set the 1st bit in the bitmap to account for the descriptor block */ + ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap); + bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1); + + status = ocfs2_journal_dirty(handle, bg_bh); + if (status < 0) + mlog_errno(status); + + /* There is no need to zero out or otherwise initialize the + * other blocks in a group - All valid FS metadata in a block + * group stores the superblock fs_generation value at + * allocation time. 
*/ + +bail: + mlog_exit(status); + return status; +} + +static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl) +{ + u16 curr, best; + + best = curr = 0; + while (curr < le16_to_cpu(cl->cl_count)) { + if (le32_to_cpu(cl->cl_recs[best].c_total) > + le32_to_cpu(cl->cl_recs[curr].c_total)) + best = curr; + curr++; + } + return best; +} + +/* + * We expect the block group allocator to already be locked. + */ +static int ocfs2_block_group_alloc(struct ocfs2_super *osb, + struct inode *alloc_inode, + struct buffer_head *bh, + u64 max_block) +{ + int status, credits; + struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data; + struct ocfs2_chain_list *cl; + struct ocfs2_alloc_context *ac = NULL; + handle_t *handle = NULL; + u32 bit_off, num_bits; + u16 alloc_rec; + u64 bg_blkno; + struct buffer_head *bg_bh = NULL; + struct ocfs2_group_desc *bg; + + BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode)); + + mlog_entry_void(); + + cl = &fe->id2.i_chain; + status = ocfs2_reserve_clusters_with_limit(osb, + le16_to_cpu(cl->cl_cpg), + max_block, &ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + credits = ocfs2_calc_group_alloc_credits(osb->sb, + le16_to_cpu(cl->cl_cpg)); + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto bail; + } + + status = ocfs2_claim_clusters(osb, + handle, + ac, + le16_to_cpu(cl->cl_cpg), + &bit_off, + &num_bits); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + alloc_rec = ocfs2_find_smallest_chain(cl); + + /* setup the group */ + bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off); + mlog(0, "new descriptor, record %u, at block %llu\n", + alloc_rec, (unsigned long long)bg_blkno); + + bg_bh = sb_getblk(osb->sb, bg_blkno); + if (!bg_bh) { + status = -EIO; + mlog_errno(status); + goto bail; + } + ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh); + + status = ocfs2_block_group_fill(handle, + alloc_inode, + bg_bh, + bg_blkno, + alloc_rec, + cl); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + bg = (struct ocfs2_group_desc *) bg_bh->b_data; + + status = ocfs2_journal_access(handle, alloc_inode, + bh, OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + le32_add_cpu(&cl->cl_recs[alloc_rec].c_free, + le16_to_cpu(bg->bg_free_bits_count)); + le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits)); + cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno); + if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count)) + le16_add_cpu(&cl->cl_next_free_rec, 1); + + le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) - + le16_to_cpu(bg->bg_free_bits_count)); + le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits)); + le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg)); + + status = ocfs2_journal_dirty(handle, bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + spin_lock(&OCFS2_I(alloc_inode)->ip_lock); + OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters); + fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb, + le32_to_cpu(fe->i_clusters))); + spin_unlock(&OCFS2_I(alloc_inode)->ip_lock); + i_size_write(alloc_inode, le64_to_cpu(fe->i_size)); + alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode); + + status = 0; +bail: + if (handle) + ocfs2_commit_trans(osb, handle); + + if (ac) + ocfs2_free_alloc_context(ac); + + brelse(bg_bh); + + mlog_exit(status); + 
return status; +} + +static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb, + struct ocfs2_alloc_context *ac, + int type, + u32 slot, + int alloc_new_group) +{ + int status; + u32 bits_wanted = ac->ac_bits_wanted; + struct inode *alloc_inode; + struct buffer_head *bh = NULL; + struct ocfs2_dinode *fe; + u32 free_bits; + + mlog_entry_void(); + + alloc_inode = ocfs2_get_system_file_inode(osb, type, slot); + if (!alloc_inode) { + mlog_errno(-EINVAL); + return -EINVAL; + } + + mutex_lock(&alloc_inode->i_mutex); + + status = ocfs2_inode_lock(alloc_inode, &bh, 1); + if (status < 0) { + mutex_unlock(&alloc_inode->i_mutex); + iput(alloc_inode); + + mlog_errno(status); + return status; + } + + ac->ac_inode = alloc_inode; + ac->ac_alloc_slot = slot; + + fe = (struct ocfs2_dinode *) bh->b_data; + if (!OCFS2_IS_VALID_DINODE(fe)) { + OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe); + status = -EIO; + goto bail; + } + if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) { + ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu", + (unsigned long long)le64_to_cpu(fe->i_blkno)); + status = -EIO; + goto bail; + } + + free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) - + le32_to_cpu(fe->id1.bitmap1.i_used); + + if (bits_wanted > free_bits) { + /* cluster bitmap never grows */ + if (ocfs2_is_cluster_bitmap(alloc_inode)) { + mlog(0, "Disk Full: wanted=%u, free_bits=%u\n", + bits_wanted, free_bits); + status = -ENOSPC; + goto bail; + } + + if (alloc_new_group != ALLOC_NEW_GROUP) { + mlog(0, "Alloc File %u Full: wanted=%u, free_bits=%u, " + "and we don't alloc a new group for it.\n", + slot, bits_wanted, free_bits); + status = -ENOSPC; + goto bail; + } + + status = ocfs2_block_group_alloc(osb, alloc_inode, bh, + ac->ac_max_block); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + atomic_inc(&osb->alloc_stats.bg_extends); + + /* You should never ask for this much metadata */ + BUG_ON(bits_wanted > + (le32_to_cpu(fe->id1.bitmap1.i_total) + - le32_to_cpu(fe->id1.bitmap1.i_used))); + } + + get_bh(bh); + ac->ac_bh = bh; +bail: + brelse(bh); + + mlog_exit(status); + return status; +} + +int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb, + int blocks, + struct ocfs2_alloc_context **ac) +{ + int status; + u32 slot; + + *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); + if (!(*ac)) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + (*ac)->ac_bits_wanted = blocks; + (*ac)->ac_which = OCFS2_AC_USE_META; + slot = osb->slot_num; + (*ac)->ac_group_search = ocfs2_block_group_search; + + status = ocfs2_reserve_suballoc_bits(osb, (*ac), + EXTENT_ALLOC_SYSTEM_INODE, + slot, ALLOC_NEW_GROUP); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + status = 0; +bail: + if ((status < 0) && *ac) { + ocfs2_free_alloc_context(*ac); + *ac = NULL; + } + + mlog_exit(status); + return status; +} + +int ocfs2_reserve_new_metadata(struct ocfs2_super *osb, + struct ocfs2_extent_list *root_el, + struct ocfs2_alloc_context **ac) +{ + return ocfs2_reserve_new_metadata_blocks(osb, + ocfs2_extend_meta_needed(root_el), + ac); +} + +static int ocfs2_steal_inode_from_other_nodes(struct ocfs2_super *osb, + struct ocfs2_alloc_context *ac) +{ + int i, status = -ENOSPC; + s16 slot = ocfs2_get_inode_steal_slot(osb); + + /* Start to steal inodes from the first slot after ours. 
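+	 * For example, on a volume with max_slots = 4 where we occupy
+	 * slot 1, the loop below probes slots 2, 3 and then 0, always
+	 * skipping our own.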
*/ + if (slot == OCFS2_INVALID_SLOT) + slot = osb->slot_num + 1; + + for (i = 0; i < osb->max_slots; i++, slot++) { + if (slot == osb->max_slots) + slot = 0; + + if (slot == osb->slot_num) + continue; + + status = ocfs2_reserve_suballoc_bits(osb, ac, + INODE_ALLOC_SYSTEM_INODE, + slot, NOT_ALLOC_NEW_GROUP); + if (status >= 0) { + ocfs2_set_inode_steal_slot(osb, slot); + break; + } + + ocfs2_free_ac_resource(ac); + } + + return status; +} + +int ocfs2_reserve_new_inode(struct ocfs2_super *osb, + struct ocfs2_alloc_context **ac) +{ + int status; + s16 slot = ocfs2_get_inode_steal_slot(osb); + + *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); + if (!(*ac)) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + (*ac)->ac_bits_wanted = 1; + (*ac)->ac_which = OCFS2_AC_USE_INODE; + + (*ac)->ac_group_search = ocfs2_block_group_search; + + /* + * stat(2) can't handle i_ino > 32bits, so we tell the + * lower levels not to allocate us a block group past that + * limit. The 'inode64' mount option avoids this behavior. + */ + if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64)) + (*ac)->ac_max_block = (u32)~0U; + + /* + * slot is set when we successfully steal inode from other nodes. + * It is reset in 3 places: + * 1. when we flush the truncate log + * 2. when we complete local alloc recovery. + * 3. when we successfully allocate from our own slot. + * After it is set, we will go on stealing inodes until we find the + * need to check our slots to see whether there is some space for us. + */ + if (slot != OCFS2_INVALID_SLOT && + atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL) + goto inode_steal; + + atomic_set(&osb->s_num_inodes_stolen, 0); + status = ocfs2_reserve_suballoc_bits(osb, *ac, + INODE_ALLOC_SYSTEM_INODE, + osb->slot_num, ALLOC_NEW_GROUP); + if (status >= 0) { + status = 0; + + /* + * Some inodes must be freed by us, so try to allocate + * from our own next time. + */ + if (slot != OCFS2_INVALID_SLOT) + ocfs2_init_inode_steal_slot(osb); + goto bail; + } else if (status < 0 && status != -ENOSPC) { + mlog_errno(status); + goto bail; + } + + ocfs2_free_ac_resource(*ac); + +inode_steal: + status = ocfs2_steal_inode_from_other_nodes(osb, *ac); + atomic_inc(&osb->s_num_inodes_stolen); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + status = 0; +bail: + if ((status < 0) && *ac) { + ocfs2_free_alloc_context(*ac); + *ac = NULL; + } + + mlog_exit(status); + return status; +} + +/* local alloc code has to do the same thing, so rather than do this + * twice.. */ +int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb, + struct ocfs2_alloc_context *ac) +{ + int status; + + ac->ac_which = OCFS2_AC_USE_MAIN; + ac->ac_group_search = ocfs2_cluster_group_search; + + status = ocfs2_reserve_suballoc_bits(osb, ac, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT, + ALLOC_NEW_GROUP); + if (status < 0 && status != -ENOSPC) { + mlog_errno(status); + goto bail; + } + +bail: + return status; +} + +/* Callers don't need to care which bitmap (local alloc or main) to + * use so we figure it out for them, but unfortunately this clutters + * things a bit. 
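+ * The fallback order below: try the local alloc window first when
+ * ocfs2_alloc_should_use_local() says the request fits, treat -EFBIG
+ * (the window lies past ac_max_block) as -ENOSPC, and only then fall
+ * back to the main cluster bitmap.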
*/ +static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb, + u32 bits_wanted, u64 max_block, + struct ocfs2_alloc_context **ac) +{ + int status; + + mlog_entry_void(); + + *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL); + if (!(*ac)) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + (*ac)->ac_bits_wanted = bits_wanted; + (*ac)->ac_max_block = max_block; + + status = -ENOSPC; + if (ocfs2_alloc_should_use_local(osb, bits_wanted)) { + status = ocfs2_reserve_local_alloc_bits(osb, + bits_wanted, + *ac); + if (status == -EFBIG) { + /* The local alloc window is outside ac_max_block. + * use the main bitmap. */ + status = -ENOSPC; + } else if ((status < 0) && (status != -ENOSPC)) { + mlog_errno(status); + goto bail; + } + } + + if (status == -ENOSPC) { + status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + } + + status = 0; +bail: + if ((status < 0) && *ac) { + ocfs2_free_alloc_context(*ac); + *ac = NULL; + } + + mlog_exit(status); + return status; +} + +int ocfs2_reserve_clusters(struct ocfs2_super *osb, + u32 bits_wanted, + struct ocfs2_alloc_context **ac) +{ + return ocfs2_reserve_clusters_with_limit(osb, bits_wanted, 0, ac); +} + +/* + * More or less lifted from ext3. I'll leave their description below: + * + * "For ext3 allocations, we must not reuse any blocks which are + * allocated in the bitmap buffer's "last committed data" copy. This + * prevents deletes from freeing up the page for reuse until we have + * committed the delete transaction. + * + * If we didn't do this, then deleting something and reallocating it as + * data would allow the old block to be overwritten before the + * transaction committed (because we force data to disk before commit). + * This would lead to corruption if we crashed between overwriting the + * data and committing the delete. + * + * @@@ We may want to make this allocation behaviour conditional on + * data-writes at some point, and disable it for metadata allocations or + * sync-data inodes." + * + * Note: OCFS2 already does this differently for metadata vs data + * allocations, as those bitmaps are separate and undo access is never + * called on a metadata group descriptor. + */ +static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, + int nr) +{ + struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; + + if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap)) + return 0; + if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data) + return 1; + + bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data; + return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap); +} + +static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb, + struct buffer_head *bg_bh, + unsigned int bits_wanted, + unsigned int total_bits, + u16 *bit_off, + u16 *bits_found) +{ + void *bitmap; + u16 best_offset, best_size; + int offset, start, found, status = 0; + struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; + + if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(osb->sb, bg); + return -EIO; + } + + found = start = best_offset = best_size = 0; + bitmap = bg->bg_bitmap; + + while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) { + if (offset == total_bits) + break; + + if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) { + /* We found a zero, but we can't use it as it + * hasn't been put to disk yet! 
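+			 * (The bit is clear in bg_bitmap but still set
+			 * in the journal's b_committed_data copy; see
+			 * ocfs2_test_bg_bit_allocatable() above.)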
*/ + found = 0; + start = offset + 1; + } else if (offset == start) { + /* we found a zero */ + found++; + /* move start to the next bit to test */ + start++; + } else { + /* got a zero after some ones */ + found = 1; + start = offset + 1; + } + if (found > best_size) { + best_size = found; + best_offset = start - found; + } + /* we got everything we needed */ + if (found == bits_wanted) { + /* mlog(0, "Found it all!\n"); */ + break; + } + } + + /* XXX: I think the first clause is equivalent to the second + * - jlbec */ + if (found == bits_wanted) { + *bit_off = start - found; + *bits_found = found; + } else if (best_size) { + *bit_off = best_offset; + *bits_found = best_size; + } else { + status = -ENOSPC; + /* No error log here -- see the comment above + * ocfs2_test_bg_bit_allocatable */ + } + + return status; +} + +static inline int ocfs2_block_group_set_bits(handle_t *handle, + struct inode *alloc_inode, + struct ocfs2_group_desc *bg, + struct buffer_head *group_bh, + unsigned int bit_off, + unsigned int num_bits) +{ + int status; + void *bitmap = bg->bg_bitmap; + int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; + + mlog_entry_void(); + + if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); + status = -EIO; + goto bail; + } + BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); + + mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, + num_bits); + + if (ocfs2_is_cluster_bitmap(alloc_inode)) + journal_type = OCFS2_JOURNAL_ACCESS_UNDO; + + status = ocfs2_journal_access(handle, + alloc_inode, + group_bh, + journal_type); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + le16_add_cpu(&bg->bg_free_bits_count, -num_bits); + + while(num_bits--) + ocfs2_set_bit(bit_off++, bitmap); + + status = ocfs2_journal_dirty(handle, + group_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + +bail: + mlog_exit(status); + return status; +} + +/* find the one with the most empty bits */ +static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl) +{ + u16 curr, best; + + BUG_ON(!cl->cl_next_free_rec); + + best = curr = 0; + while (curr < le16_to_cpu(cl->cl_next_free_rec)) { + if (le32_to_cpu(cl->cl_recs[curr].c_free) > + le32_to_cpu(cl->cl_recs[best].c_free)) + best = curr; + curr++; + } + + BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec)); + return best; +} + +static int ocfs2_relink_block_group(handle_t *handle, + struct inode *alloc_inode, + struct buffer_head *fe_bh, + struct buffer_head *bg_bh, + struct buffer_head *prev_bg_bh, + u16 chain) +{ + int status; + /* there is a really tiny chance the journal calls could fail, + * but we wouldn't want inconsistent blocks in *any* case. 
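+	 * So we snapshot the three chain pointers (fe_ptr, bg_ptr,
+	 * prev_bg_ptr) up front and put them back by hand at
+	 * out_rollback if any journal call fails.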
+	 */
+	u64 fe_ptr, bg_ptr, prev_bg_ptr;
+	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
+	struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+	struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;
+
+	if (!OCFS2_IS_VALID_DINODE(fe)) {
+		OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
+		status = -EIO;
+		goto out;
+	}
+	if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
+		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
+		status = -EIO;
+		goto out;
+	}
+	if (!OCFS2_IS_VALID_GROUP_DESC(prev_bg)) {
+		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, prev_bg);
+		status = -EIO;
+		goto out;
+	}
+
+	mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n",
+	     (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
+	     (unsigned long long)le64_to_cpu(bg->bg_blkno),
+	     (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
+
+	fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
+	bg_ptr = le64_to_cpu(bg->bg_next_group);
+	prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
+
+	status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out_rollback;
+	}
+
+	prev_bg->bg_next_group = bg->bg_next_group;
+
+	status = ocfs2_journal_dirty(handle, prev_bg_bh);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out_rollback;
+	}
+
+	status = ocfs2_journal_access(handle, alloc_inode, bg_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out_rollback;
+	}
+
+	bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
+
+	status = ocfs2_journal_dirty(handle, bg_bh);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out_rollback;
+	}
+
+	status = ocfs2_journal_access(handle, alloc_inode, fe_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out_rollback;
+	}
+
+	fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
+
+	status = ocfs2_journal_dirty(handle, fe_bh);
+	if (status < 0) {
+		mlog_errno(status);
+		goto out_rollback;
+	}
+
+	status = 0;
+out_rollback:
+	if (status < 0) {
+		fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
+		bg->bg_next_group = cpu_to_le64(bg_ptr);
+		prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
+	}
+out:
+	mlog_exit(status);
+	return status;
+}
+
+static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
+						     u32 wanted)
+{
+	return le16_to_cpu(bg->bg_free_bits_count) > wanted;
+}
+
+/* return 0 on success, -ENOSPC to keep searching and any other < 0
+ * value on error. */
+static int ocfs2_cluster_group_search(struct inode *inode,
+				      struct buffer_head *group_bh,
+				      u32 bits_wanted, u32 min_bits,
+				      u64 max_block,
+				      u16 *bit_off, u16 *bits_found)
+{
+	int search = -ENOSPC;
+	int ret;
+	u64 blkoff;
+	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	u16 tmp_off, tmp_found;
+	unsigned int max_bits, gd_cluster_off;
+
+	BUG_ON(!ocfs2_is_cluster_bitmap(inode));
+
+	if (gd->bg_free_bits_count) {
+		max_bits = le16_to_cpu(gd->bg_bits);
+
+		/* Tail groups in cluster bitmaps which aren't cpg
+		 * aligned are prone to partial extension by a failed
+		 * fs resize.  If the file system resize never got to
+		 * update the dinode cluster count, then we don't want
+		 * to trust any clusters past it, regardless of what
+		 * the group descriptor says.
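+		 *
+		 * For example, if this tail group sits at cluster
+		 * offset 1000 and claims bits for 8 clusters, but the
+		 * dinode only records ip_clusters = 1004, we clamp
+		 * max_bits to 1004 - 1000 = 4.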
*/ + gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb, + le64_to_cpu(gd->bg_blkno)); + if ((gd_cluster_off + max_bits) > + OCFS2_I(inode)->ip_clusters) { + max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off; + mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n", + (unsigned long long)le64_to_cpu(gd->bg_blkno), + le16_to_cpu(gd->bg_bits), + OCFS2_I(inode)->ip_clusters, max_bits); + } + + ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), + group_bh, bits_wanted, + max_bits, + &tmp_off, &tmp_found); + if (ret) + return ret; + + if (max_block) { + blkoff = ocfs2_clusters_to_blocks(inode->i_sb, + gd_cluster_off + + tmp_off + tmp_found); + mlog(0, "Checking %llu against %llu\n", + (unsigned long long)blkoff, + (unsigned long long)max_block); + if (blkoff > max_block) + return -ENOSPC; + } + + /* ocfs2_block_group_find_clear_bits() might + * return success, but we still want to return + * -ENOSPC unless it found the minimum number + * of bits. */ + if (min_bits <= tmp_found) { + *bit_off = tmp_off; + *bits_found = tmp_found; + search = 0; /* success */ + } else if (tmp_found) { + /* + * Don't show bits which we'll be returning + * for allocation to the local alloc bitmap. + */ + ocfs2_local_alloc_seen_free_bits(osb, tmp_found); + } + } + + return search; +} + +static int ocfs2_block_group_search(struct inode *inode, + struct buffer_head *group_bh, + u32 bits_wanted, u32 min_bits, + u64 max_block, + u16 *bit_off, u16 *bits_found) +{ + int ret = -ENOSPC; + u64 blkoff; + struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data; + + BUG_ON(min_bits != 1); + BUG_ON(ocfs2_is_cluster_bitmap(inode)); + + if (bg->bg_free_bits_count) { + ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb), + group_bh, bits_wanted, + le16_to_cpu(bg->bg_bits), + bit_off, bits_found); + if (!ret && max_block) { + blkoff = le64_to_cpu(bg->bg_blkno) + *bit_off + + *bits_found; + mlog(0, "Checking %llu against %llu\n", + (unsigned long long)blkoff, + (unsigned long long)max_block); + if (blkoff > max_block) + ret = -ENOSPC; + } + } + + return ret; +} + +static int ocfs2_alloc_dinode_update_counts(struct inode *inode, + handle_t *handle, + struct buffer_head *di_bh, + u32 num_bits, + u16 chain) +{ + int ret; + u32 tmp_used; + struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; + struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; + + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); + di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); + le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + +static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac, + handle_t *handle, + u32 bits_wanted, + u32 min_bits, + u16 *bit_off, + unsigned int *num_bits, + u64 gd_blkno, + u16 *bits_left) +{ + int ret; + u16 found; + struct buffer_head *group_bh = NULL; + struct ocfs2_group_desc *gd; + struct inode *alloc_inode = ac->ac_inode; + + ret = ocfs2_read_block(alloc_inode, gd_blkno, &group_bh); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + gd = (struct ocfs2_group_desc *) group_bh->b_data; + if (!OCFS2_IS_VALID_GROUP_DESC(gd)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd); + ret = -EIO; + goto out; + } + + ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits, + 
ac->ac_max_block, bit_off, &found); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + + *num_bits = found; + + ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh, + *num_bits, + le16_to_cpu(gd->bg_chain)); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh, + *bit_off, *num_bits); + if (ret < 0) + mlog_errno(ret); + + *bits_left = le16_to_cpu(gd->bg_free_bits_count); + +out: + brelse(group_bh); + + return ret; +} + +static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, + handle_t *handle, + u32 bits_wanted, + u32 min_bits, + u16 *bit_off, + unsigned int *num_bits, + u64 *bg_blkno, + u16 *bits_left) +{ + int status; + u16 chain, tmp_bits; + u32 tmp_used; + u64 next_group; + struct inode *alloc_inode = ac->ac_inode; + struct buffer_head *group_bh = NULL; + struct buffer_head *prev_group_bh = NULL; + struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data; + struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain; + struct ocfs2_group_desc *bg; + + chain = ac->ac_chain; + mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n", + bits_wanted, chain, + (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno); + + status = ocfs2_read_block(alloc_inode, + le64_to_cpu(cl->cl_recs[chain].c_blkno), + &group_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + bg = (struct ocfs2_group_desc *) group_bh->b_data; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); + goto bail; + } + + status = -ENOSPC; + /* for now, the chain search is a bit simplistic. We just use + * the 1st group with any empty bits. */ + while ((status = ac->ac_group_search(alloc_inode, group_bh, + bits_wanted, min_bits, + ac->ac_max_block, bit_off, + &tmp_bits)) == -ENOSPC) { + if (!bg->bg_next_group) + break; + + brelse(prev_group_bh); + prev_group_bh = NULL; + + next_group = le64_to_cpu(bg->bg_next_group); + prev_group_bh = group_bh; + group_bh = NULL; + status = ocfs2_read_block(alloc_inode, + next_group, &group_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + bg = (struct ocfs2_group_desc *) group_bh->b_data; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg); + if (status) { + mlog_errno(status); + goto bail; + } + } + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + mlog(0, "alloc succeeds: we give %u bits from block group %llu\n", + tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno)); + + *num_bits = tmp_bits; + + BUG_ON(*num_bits == 0); + + /* + * Keep track of previous block descriptor read. When + * we find a target, if we have read more than X + * number of descriptors, and the target is reasonably + * empty, relink him to top of his chain. + * + * We've read 0 extra blocks and only send one more to + * the transaction, yet the next guy to search has a + * much easier time. + * + * Do this *after* figuring out how many bits we're taking out + * of our target group. 
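+	 *
+	 * Illustration (made-up chain): if the dinode chain is
+	 *
+	 *	dinode -> G1 -> G2 -> G3	(bits found in G3)
+	 *
+	 * then after the relink below it becomes
+	 *
+	 *	dinode -> G3 -> G1 -> G2
+	 *
+	 * so the next search hits the group with free bits first.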
+	 */
+	if (ac->ac_allow_chain_relink &&
+	    (prev_group_bh) &&
+	    (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
+		status = ocfs2_relink_block_group(handle, alloc_inode,
+						  ac->ac_bh, group_bh,
+						  prev_group_bh, chain);
+		if (status < 0) {
+			mlog_errno(status);
+			goto bail;
+		}
+	}
+
+	/* Ok, claim our bits now: set the info on dinode, chainlist
+	 * and then the group */
+	status = ocfs2_journal_access(handle,
+				      alloc_inode,
+				      ac->ac_bh,
+				      OCFS2_JOURNAL_ACCESS_WRITE);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
+	fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
+	le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));
+
+	status = ocfs2_journal_dirty(handle,
+				     ac->ac_bh);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	status = ocfs2_block_group_set_bits(handle,
+					    alloc_inode,
+					    bg,
+					    group_bh,
+					    *bit_off,
+					    *num_bits);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
+	     (unsigned long long)le64_to_cpu(fe->i_blkno));
+
+	*bg_blkno = le64_to_cpu(bg->bg_blkno);
+	*bits_left = le16_to_cpu(bg->bg_free_bits_count);
+bail:
+	brelse(group_bh);
+	brelse(prev_group_bh);
+
+	mlog_exit(status);
+	return status;
+}
+
+/* will give out up to bits_wanted contiguous bits. */
+static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
+				     struct ocfs2_alloc_context *ac,
+				     handle_t *handle,
+				     u32 bits_wanted,
+				     u32 min_bits,
+				     u16 *bit_off,
+				     unsigned int *num_bits,
+				     u64 *bg_blkno)
+{
+	int status;
+	u16 victim, i;
+	u16 bits_left = 0;
+	u64 hint_blkno = ac->ac_last_group;
+	struct ocfs2_chain_list *cl;
+	struct ocfs2_dinode *fe;
+
+	mlog_entry_void();
+
+	BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
+	BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
+	BUG_ON(!ac->ac_bh);
+
+	fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+	if (!OCFS2_IS_VALID_DINODE(fe)) {
+		OCFS2_RO_ON_INVALID_DINODE(osb->sb, fe);
+		status = -EIO;
+		goto bail;
+	}
+	if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
+	    le32_to_cpu(fe->id1.bitmap1.i_total)) {
+		ocfs2_error(osb->sb, "Chain allocator dinode %llu has %u used "
+			    "bits but only %u total.",
+			    (unsigned long long)le64_to_cpu(fe->i_blkno),
+			    le32_to_cpu(fe->id1.bitmap1.i_used),
+			    le32_to_cpu(fe->id1.bitmap1.i_total));
+		status = -EIO;
+		goto bail;
+	}
+
+	if (hint_blkno) {
+		/* Attempt to short-circuit the usual search mechanism
+		 * by jumping straight to the most recently used
+		 * allocation group. This helps us maintain some
+		 * contiguousness across allocations. */
+		status = ocfs2_search_one_group(ac, handle, bits_wanted,
+						min_bits, bit_off, num_bits,
+						hint_blkno, &bits_left);
+		if (!status) {
+			/* Be careful to update *bg_blkno here as the
+			 * caller is expecting it to be filled in, and
+			 * ocfs2_search_one_group() won't do that for
+			 * us.
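+			 *
+			 * In effect the hint path is (illustrative
+			 * pseudo-flow, not extra code in this file):
+			 *
+			 *	if (ac->ac_last_group)
+			 *		try that group first;
+			 *	else
+			 *		pick the emptiest chain;
+			 *
+			 * and on success *bg_blkno is remembered in
+			 * ac->ac_last_group for the next call.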
+			 */
+			*bg_blkno = hint_blkno;
+			goto set_hint;
+		}
+		if (status < 0 && status != -ENOSPC) {
+			mlog_errno(status);
+			goto bail;
+		}
+	}
+
+	cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
+
+	victim = ocfs2_find_victim_chain(cl);
+	ac->ac_chain = victim;
+	ac->ac_allow_chain_relink = 1;
+
+	status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
+				    num_bits, bg_blkno, &bits_left);
+	if (!status)
+		goto set_hint;
+	if (status < 0 && status != -ENOSPC) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	mlog(0, "Search of victim chain %u came up with nothing, "
+	     "trying all chains now.\n", victim);
+
+	/* If we didn't pick a good victim, then just default to
+	 * searching each chain in order. Don't allow chain relinking
+	 * because we only calculate enough journal credits for one
+	 * relink per alloc. */
+	ac->ac_allow_chain_relink = 0;
+	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
+		if (i == victim)
+			continue;
+		if (!cl->cl_recs[i].c_free)
+			continue;
+
+		ac->ac_chain = i;
+		status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
+					    bit_off, num_bits, bg_blkno,
+					    &bits_left);
+		if (!status)
+			break;
+		if (status < 0 && status != -ENOSPC) {
+			mlog_errno(status);
+			goto bail;
+		}
+	}
+
+set_hint:
+	if (status != -ENOSPC) {
+		/* If the next search of this group is not likely to
+		 * yield a suitable extent, then we reset the last
+		 * group hint so as to not waste a disk read */
+		if (bits_left < min_bits)
+			ac->ac_last_group = 0;
+		else
+			ac->ac_last_group = *bg_blkno;
+	}
+
+bail:
+	mlog_exit(status);
+	return status;
+}
+
+int ocfs2_claim_metadata(struct ocfs2_super *osb,
+			 handle_t *handle,
+			 struct ocfs2_alloc_context *ac,
+			 u32 bits_wanted,
+			 u16 *suballoc_bit_start,
+			 unsigned int *num_bits,
+			 u64 *blkno_start)
+{
+	int status;
+	u64 bg_blkno;
+
+	BUG_ON(!ac);
+	BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
+	BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
+
+	status = ocfs2_claim_suballoc_bits(osb,
+					   ac,
+					   handle,
+					   bits_wanted,
+					   1,
+					   suballoc_bit_start,
+					   num_bits,
+					   &bg_blkno);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+	atomic_inc(&osb->alloc_stats.bg_allocs);
+
+	*blkno_start = bg_blkno + (u64) *suballoc_bit_start;
+	ac->ac_bits_given += (*num_bits);
+	status = 0;
+bail:
+	mlog_exit(status);
+	return status;
+}
+
+int ocfs2_claim_new_inode(struct ocfs2_super *osb,
+			  handle_t *handle,
+			  struct ocfs2_alloc_context *ac,
+			  u16 *suballoc_bit,
+			  u64 *fe_blkno)
+{
+	int status;
+	unsigned int num_bits;
+	u64 bg_blkno;
+
+	mlog_entry_void();
+
+	BUG_ON(!ac);
+	BUG_ON(ac->ac_bits_given != 0);
+	BUG_ON(ac->ac_bits_wanted != 1);
+	BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+	status = ocfs2_claim_suballoc_bits(osb,
+					   ac,
+					   handle,
+					   1,
+					   1,
+					   suballoc_bit,
+					   &num_bits,
+					   &bg_blkno);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+	atomic_inc(&osb->alloc_stats.bg_allocs);
+
+	BUG_ON(num_bits != 1);
+
+	*fe_blkno = bg_blkno + (u64) (*suballoc_bit);
+	ac->ac_bits_given++;
+	status = 0;
+bail:
+	mlog_exit(status);
+	return status;
+}
+
+/* translate a group desc. blkno and its bitmap offset into
+ * disk cluster offset.
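+ *
+ * Worked example (illustrative geometry, 8 blocks per cluster): a
+ * descriptor at block 8000 sits at cluster 1000, so bit 5 of its
+ * bitmap is cluster 1005. The first cluster group is the exception:
+ * it starts at cluster 0, so only the bit offset counts.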
*/ +static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode, + u64 bg_blkno, + u16 bg_bit_off) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u32 cluster = 0; + + BUG_ON(!ocfs2_is_cluster_bitmap(inode)); + + if (bg_blkno != osb->first_cluster_group_blkno) + cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno); + cluster += (u32) bg_bit_off; + return cluster; +} + +/* given a cluster offset, calculate which block group it belongs to + * and return that block offset. */ +u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u32 group_no; + + BUG_ON(!ocfs2_is_cluster_bitmap(inode)); + + group_no = cluster / osb->bitmap_cpg; + if (!group_no) + return osb->first_cluster_group_blkno; + return ocfs2_clusters_to_blocks(inode->i_sb, + group_no * osb->bitmap_cpg); +} + +/* given the block number of a cluster start, calculate which cluster + * group and descriptor bitmap offset that corresponds to. */ +static inline void ocfs2_block_to_cluster_group(struct inode *inode, + u64 data_blkno, + u64 *bg_blkno, + u16 *bg_bit_off) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno); + + BUG_ON(!ocfs2_is_cluster_bitmap(inode)); + + *bg_blkno = ocfs2_which_cluster_group(inode, + data_cluster); + + if (*bg_blkno == osb->first_cluster_group_blkno) + *bg_bit_off = (u16) data_cluster; + else + *bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb, + data_blkno - *bg_blkno); +} + +/* + * min_bits - minimum contiguous chunk from this total allocation we + * can handle. set to what we asked for originally for a full + * contig. allocation, set to '1' to indicate we can deal with extents + * of any size. + */ +int __ocfs2_claim_clusters(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac, + u32 min_clusters, + u32 max_clusters, + u32 *cluster_start, + u32 *num_clusters) +{ + int status; + unsigned int bits_wanted = max_clusters; + u64 bg_blkno = 0; + u16 bg_bit_off; + + mlog_entry_void(); + + BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); + + BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL + && ac->ac_which != OCFS2_AC_USE_MAIN); + + if (ac->ac_which == OCFS2_AC_USE_LOCAL) { + status = ocfs2_claim_local_alloc_bits(osb, + handle, + ac, + bits_wanted, + cluster_start, + num_clusters); + if (!status) + atomic_inc(&osb->alloc_stats.local_data); + } else { + if (min_clusters > (osb->bitmap_cpg - 1)) { + /* The only paths asking for contiguousness + * should know about this already. */ + mlog(ML_ERROR, "minimum allocation requested %u exceeds " + "group bitmap size %u!\n", min_clusters, + osb->bitmap_cpg); + status = -ENOSPC; + goto bail; + } + /* clamp the current request down to a realistic size. 
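+		 *
+		 * i.e. (sketch of the clamp below, no new behaviour):
+		 *
+		 *	bits_wanted = min(bits_wanted, osb->bitmap_cpg - 1);
+		 *
+		 * bitmap_cpg - 1 is the most a single cluster group
+		 * search is allowed to return contiguously.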
*/ + if (bits_wanted > (osb->bitmap_cpg - 1)) + bits_wanted = osb->bitmap_cpg - 1; + + status = ocfs2_claim_suballoc_bits(osb, + ac, + handle, + bits_wanted, + min_clusters, + &bg_bit_off, + num_clusters, + &bg_blkno); + if (!status) { + *cluster_start = + ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode, + bg_blkno, + bg_bit_off); + atomic_inc(&osb->alloc_stats.bitmap_data); + } + } + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto bail; + } + + ac->ac_bits_given += *num_clusters; + +bail: + mlog_exit(status); + return status; +} + +int ocfs2_claim_clusters(struct ocfs2_super *osb, + handle_t *handle, + struct ocfs2_alloc_context *ac, + u32 min_clusters, + u32 *cluster_start, + u32 *num_clusters) +{ + unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given; + + return __ocfs2_claim_clusters(osb, handle, ac, min_clusters, + bits_wanted, cluster_start, num_clusters); +} + +static inline int ocfs2_block_group_clear_bits(handle_t *handle, + struct inode *alloc_inode, + struct ocfs2_group_desc *bg, + struct buffer_head *group_bh, + unsigned int bit_off, + unsigned int num_bits) +{ + int status; + unsigned int tmp; + int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; + struct ocfs2_group_desc *undo_bg = NULL; + + mlog_entry_void(); + + if (!OCFS2_IS_VALID_GROUP_DESC(bg)) { + OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg); + status = -EIO; + goto bail; + } + + mlog(0, "off = %u, num = %u\n", bit_off, num_bits); + + if (ocfs2_is_cluster_bitmap(alloc_inode)) + journal_type = OCFS2_JOURNAL_ACCESS_UNDO; + + status = ocfs2_journal_access(handle, alloc_inode, group_bh, + journal_type); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + if (ocfs2_is_cluster_bitmap(alloc_inode)) + undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data; + + tmp = num_bits; + while(tmp--) { + ocfs2_clear_bit((bit_off + tmp), + (unsigned long *) bg->bg_bitmap); + if (ocfs2_is_cluster_bitmap(alloc_inode)) + ocfs2_set_bit(bit_off + tmp, + (unsigned long *) undo_bg->bg_bitmap); + } + le16_add_cpu(&bg->bg_free_bits_count, num_bits); + + status = ocfs2_journal_dirty(handle, group_bh); + if (status < 0) + mlog_errno(status); +bail: + return status; +} + +/* + * expects the suballoc inode to already be locked. 
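+ *
+ * A typical free path looks roughly like this (illustrative sketch;
+ * error handling omitted, lock/credit details as in callers elsewhere
+ * in the tree):
+ *
+ *	ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
+ *	handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
+ *	ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh,
+ *				 start_bit, bg_blkno, count);
+ *	ocfs2_commit_trans(osb, handle);
+ *	ocfs2_inode_unlock(alloc_inode, 1);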
+ */ +int ocfs2_free_suballoc_bits(handle_t *handle, + struct inode *alloc_inode, + struct buffer_head *alloc_bh, + unsigned int start_bit, + u64 bg_blkno, + unsigned int count) +{ + int status = 0; + u32 tmp_used; + struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data; + struct ocfs2_chain_list *cl = &fe->id2.i_chain; + struct buffer_head *group_bh = NULL; + struct ocfs2_group_desc *group; + + mlog_entry_void(); + + if (!OCFS2_IS_VALID_DINODE(fe)) { + OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe); + status = -EIO; + goto bail; + } + BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl)); + + mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n", + (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count, + (unsigned long long)bg_blkno, start_bit); + + status = ocfs2_read_block(alloc_inode, bg_blkno, &group_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + group = (struct ocfs2_group_desc *) group_bh->b_data; + status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group); + if (status) { + mlog_errno(status); + goto bail; + } + BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits)); + + status = ocfs2_block_group_clear_bits(handle, alloc_inode, + group, group_bh, + start_bit, count); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_journal_access(handle, alloc_inode, alloc_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free, + count); + tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used); + fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count); + + status = ocfs2_journal_dirty(handle, alloc_bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } + +bail: + brelse(group_bh); + + mlog_exit(status); + return status; +} + +int ocfs2_free_dinode(handle_t *handle, + struct inode *inode_alloc_inode, + struct buffer_head *inode_alloc_bh, + struct ocfs2_dinode *di) +{ + u64 blk = le64_to_cpu(di->i_blkno); + u16 bit = le16_to_cpu(di->i_suballoc_bit); + u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit); + + return ocfs2_free_suballoc_bits(handle, inode_alloc_inode, + inode_alloc_bh, bit, bg_blkno, 1); +} + +int ocfs2_free_clusters(handle_t *handle, + struct inode *bitmap_inode, + struct buffer_head *bitmap_bh, + u64 start_blk, + unsigned int num_clusters) +{ + int status; + u16 bg_start_bit; + u64 bg_blkno; + struct ocfs2_dinode *fe; + + /* You can't ever have a contiguous set of clusters + * bigger than a block group bitmap so we never have to worry + * about looping on them. */ + + mlog_entry_void(); + + /* This is expensive. We can safely remove once this stuff has + * gotten tested really well. 
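+	 *
+	 * The check below round-trips start_blk through cluster units;
+	 * the two agree only when start_blk is cluster-aligned. With 8
+	 * blocks per cluster (illustrative), block 8008 survives the
+	 * round trip (8008 -> cluster 1001 -> 8008) but block 8010
+	 * would come back as 8008 and trip the BUG_ON.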
*/ + BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk))); + + fe = (struct ocfs2_dinode *) bitmap_bh->b_data; + + ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno, + &bg_start_bit); + + mlog(0, "want to free %u clusters starting at block %llu\n", + num_clusters, (unsigned long long)start_blk); + mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n", + (unsigned long long)bg_blkno, bg_start_bit); + + status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh, + bg_start_bit, bg_blkno, + num_clusters); + if (status < 0) { + mlog_errno(status); + goto out; + } + + ocfs2_local_alloc_seen_free_bits(OCFS2_SB(bitmap_inode->i_sb), + num_clusters); + +out: + mlog_exit(status); + return status; +} + +static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg) +{ + printk("Block Group:\n"); + printk("bg_signature: %s\n", bg->bg_signature); + printk("bg_size: %u\n", bg->bg_size); + printk("bg_bits: %u\n", bg->bg_bits); + printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count); + printk("bg_chain: %u\n", bg->bg_chain); + printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation)); + printk("bg_next_group: %llu\n", + (unsigned long long)bg->bg_next_group); + printk("bg_parent_dinode: %llu\n", + (unsigned long long)bg->bg_parent_dinode); + printk("bg_blkno: %llu\n", + (unsigned long long)bg->bg_blkno); +} + +static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe) +{ + int i; + + printk("Suballoc Inode %llu:\n", (unsigned long long)fe->i_blkno); + printk("i_signature: %s\n", fe->i_signature); + printk("i_size: %llu\n", + (unsigned long long)fe->i_size); + printk("i_clusters: %u\n", fe->i_clusters); + printk("i_generation: %u\n", + le32_to_cpu(fe->i_generation)); + printk("id1.bitmap1.i_used: %u\n", + le32_to_cpu(fe->id1.bitmap1.i_used)); + printk("id1.bitmap1.i_total: %u\n", + le32_to_cpu(fe->id1.bitmap1.i_total)); + printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg); + printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc); + printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count); + printk("id2.i_chain.cl_next_free_rec: %u\n", + fe->id2.i_chain.cl_next_free_rec); + for(i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) { + printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i, + fe->id2.i_chain.cl_recs[i].c_free); + printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i, + fe->id2.i_chain.cl_recs[i].c_total); + printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i, + (unsigned long long)fe->id2.i_chain.cl_recs[i].c_blkno); + } +} + +/* + * For a given allocation, determine which allocators will need to be + * accessed, and lock them, reserving the appropriate number of bits. + * + * Sparse file systems call this from ocfs2_write_begin_nolock() + * and ocfs2_allocate_unwritten_extents(). + * + * File systems which don't support holes call this from + * ocfs2_extend_allocation(). 
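+ *
+ * Rough usage sketch (illustrative; 'et' is an extent tree already
+ * set up with ocfs2_init_dinode_extent_tree() or similar):
+ *
+ *	ret = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
+ *				    &data_ac, &meta_ac);
+ *	... start a transaction and do the actual allocation ...
+ *	if (data_ac)
+ *		ocfs2_free_alloc_context(data_ac);
+ *	if (meta_ac)
+ *		ocfs2_free_alloc_context(meta_ac);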
+ */ +int ocfs2_lock_allocators(struct inode *inode, + struct ocfs2_extent_tree *et, + u32 clusters_to_add, u32 extents_to_split, + struct ocfs2_alloc_context **data_ac, + struct ocfs2_alloc_context **meta_ac) +{ + int ret = 0, num_free_extents; + unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + *meta_ac = NULL; + if (data_ac) + *data_ac = NULL; + + BUG_ON(clusters_to_add != 0 && data_ac == NULL); + + num_free_extents = ocfs2_num_free_extents(osb, inode, et); + if (num_free_extents < 0) { + ret = num_free_extents; + mlog_errno(ret); + goto out; + } + + /* + * Sparse allocation file systems need to be more conservative + * with reserving room for expansion - the actual allocation + * happens while we've got a journal handle open so re-taking + * a cluster lock (because we ran out of room for another + * extent) will violate ordering rules. + * + * Most of the time we'll only be seeing this 1 cluster at a time + * anyway. + * + * Always lock for any unwritten extents - we might want to + * add blocks during a split. + */ + if (!num_free_extents || + (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) { + ret = ocfs2_reserve_new_metadata(osb, et->et_root_el, meta_ac); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + } + + if (clusters_to_add == 0) + goto out; + + ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + +out: + if (ret) { + if (*meta_ac) { + ocfs2_free_alloc_context(*meta_ac); + *meta_ac = NULL; + } + + /* + * We cannot have an error and a non null *data_ac. + */ + } + + return ret; +} diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h new file mode 100644 index 0000000..4df159d --- /dev/null +++ b/fs/ocfs2/suballoc.h @@ -0,0 +1,175 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * suballoc.h + * + * Defines sub allocator api + * + * Copyright (C) 2003, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef _CHAINALLOC_H_ +#define _CHAINALLOC_H_ + +typedef int (group_search_t)(struct inode *, + struct buffer_head *, + u32, /* bits_wanted */ + u32, /* min_bits */ + u64, /* max_block */ + u16 *, /* *bit_off */ + u16 *); /* *bits_found */ + +struct ocfs2_alloc_context { + struct inode *ac_inode; /* which bitmap are we allocating from? */ + struct buffer_head *ac_bh; /* file entry bh */ + u32 ac_alloc_slot; /* which slot are we allocating from? 
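+				* (Lifecycle sketch, illustrative only:
+				*
+				*	ocfs2_reserve_clusters(osb, want, &ac);
+				*	... start a handle, then ...
+				*	ocfs2_claim_clusters(osb, handle, ac, 1,
+				*			     &start, &got);
+				*	ocfs2_free_alloc_context(ac);
+				*
+				* reserve picks and locks a bitmap inode,
+				* claim hands out bits against it, and the
+				* final free drops those references.)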
+				*/
+	u32 ac_bits_wanted;
+	u32 ac_bits_given;
+#define OCFS2_AC_USE_LOCAL 1
+#define OCFS2_AC_USE_MAIN  2
+#define OCFS2_AC_USE_INODE 3
+#define OCFS2_AC_USE_META  4
+	u32 ac_which;
+
+	/* these are used by the chain search */
+	u16 ac_chain;
+	int ac_allow_chain_relink;
+	group_search_t *ac_group_search;
+
+	u64 ac_last_group;
+	u64 ac_max_block;  /* Highest block number to allocate. 0 is
+			      the same as ~0 - unlimited */
+};
+
+void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
+static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac)
+{
+	return ac->ac_bits_wanted - ac->ac_bits_given;
+}
+
+/*
+ * Please note that the caller must make sure that root_el is the root
+ * of the extent tree. So for an inode, it should be &fe->id2.i_list.
+ * Otherwise the result may be wrong.
+ */
+int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
+			       struct ocfs2_extent_list *root_el,
+			       struct ocfs2_alloc_context **ac);
+int ocfs2_reserve_new_metadata_blocks(struct ocfs2_super *osb,
+				      int blocks,
+				      struct ocfs2_alloc_context **ac);
+int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
+			    struct ocfs2_alloc_context **ac);
+int ocfs2_reserve_clusters(struct ocfs2_super *osb,
+			   u32 bits_wanted,
+			   struct ocfs2_alloc_context **ac);
+
+int ocfs2_claim_metadata(struct ocfs2_super *osb,
+			 handle_t *handle,
+			 struct ocfs2_alloc_context *ac,
+			 u32 bits_wanted,
+			 u16 *suballoc_bit_start,
+			 u32 *num_bits,
+			 u64 *blkno_start);
+int ocfs2_claim_new_inode(struct ocfs2_super *osb,
+			  handle_t *handle,
+			  struct ocfs2_alloc_context *ac,
+			  u16 *suballoc_bit,
+			  u64 *fe_blkno);
+int ocfs2_claim_clusters(struct ocfs2_super *osb,
+			 handle_t *handle,
+			 struct ocfs2_alloc_context *ac,
+			 u32 min_clusters,
+			 u32 *cluster_start,
+			 u32 *num_clusters);
+/*
+ * Use this variant of ocfs2_claim_clusters to specify a maximum
+ * number of clusters smaller than the allocation reserved.
+ */
+int __ocfs2_claim_clusters(struct ocfs2_super *osb,
+			   handle_t *handle,
+			   struct ocfs2_alloc_context *ac,
+			   u32 min_clusters,
+			   u32 max_clusters,
+			   u32 *cluster_start,
+			   u32 *num_clusters);
+
+int ocfs2_free_suballoc_bits(handle_t *handle,
+			     struct inode *alloc_inode,
+			     struct buffer_head *alloc_bh,
+			     unsigned int start_bit,
+			     u64 bg_blkno,
+			     unsigned int count);
+int ocfs2_free_dinode(handle_t *handle,
+		      struct inode *inode_alloc_inode,
+		      struct buffer_head *inode_alloc_bh,
+		      struct ocfs2_dinode *di);
+int ocfs2_free_clusters(handle_t *handle,
+			struct inode *bitmap_inode,
+			struct buffer_head *bitmap_bh,
+			u64 start_blk,
+			unsigned int num_clusters);
+
+static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit)
+{
+	u64 group = block - (u64) bit;
+
+	return group;
+}
+
+static inline u32 ocfs2_cluster_from_desc(struct ocfs2_super *osb,
+					  u64 bg_blkno)
+{
+	/* This should work for all block group descriptors as only
+	 * the 1st group descriptor of the cluster bitmap is
+	 * different. */
+
+	if (bg_blkno == osb->first_cluster_group_blkno)
+		return 0;
+
+	/* the rest of the block groups are located at the beginning
+	 * of their 1st cluster, so a direct translation just
+	 * works. */
+	return ocfs2_blocks_to_clusters(osb->sb, bg_blkno);
+}
+
+static inline int ocfs2_is_cluster_bitmap(struct inode *inode)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	return osb->bitmap_blkno == OCFS2_I(inode)->ip_blkno;
+}
+
+/* This is for local alloc ONLY. Others should use the task-specific
+ * apis above.
+ */
+int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
+				      struct ocfs2_alloc_context *ac);
+void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac);
+
+/* given a cluster offset, calculate which block group it belongs to
+ * and return that block offset. */
+u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster);
+
+/* somewhat more expensive than our other checks, so use sparingly. */
+int ocfs2_check_group_descriptor(struct super_block *sb,
+				 struct ocfs2_dinode *di,
+				 struct ocfs2_group_desc *gd);
+int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
+			  u32 clusters_to_add, u32 extents_to_split,
+			  struct ocfs2_alloc_context **data_ac,
+			  struct ocfs2_alloc_context **meta_ac);
+#endif /* _CHAINALLOC_H_ */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
new file mode 100644
index 0000000..304b63a
--- /dev/null
+++ b/fs/ocfs2/super.c
@@ -0,0 +1,1916 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * super.c
+ *
+ * load/unload driver, mount/dismount volumes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/statfs.h>
+#include <linux/moduleparam.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/parser.h>
+#include <linux/crc32.h>
+#include <linux/debugfs.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+
+#define MLOG_MASK_PREFIX ML_SUPER
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+/* this should be the only file to include a version 1 header */
+#include "ocfs1_fs_compat.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "export.h"
+#include "extent_map.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "journal.h"
+#include "localalloc.h"
+#include "namei.h"
+#include "slot_map.h"
+#include "super.h"
+#include "sysfile.h"
+#include "uptodate.h"
+#include "ver.h"
+#include "xattr.h"
+
+#include "buffer_head_io.h"
+
+static struct kmem_cache *ocfs2_inode_cachep = NULL;
+
+/* OCFS2 needs to schedule several different types of work which
+ * require cluster locking, disk I/O, recovery waits, etc. Since these
+ * types of work tend to be heavy we avoid using the kernel events
+ * workqueue and schedule on our own.
*/ +struct workqueue_struct *ocfs2_wq = NULL; + +static struct dentry *ocfs2_debugfs_root = NULL; + +MODULE_AUTHOR("Oracle"); +MODULE_LICENSE("GPL"); + +struct mount_options +{ + unsigned long commit_interval; + unsigned long mount_opt; + unsigned int atime_quantum; + signed short slot; + unsigned int localalloc_opt; + char cluster_stack[OCFS2_STACK_LABEL_LEN + 1]; +}; + +static int ocfs2_parse_options(struct super_block *sb, char *options, + struct mount_options *mopt, + int is_remount); +static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt); +static void ocfs2_put_super(struct super_block *sb); +static int ocfs2_mount_volume(struct super_block *sb); +static int ocfs2_remount(struct super_block *sb, int *flags, char *data); +static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err); +static int ocfs2_initialize_mem_caches(void); +static void ocfs2_free_mem_caches(void); +static void ocfs2_delete_osb(struct ocfs2_super *osb); + +static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf); + +static int ocfs2_sync_fs(struct super_block *sb, int wait); + +static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb); +static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb); +static void ocfs2_release_system_inodes(struct ocfs2_super *osb); +static int ocfs2_check_volume(struct ocfs2_super *osb); +static int ocfs2_verify_volume(struct ocfs2_dinode *di, + struct buffer_head *bh, + u32 sectsize); +static int ocfs2_initialize_super(struct super_block *sb, + struct buffer_head *bh, + int sector_size); +static int ocfs2_get_sector(struct super_block *sb, + struct buffer_head **bh, + int block, + int sect_size); +static void ocfs2_write_super(struct super_block *sb); +static struct inode *ocfs2_alloc_inode(struct super_block *sb); +static void ocfs2_destroy_inode(struct inode *inode); + +static const struct super_operations ocfs2_sops = { + .statfs = ocfs2_statfs, + .alloc_inode = ocfs2_alloc_inode, + .destroy_inode = ocfs2_destroy_inode, + .drop_inode = ocfs2_drop_inode, + .clear_inode = ocfs2_clear_inode, + .delete_inode = ocfs2_delete_inode, + .sync_fs = ocfs2_sync_fs, + .write_super = ocfs2_write_super, + .put_super = ocfs2_put_super, + .remount_fs = ocfs2_remount, + .show_options = ocfs2_show_options, +}; + +enum { + Opt_barrier, + Opt_err_panic, + Opt_err_ro, + Opt_intr, + Opt_nointr, + Opt_hb_none, + Opt_hb_local, + Opt_data_ordered, + Opt_data_writeback, + Opt_atime_quantum, + Opt_slot, + Opt_commit, + Opt_localalloc, + Opt_localflocks, + Opt_stack, + Opt_user_xattr, + Opt_nouser_xattr, + Opt_inode64, + Opt_err, +}; + +static const match_table_t tokens = { + {Opt_barrier, "barrier=%u"}, + {Opt_err_panic, "errors=panic"}, + {Opt_err_ro, "errors=remount-ro"}, + {Opt_intr, "intr"}, + {Opt_nointr, "nointr"}, + {Opt_hb_none, OCFS2_HB_NONE}, + {Opt_hb_local, OCFS2_HB_LOCAL}, + {Opt_data_ordered, "data=ordered"}, + {Opt_data_writeback, "data=writeback"}, + {Opt_atime_quantum, "atime_quantum=%u"}, + {Opt_slot, "preferred_slot=%u"}, + {Opt_commit, "commit=%u"}, + {Opt_localalloc, "localalloc=%d"}, + {Opt_localflocks, "localflocks"}, + {Opt_stack, "cluster_stack=%s"}, + {Opt_user_xattr, "user_xattr"}, + {Opt_nouser_xattr, "nouser_xattr"}, + {Opt_inode64, "inode64"}, + {Opt_err, NULL} +}; + +/* + * write_super and sync_fs ripped right out of ext3. 
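+ *
+ * For sync_fs the 'wait' flag decides how much actually completes
+ * before return: with wait set, the truncate log is flushed and the
+ * jbd2 commit is waited on; without it, both are merely kicked off
+ * asynchronously.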
+ */ +static void ocfs2_write_super(struct super_block *sb) +{ + if (mutex_trylock(&sb->s_lock) != 0) + BUG(); + sb->s_dirt = 0; +} + +static int ocfs2_sync_fs(struct super_block *sb, int wait) +{ + int status; + tid_t target; + struct ocfs2_super *osb = OCFS2_SB(sb); + + sb->s_dirt = 0; + + if (ocfs2_is_hard_readonly(osb)) + return -EROFS; + + if (wait) { + status = ocfs2_flush_truncate_log(osb); + if (status < 0) + mlog_errno(status); + } else { + ocfs2_schedule_truncate_log_flush(osb, 0); + } + + if (jbd2_journal_start_commit(OCFS2_SB(sb)->journal->j_journal, + &target)) { + if (wait) + jbd2_log_wait_commit(OCFS2_SB(sb)->journal->j_journal, + target); + } + return 0; +} + +static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb) +{ + struct inode *new = NULL; + int status = 0; + int i; + + mlog_entry_void(); + + new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0); + if (IS_ERR(new)) { + status = PTR_ERR(new); + mlog_errno(status); + goto bail; + } + osb->root_inode = new; + + new = ocfs2_iget(osb, osb->system_dir_blkno, OCFS2_FI_FLAG_SYSFILE, 0); + if (IS_ERR(new)) { + status = PTR_ERR(new); + mlog_errno(status); + goto bail; + } + osb->sys_root_inode = new; + + for (i = OCFS2_FIRST_ONLINE_SYSTEM_INODE; + i <= OCFS2_LAST_GLOBAL_SYSTEM_INODE; i++) { + new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); + if (!new) { + ocfs2_release_system_inodes(osb); + status = -EINVAL; + mlog_errno(status); + /* FIXME: Should ERROR_RO_FS */ + mlog(ML_ERROR, "Unable to load system inode %d, " + "possibly corrupt fs?", i); + goto bail; + } + // the array now has one ref, so drop this one + iput(new); + } + +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb) +{ + struct inode *new = NULL; + int status = 0; + int i; + + mlog_entry_void(); + + for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1; + i < NUM_SYSTEM_INODES; + i++) { + new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); + if (!new) { + ocfs2_release_system_inodes(osb); + status = -EINVAL; + mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n", + status, i, osb->slot_num); + goto bail; + } + /* the array now has one ref, so drop this one */ + iput(new); + } + +bail: + mlog_exit(status); + return status; +} + +static void ocfs2_release_system_inodes(struct ocfs2_super *osb) +{ + int i; + struct inode *inode; + + mlog_entry_void(); + + for (i = 0; i < NUM_SYSTEM_INODES; i++) { + inode = osb->system_inodes[i]; + if (inode) { + iput(inode); + osb->system_inodes[i] = NULL; + } + } + + inode = osb->sys_root_inode; + if (inode) { + iput(inode); + osb->sys_root_inode = NULL; + } + + inode = osb->root_inode; + if (inode) { + iput(inode); + osb->root_inode = NULL; + } + + mlog_exit(0); +} + +/* We're allocating fs objects, use GFP_NOFS */ +static struct inode *ocfs2_alloc_inode(struct super_block *sb) +{ + struct ocfs2_inode_info *oi; + + oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS); + if (!oi) + return NULL; + + jbd2_journal_init_jbd_inode(&oi->ip_jinode, &oi->vfs_inode); + return &oi->vfs_inode; +} + +static void ocfs2_destroy_inode(struct inode *inode) +{ + kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode)); +} + +static unsigned long long ocfs2_max_file_offset(unsigned int bbits, + unsigned int cbits) +{ + unsigned int bytes = 1 << cbits; + unsigned int trim = bytes; + unsigned int bitshift = 32; + + /* + * i_size and all block offsets in ocfs2 are always 64 bits + * wide. i_clusters is 32 bits, in cluster-sized units. 
So on + * 64 bit platforms, cluster size will be the limiting factor. + */ + +#if BITS_PER_LONG == 32 +# if defined(CONFIG_LBD) + BUILD_BUG_ON(sizeof(sector_t) != 8); + /* + * We might be limited by page cache size. + */ + if (bytes > PAGE_CACHE_SIZE) { + bytes = PAGE_CACHE_SIZE; + trim = 1; + /* + * Shift by 31 here so that we don't get larger than + * MAX_LFS_FILESIZE + */ + bitshift = 31; + } +# else + /* + * We are limited by the size of sector_t. Use block size, as + * that's what we expose to the VFS. + */ + bytes = 1 << bbits; + trim = 1; + bitshift = 31; +# endif +#endif + + /* + * Trim by a whole cluster when we can actually approach the + * on-disk limits. Otherwise we can overflow i_clusters when + * an extent start is at the max offset. + */ + return (((unsigned long long)bytes) << bitshift) - trim; +} + +static int ocfs2_remount(struct super_block *sb, int *flags, char *data) +{ + int incompat_features; + int ret = 0; + struct mount_options parsed_options; + struct ocfs2_super *osb = OCFS2_SB(sb); + + if (!ocfs2_parse_options(sb, data, &parsed_options, 1)) { + ret = -EINVAL; + goto out; + } + + if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) != + (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL)) { + ret = -EINVAL; + mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n"); + goto out; + } + + if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) != + (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { + ret = -EINVAL; + mlog(ML_ERROR, "Cannot change data mode on remount\n"); + goto out; + } + + /* Probably don't want this on remount; it might + * mess with other nodes */ + if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) && + (parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) { + ret = -EINVAL; + mlog(ML_ERROR, "Cannot enable inode64 on remount\n"); + goto out; + } + + /* We're going to/from readonly mode. */ + if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) { + /* Lock here so the check of HARD_RO and the potential + * setting of SOFT_RO is atomic. */ + spin_lock(&osb->osb_lock); + if (osb->osb_flags & OCFS2_OSB_HARD_RO) { + mlog(ML_ERROR, "Remount on readonly device is forbidden.\n"); + ret = -EROFS; + goto unlock_osb; + } + + if (*flags & MS_RDONLY) { + mlog(0, "Going to ro mode.\n"); + sb->s_flags |= MS_RDONLY; + osb->osb_flags |= OCFS2_OSB_SOFT_RO; + } else { + mlog(0, "Making ro filesystem writeable.\n"); + + if (osb->osb_flags & OCFS2_OSB_ERROR_FS) { + mlog(ML_ERROR, "Cannot remount RDWR " + "filesystem due to previous errors.\n"); + ret = -EROFS; + goto unlock_osb; + } + incompat_features = OCFS2_HAS_RO_COMPAT_FEATURE(sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP); + if (incompat_features) { + mlog(ML_ERROR, "Cannot remount RDWR because " + "of unsupported optional features " + "(%x).\n", incompat_features); + ret = -EINVAL; + goto unlock_osb; + } + sb->s_flags &= ~MS_RDONLY; + osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; + } +unlock_osb: + spin_unlock(&osb->osb_lock); + } + + if (!ret) { + /* Only save off the new mount options in case of a successful + * remount. 
*/ + osb->s_mount_opt = parsed_options.mount_opt; + osb->s_atime_quantum = parsed_options.atime_quantum; + osb->preferred_slot = parsed_options.slot; + if (parsed_options.commit_interval) + osb->osb_commit_interval = parsed_options.commit_interval; + + if (!ocfs2_is_hard_readonly(osb)) + ocfs2_set_journal_params(osb); + } +out: + return ret; +} + +static int ocfs2_sb_probe(struct super_block *sb, + struct buffer_head **bh, + int *sector_size) +{ + int status, tmpstat; + struct ocfs1_vol_disk_hdr *hdr; + struct ocfs2_dinode *di; + int blksize; + + *bh = NULL; + + /* may be > 512 */ + *sector_size = bdev_hardsect_size(sb->s_bdev); + if (*sector_size > OCFS2_MAX_BLOCKSIZE) { + mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n", + *sector_size, OCFS2_MAX_BLOCKSIZE); + status = -EINVAL; + goto bail; + } + + /* Can this really happen? */ + if (*sector_size < OCFS2_MIN_BLOCKSIZE) + *sector_size = OCFS2_MIN_BLOCKSIZE; + + /* check block zero for old format */ + status = ocfs2_get_sector(sb, bh, 0, *sector_size); + if (status < 0) { + mlog_errno(status); + goto bail; + } + hdr = (struct ocfs1_vol_disk_hdr *) (*bh)->b_data; + if (hdr->major_version == OCFS1_MAJOR_VERSION) { + mlog(ML_ERROR, "incompatible version: %u.%u\n", + hdr->major_version, hdr->minor_version); + status = -EINVAL; + } + if (memcmp(hdr->signature, OCFS1_VOLUME_SIGNATURE, + strlen(OCFS1_VOLUME_SIGNATURE)) == 0) { + mlog(ML_ERROR, "incompatible volume signature: %8s\n", + hdr->signature); + status = -EINVAL; + } + brelse(*bh); + *bh = NULL; + if (status < 0) { + mlog(ML_ERROR, "This is an ocfs v1 filesystem which must be " + "upgraded before mounting with ocfs v2\n"); + goto bail; + } + + /* + * Now check at magic offset for 512, 1024, 2048, 4096 + * blocksizes. 4096 is the maximum blocksize because it is + * the minimum clustersize. + */ + status = -EINVAL; + for (blksize = *sector_size; + blksize <= OCFS2_MAX_BLOCKSIZE; + blksize <<= 1) { + tmpstat = ocfs2_get_sector(sb, bh, + OCFS2_SUPER_BLOCK_BLKNO, + blksize); + if (tmpstat < 0) { + status = tmpstat; + mlog_errno(status); + goto bail; + } + di = (struct ocfs2_dinode *) (*bh)->b_data; + status = ocfs2_verify_volume(di, *bh, blksize); + if (status >= 0) + goto bail; + brelse(*bh); + *bh = NULL; + if (status != -EAGAIN) + break; + } + +bail: + return status; +} + +static int ocfs2_verify_heartbeat(struct ocfs2_super *osb) +{ + if (ocfs2_mount_local(osb)) { + if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) { + mlog(ML_ERROR, "Cannot heartbeat on a locally " + "mounted device.\n"); + return -EINVAL; + } + } + + if (ocfs2_userspace_stack(osb)) { + if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) { + mlog(ML_ERROR, "Userspace stack expected, but " + "o2cb heartbeat arguments passed to mount\n"); + return -EINVAL; + } + } + + if (!(osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL)) { + if (!ocfs2_mount_local(osb) && !ocfs2_is_hard_readonly(osb) && + !ocfs2_userspace_stack(osb)) { + mlog(ML_ERROR, "Heartbeat has to be started to mount " + "a read-write clustered device.\n"); + return -EINVAL; + } + } + + return 0; +} + +/* + * If we're using a userspace stack, mount should have passed + * a name that matches the disk. If not, mount should not + * have passed a stack. 
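+ *
+ * Concretely, the two failure cases checked below:
+ *
+ *	- the disk does not use a userspace stack, but mount passed
+ *	  cluster_stack=...				-> -EINVAL
+ *	- the disk uses a userspace stack whose label differs from
+ *	  what mount passed (including passing none)	-> -EINVAL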
+ */
+static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
+					struct mount_options *mopt)
+{
+	if (!ocfs2_userspace_stack(osb) && mopt->cluster_stack[0]) {
+		mlog(ML_ERROR,
+		     "cluster stack passed to mount, but this filesystem "
+		     "does not support it\n");
+		return -EINVAL;
+	}
+
+	if (ocfs2_userspace_stack(osb) &&
+	    strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
+		    OCFS2_STACK_LABEL_LEN)) {
+		mlog(ML_ERROR,
+		     "cluster stack passed to mount (\"%s\") does not "
+		     "match the filesystem (\"%s\")\n",
+		     mopt->cluster_stack,
+		     osb->osb_cluster_stack);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct dentry *root;
+	int status, sector_size;
+	struct mount_options parsed_options;
+	struct inode *inode = NULL;
+	struct ocfs2_super *osb = NULL;
+	struct buffer_head *bh = NULL;
+	char nodestr[8];
+
+	mlog_entry("%p, %p, %i", sb, data, silent);
+
+	if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
+		status = -EINVAL;
+		goto read_super_error;
+	}
+
+	/* probe for superblock */
+	status = ocfs2_sb_probe(sb, &bh, &sector_size);
+	if (status < 0) {
+		mlog(ML_ERROR, "superblock probe failed!\n");
+		goto read_super_error;
+	}
+
+	status = ocfs2_initialize_super(sb, bh, sector_size);
+	osb = OCFS2_SB(sb);
+	if (status < 0) {
+		mlog_errno(status);
+		goto read_super_error;
+	}
+	brelse(bh);
+	bh = NULL;
+	osb->s_mount_opt = parsed_options.mount_opt;
+	osb->s_atime_quantum = parsed_options.atime_quantum;
+	osb->preferred_slot = parsed_options.slot;
+	osb->osb_commit_interval = parsed_options.commit_interval;
+	osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, parsed_options.localalloc_opt);
+	osb->local_alloc_bits = osb->local_alloc_default_bits;
+
+	status = ocfs2_verify_userspace_stack(osb, &parsed_options);
+	if (status)
+		goto read_super_error;
+
+	sb->s_magic = OCFS2_SUPER_MAGIC;
+
+	/* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
+	 * heartbeat=none */
+	if (bdev_read_only(sb->s_bdev)) {
+		if (!(sb->s_flags & MS_RDONLY)) {
+			status = -EACCES;
+			mlog(ML_ERROR, "Readonly device detected but readonly "
+			     "mount was not specified.\n");
+			goto read_super_error;
+		}
+
+		/* You should not be able to start a local heartbeat
+		 * on a readonly device. */
+		if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) {
+			status = -EROFS;
+			mlog(ML_ERROR, "Local heartbeat specified on readonly "
+			     "device.\n");
+			goto read_super_error;
+		}
+
+		status = ocfs2_check_journals_nolocks(osb);
+		if (status < 0) {
+			if (status == -EROFS)
+				mlog(ML_ERROR, "Recovery required on readonly "
+				     "file system, but write access is "
+				     "unavailable.\n");
+			else
+				mlog_errno(status);
+			goto read_super_error;
+		}
+
+		ocfs2_set_ro_flag(osb, 1);
+
+		printk(KERN_NOTICE "Readonly device detected. No cluster "
+		       "services will be utilized for this mount. 
Recovery " + "will be skipped.\n"); + } + + if (!ocfs2_is_hard_readonly(osb)) { + if (sb->s_flags & MS_RDONLY) + ocfs2_set_ro_flag(osb, 0); + } + + status = ocfs2_verify_heartbeat(osb); + if (status < 0) { + mlog_errno(status); + goto read_super_error; + } + + osb->osb_debug_root = debugfs_create_dir(osb->uuid_str, + ocfs2_debugfs_root); + if (!osb->osb_debug_root) { + status = -EINVAL; + mlog(ML_ERROR, "Unable to create per-mount debugfs root.\n"); + goto read_super_error; + } + + status = ocfs2_mount_volume(sb); + if (osb->root_inode) + inode = igrab(osb->root_inode); + + if (status < 0) + goto read_super_error; + + if (!inode) { + status = -EIO; + mlog_errno(status); + goto read_super_error; + } + + root = d_alloc_root(inode); + if (!root) { + status = -ENOMEM; + mlog_errno(status); + goto read_super_error; + } + + sb->s_root = root; + + ocfs2_complete_mount_recovery(osb); + + if (ocfs2_mount_local(osb)) + snprintf(nodestr, sizeof(nodestr), "local"); + else + snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num); + + printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %s, slot %d) " + "with %s data mode.\n", + osb->dev_str, nodestr, osb->slot_num, + osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" : + "ordered"); + + atomic_set(&osb->vol_state, VOLUME_MOUNTED); + wake_up(&osb->osb_mount_event); + + mlog_exit(status); + return status; + +read_super_error: + brelse(bh); + + if (inode) + iput(inode); + + if (osb) { + atomic_set(&osb->vol_state, VOLUME_DISABLED); + wake_up(&osb->osb_mount_event); + ocfs2_dismount_volume(sb, 1); + } + + mlog_exit(status); + return status; +} + +static int ocfs2_get_sb(struct file_system_type *fs_type, + int flags, + const char *dev_name, + void *data, + struct vfsmount *mnt) +{ + return get_sb_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super, + mnt); +} + +static struct file_system_type ocfs2_fs_type = { + .owner = THIS_MODULE, + .name = "ocfs2", + .get_sb = ocfs2_get_sb, /* is this called when we mount + * the fs? */ + .kill_sb = kill_block_super, /* set to the generic one + * right now, but do we + * need to change that? */ + .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, + .next = NULL +}; + +static int ocfs2_parse_options(struct super_block *sb, + char *options, + struct mount_options *mopt, + int is_remount) +{ + int status; + char *p; + + mlog_entry("remount: %d, options: \"%s\"\n", is_remount, + options ? 
options : "(none)"); + + mopt->commit_interval = 0; + mopt->mount_opt = 0; + mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; + mopt->slot = OCFS2_INVALID_SLOT; + mopt->localalloc_opt = OCFS2_DEFAULT_LOCAL_ALLOC_SIZE; + mopt->cluster_stack[0] = '\0'; + + if (!options) { + status = 1; + goto bail; + } + + while ((p = strsep(&options, ",")) != NULL) { + int token, option; + substring_t args[MAX_OPT_ARGS]; + + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_hb_local: + mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL; + break; + case Opt_hb_none: + mopt->mount_opt &= ~OCFS2_MOUNT_HB_LOCAL; + break; + case Opt_barrier: + if (match_int(&args[0], &option)) { + status = 0; + goto bail; + } + if (option) + mopt->mount_opt |= OCFS2_MOUNT_BARRIER; + else + mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; + break; + case Opt_intr: + mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; + break; + case Opt_nointr: + mopt->mount_opt |= OCFS2_MOUNT_NOINTR; + break; + case Opt_err_panic: + mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; + break; + case Opt_err_ro: + mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; + break; + case Opt_data_ordered: + mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; + break; + case Opt_data_writeback: + mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK; + break; + case Opt_user_xattr: + mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; + break; + case Opt_nouser_xattr: + mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR; + break; + case Opt_atime_quantum: + if (match_int(&args[0], &option)) { + status = 0; + goto bail; + } + if (option >= 0) + mopt->atime_quantum = option; + break; + case Opt_slot: + option = 0; + if (match_int(&args[0], &option)) { + status = 0; + goto bail; + } + if (option) + mopt->slot = (s16)option; + break; + case Opt_commit: + option = 0; + if (match_int(&args[0], &option)) { + status = 0; + goto bail; + } + if (option < 0) + return 0; + if (option == 0) + option = JBD2_DEFAULT_MAX_COMMIT_AGE; + mopt->commit_interval = HZ * option; + break; + case Opt_localalloc: + option = 0; + if (match_int(&args[0], &option)) { + status = 0; + goto bail; + } + if (option >= 0 && (option <= ocfs2_local_alloc_size(sb) * 8)) + mopt->localalloc_opt = option; + break; + case Opt_localflocks: + /* + * Changing this during remount could race + * flock() requests, or "unbalance" existing + * ones (e.g., a lock is taken in one mode but + * dropped in the other). If users care enough + * to flip locking modes during remount, we + * could add a "local" flag to individual + * flock structures for proper tracking of + * state. + */ + if (!is_remount) + mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; + break; + case Opt_stack: + /* Check both that the option we were passed + * is of the right length and that it is a proper + * string of the right length. 
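+			 *
+			 * e.g. "cluster_stack=o2cb" is accepted (the
+			 * label is exactly OCFS2_STACK_LABEL_LEN (4)
+			 * bytes), while "cluster_stack=o2" or an
+			 * over-long value is rejected here.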
+ */ + if (((args[0].to - args[0].from) != + OCFS2_STACK_LABEL_LEN) || + (strnlen(args[0].from, + OCFS2_STACK_LABEL_LEN) != + OCFS2_STACK_LABEL_LEN)) { + mlog(ML_ERROR, + "Invalid cluster_stack option\n"); + status = 0; + goto bail; + } + memcpy(mopt->cluster_stack, args[0].from, + OCFS2_STACK_LABEL_LEN); + mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + break; + case Opt_inode64: + mopt->mount_opt |= OCFS2_MOUNT_INODE64; + break; + default: + mlog(ML_ERROR, + "Unrecognized mount option \"%s\" " + "or missing value\n", p); + status = 0; + goto bail; + } + } + + status = 1; + +bail: + mlog_exit(status); + return status; +} + +static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt) +{ + struct ocfs2_super *osb = OCFS2_SB(mnt->mnt_sb); + unsigned long opts = osb->s_mount_opt; + unsigned int local_alloc_megs; + + if (opts & OCFS2_MOUNT_HB_LOCAL) + seq_printf(s, ",_netdev,heartbeat=local"); + else + seq_printf(s, ",heartbeat=none"); + + if (opts & OCFS2_MOUNT_NOINTR) + seq_printf(s, ",nointr"); + + if (opts & OCFS2_MOUNT_DATA_WRITEBACK) + seq_printf(s, ",data=writeback"); + else + seq_printf(s, ",data=ordered"); + + if (opts & OCFS2_MOUNT_BARRIER) + seq_printf(s, ",barrier=1"); + + if (opts & OCFS2_MOUNT_ERRORS_PANIC) + seq_printf(s, ",errors=panic"); + else + seq_printf(s, ",errors=remount-ro"); + + if (osb->preferred_slot != OCFS2_INVALID_SLOT) + seq_printf(s, ",preferred_slot=%d", osb->preferred_slot); + + if (osb->s_atime_quantum != OCFS2_DEFAULT_ATIME_QUANTUM) + seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum); + + if (osb->osb_commit_interval) + seq_printf(s, ",commit=%u", + (unsigned) (osb->osb_commit_interval / HZ)); + + local_alloc_megs = osb->local_alloc_bits >> (20 - osb->s_clustersize_bits); + if (local_alloc_megs != OCFS2_DEFAULT_LOCAL_ALLOC_SIZE) + seq_printf(s, ",localalloc=%d", local_alloc_megs); + + if (opts & OCFS2_MOUNT_LOCALFLOCKS) + seq_printf(s, ",localflocks,"); + + if (osb->osb_cluster_stack[0]) + seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN, + osb->osb_cluster_stack); + + if (opts & OCFS2_MOUNT_NOUSERXATTR) + seq_printf(s, ",nouser_xattr"); + else + seq_printf(s, ",user_xattr"); + + if (opts & OCFS2_MOUNT_INODE64) + seq_printf(s, ",inode64"); + + return 0; +} + +static int __init ocfs2_init(void) +{ + int status; + + mlog_entry_void(); + + ocfs2_print_version(); + + status = init_ocfs2_uptodate_cache(); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_initialize_mem_caches(); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + ocfs2_wq = create_singlethread_workqueue("ocfs2_wq"); + if (!ocfs2_wq) { + status = -ENOMEM; + goto leave; + } + + ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL); + if (!ocfs2_debugfs_root) { + status = -EFAULT; + mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); + } + + ocfs2_set_locking_protocol(); + +leave: + if (status < 0) { + ocfs2_free_mem_caches(); + exit_ocfs2_uptodate_cache(); + } + + mlog_exit(status); + + if (status >= 0) { + return register_filesystem(&ocfs2_fs_type); + } else + return -1; +} + +static void __exit ocfs2_exit(void) +{ + mlog_entry_void(); + + if (ocfs2_wq) { + flush_workqueue(ocfs2_wq); + destroy_workqueue(ocfs2_wq); + } + + debugfs_remove(ocfs2_debugfs_root); + + ocfs2_free_mem_caches(); + + unregister_filesystem(&ocfs2_fs_type); + + exit_ocfs2_uptodate_cache(); + + mlog_exit_void(); +} + +static void ocfs2_put_super(struct super_block *sb) +{ + mlog_entry("(0x%p)\n", sb); + + ocfs2_sync_blockdev(sb); + 
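+	/* Flush dirty buffers to the block device before
+	 * ocfs2_dismount_volume() tears down the journal, cluster
+	 * locks and heartbeat. */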
ocfs2_dismount_volume(sb, 0); + + mlog_exit_void(); +} + +static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct ocfs2_super *osb; + u32 numbits, freebits; + int status; + struct ocfs2_dinode *bm_lock; + struct buffer_head *bh = NULL; + struct inode *inode = NULL; + + mlog_entry("(%p, %p)\n", dentry->d_sb, buf); + + osb = OCFS2_SB(dentry->d_sb); + + inode = ocfs2_get_system_file_inode(osb, + GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (!inode) { + mlog(ML_ERROR, "failed to get bitmap inode\n"); + status = -EIO; + goto bail; + } + + status = ocfs2_inode_lock(inode, &bh, 0); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + bm_lock = (struct ocfs2_dinode *) bh->b_data; + + numbits = le32_to_cpu(bm_lock->id1.bitmap1.i_total); + freebits = numbits - le32_to_cpu(bm_lock->id1.bitmap1.i_used); + + buf->f_type = OCFS2_SUPER_MAGIC; + buf->f_bsize = dentry->d_sb->s_blocksize; + buf->f_namelen = OCFS2_MAX_FILENAME_LEN; + buf->f_blocks = ((sector_t) numbits) * + (osb->s_clustersize >> osb->sb->s_blocksize_bits); + buf->f_bfree = ((sector_t) freebits) * + (osb->s_clustersize >> osb->sb->s_blocksize_bits); + buf->f_bavail = buf->f_bfree; + buf->f_files = numbits; + buf->f_ffree = freebits; + + brelse(bh); + + ocfs2_inode_unlock(inode, 0); + status = 0; +bail: + if (inode) + iput(inode); + + mlog_exit(status); + + return status; +} + +static void ocfs2_inode_init_once(void *data) +{ + struct ocfs2_inode_info *oi = data; + + oi->ip_flags = 0; + oi->ip_open_count = 0; + spin_lock_init(&oi->ip_lock); + ocfs2_extent_map_init(&oi->vfs_inode); + INIT_LIST_HEAD(&oi->ip_io_markers); + oi->ip_created_trans = 0; + oi->ip_last_trans = 0; + oi->ip_dir_start_lookup = 0; + + init_rwsem(&oi->ip_alloc_sem); + init_rwsem(&oi->ip_xattr_sem); + mutex_init(&oi->ip_io_mutex); + + oi->ip_blkno = 0ULL; + oi->ip_clusters = 0; + + ocfs2_lock_res_init_once(&oi->ip_rw_lockres); + ocfs2_lock_res_init_once(&oi->ip_inode_lockres); + ocfs2_lock_res_init_once(&oi->ip_open_lockres); + + ocfs2_metadata_cache_init(&oi->vfs_inode); + + inode_init_once(&oi->vfs_inode); +} + +static int ocfs2_initialize_mem_caches(void) +{ + ocfs2_inode_cachep = kmem_cache_create("ocfs2_inode_cache", + sizeof(struct ocfs2_inode_info), + 0, + (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + ocfs2_inode_init_once); + if (!ocfs2_inode_cachep) + return -ENOMEM; + + return 0; +} + +static void ocfs2_free_mem_caches(void) +{ + if (ocfs2_inode_cachep) + kmem_cache_destroy(ocfs2_inode_cachep); + + ocfs2_inode_cachep = NULL; +} + +static int ocfs2_get_sector(struct super_block *sb, + struct buffer_head **bh, + int block, + int sect_size) +{ + if (!sb_set_blocksize(sb, sect_size)) { + mlog(ML_ERROR, "unable to set blocksize\n"); + return -EIO; + } + + *bh = sb_getblk(sb, block); + if (!*bh) { + mlog_errno(-EIO); + return -EIO; + } + lock_buffer(*bh); + if (!buffer_dirty(*bh)) + clear_buffer_uptodate(*bh); + unlock_buffer(*bh); + ll_rw_block(READ, 1, bh); + wait_on_buffer(*bh); + return 0; +} + +static int ocfs2_mount_volume(struct super_block *sb) +{ + int status = 0; + int unlock_super = 0; + struct ocfs2_super *osb = OCFS2_SB(sb); + + mlog_entry_void(); + + if (ocfs2_is_hard_readonly(osb)) + goto leave; + + status = ocfs2_dlm_init(osb); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_super_lock(osb, 1); + if (status < 0) { + mlog_errno(status); + goto leave; + } + unlock_super = 1; + + /* This will load up the node map and add ourselves to it. 
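+	 * Each mounted node owns exactly one slot in the slot map;
+	 * ocfs2_find_slot() below tries the "slot=" mount option's
+	 * preferred slot first when it is free, and otherwise claims
+	 * the first unused slot.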
*/ + status = ocfs2_find_slot(osb); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + /* load all node-local system inodes */ + status = ocfs2_init_local_system_inodes(osb); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_check_volume(osb); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_truncate_log_init(osb); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + if (ocfs2_mount_local(osb)) + goto leave; + +leave: + if (unlock_super) + ocfs2_super_unlock(osb, 1); + + mlog_exit(status); + return status; +} + +static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) +{ + int tmp, hangup_needed = 0; + struct ocfs2_super *osb = NULL; + char nodestr[8]; + + mlog_entry("(0x%p)\n", sb); + + BUG_ON(!sb); + osb = OCFS2_SB(sb); + BUG_ON(!osb); + + ocfs2_shutdown_local_alloc(osb); + + ocfs2_truncate_log_shutdown(osb); + + /* This will disable recovery and flush any recovery work. */ + ocfs2_recovery_exit(osb); + + ocfs2_journal_shutdown(osb); + + ocfs2_sync_blockdev(sb); + + /* No cluster connection means we've failed during mount, so skip + * all the steps which depended on that to complete. */ + if (osb->cconn) { + tmp = ocfs2_super_lock(osb, 1); + if (tmp < 0) { + mlog_errno(tmp); + return; + } + } + + if (osb->slot_num != OCFS2_INVALID_SLOT) + ocfs2_put_slot(osb); + + if (osb->cconn) + ocfs2_super_unlock(osb, 1); + + ocfs2_release_system_inodes(osb); + + /* + * If we're dismounting due to mount error, mount.ocfs2 will clean + * up heartbeat. If we're a local mount, there is no heartbeat. + * If we failed before we got a uuid_str yet, we can't stop + * heartbeat. Otherwise, do it. + */ + if (!mnt_err && !ocfs2_mount_local(osb) && osb->uuid_str) + hangup_needed = 1; + + if (osb->cconn) + ocfs2_dlm_shutdown(osb, hangup_needed); + + debugfs_remove(osb->osb_debug_root); + + if (hangup_needed) + ocfs2_cluster_hangup(osb->uuid_str, strlen(osb->uuid_str)); + + atomic_set(&osb->vol_state, VOLUME_DISMOUNTED); + + if (ocfs2_mount_local(osb)) + snprintf(nodestr, sizeof(nodestr), "local"); + else + snprintf(nodestr, sizeof(nodestr), "%u", osb->node_num); + + printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %s)\n", + osb->dev_str, nodestr); + + ocfs2_delete_osb(osb); + kfree(osb); + sb->s_dev = 0; + sb->s_fs_info = NULL; +} + +static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uuid, + unsigned uuid_bytes) +{ + int i, ret; + char *ptr; + + BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN); + + osb->uuid_str = kzalloc(OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL); + if (osb->uuid_str == NULL) + return -ENOMEM; + + for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) { + /* print with null */ + ret = snprintf(ptr, 3, "%02X", uuid[i]); + if (ret != 2) /* drop super cleans up */ + return -EINVAL; + /* then only advance past the last char */ + ptr += 2; + } + + return 0; +} + +static int ocfs2_initialize_super(struct super_block *sb, + struct buffer_head *bh, + int sector_size) +{ + int status; + int i, cbits, bbits; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; + struct inode *inode = NULL; + struct ocfs2_journal *journal; + __le32 uuid_net_key; + struct ocfs2_super *osb; + + mlog_entry_void(); + + osb = kzalloc(sizeof(struct ocfs2_super), GFP_KERNEL); + if (!osb) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + sb->s_fs_info = osb; + sb->s_op = &ocfs2_sops; + sb->s_export_op = &ocfs2_export_ops; + sb->s_xattr = ocfs2_xattr_handlers; + sb->s_time_gran = 
1; + sb->s_flags |= MS_NOATIME; + /* this is needed to support O_LARGEFILE */ + cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits); + bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits); + sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits); + + osb->sb = sb; + /* Save off for ocfs2_rw_direct */ + osb->s_sectsize_bits = blksize_bits(sector_size); + BUG_ON(!osb->s_sectsize_bits); + + spin_lock_init(&osb->dc_task_lock); + init_waitqueue_head(&osb->dc_event); + osb->dc_work_sequence = 0; + osb->dc_wake_sequence = 0; + INIT_LIST_HEAD(&osb->blocked_lock_list); + osb->blocked_lock_count = 0; + spin_lock_init(&osb->osb_lock); + ocfs2_init_inode_steal_slot(osb); + + atomic_set(&osb->alloc_stats.moves, 0); + atomic_set(&osb->alloc_stats.local_data, 0); + atomic_set(&osb->alloc_stats.bitmap_data, 0); + atomic_set(&osb->alloc_stats.bg_allocs, 0); + atomic_set(&osb->alloc_stats.bg_extends, 0); + + ocfs2_init_node_maps(osb); + + snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u", + MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); + + status = ocfs2_recovery_init(osb); + if (status) { + mlog(ML_ERROR, "Unable to initialize recovery state\n"); + mlog_errno(status); + goto bail; + } + + init_waitqueue_head(&osb->checkpoint_event); + atomic_set(&osb->needs_checkpoint, 0); + + osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; + + osb->slot_num = OCFS2_INVALID_SLOT; + + osb->s_xattr_inline_size = le16_to_cpu( + di->id2.i_super.s_xattr_inline_size); + + osb->local_alloc_state = OCFS2_LA_UNUSED; + osb->local_alloc_bh = NULL; + INIT_DELAYED_WORK(&osb->la_enable_wq, ocfs2_la_enable_worker); + + init_waitqueue_head(&osb->osb_mount_event); + + osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL); + if (!osb->vol_label) { + mlog(ML_ERROR, "unable to alloc vol label\n"); + status = -ENOMEM; + goto bail; + } + + osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots); + if (osb->max_slots > OCFS2_MAX_SLOTS || osb->max_slots == 0) { + mlog(ML_ERROR, "Invalid number of node slots (%u)\n", + osb->max_slots); + status = -EINVAL; + goto bail; + } + mlog(0, "max_slots for this device: %u\n", osb->max_slots); + + osb->slot_recovery_generations = + kcalloc(osb->max_slots, sizeof(*osb->slot_recovery_generations), + GFP_KERNEL); + if (!osb->slot_recovery_generations) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + init_waitqueue_head(&osb->osb_wipe_event); + osb->osb_orphan_wipes = kcalloc(osb->max_slots, + sizeof(*osb->osb_orphan_wipes), + GFP_KERNEL); + if (!osb->osb_orphan_wipes) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + osb->s_feature_compat = + le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_compat); + osb->s_feature_ro_compat = + le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_ro_compat); + osb->s_feature_incompat = + le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_incompat); + + if ((i = OCFS2_HAS_INCOMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_INCOMPAT_SUPP))) { + mlog(ML_ERROR, "couldn't mount because of unsupported " + "optional features (%x).\n", i); + status = -EINVAL; + goto bail; + } + if (!(osb->sb->s_flags & MS_RDONLY) && + (i = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP))) { + mlog(ML_ERROR, "couldn't mount RDWR because of " + "unsupported optional features (%x).\n", i); + status = -EINVAL; + goto bail; + } + + if (ocfs2_userspace_stack(osb)) { + memcpy(osb->osb_cluster_stack, + OCFS2_RAW_SB(di)->s_cluster_info.ci_stack, + OCFS2_STACK_LABEL_LEN); + osb->osb_cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + if (strlen(osb->osb_cluster_stack) != 
OCFS2_STACK_LABEL_LEN) { + mlog(ML_ERROR, + "couldn't mount because of an invalid " + "cluster stack label (%s) \n", + osb->osb_cluster_stack); + status = -EINVAL; + goto bail; + } + } else { + /* The empty string is identical with classic tools that + * don't know about s_cluster_info. */ + osb->osb_cluster_stack[0] = '\0'; + } + + get_random_bytes(&osb->s_next_generation, sizeof(u32)); + + /* FIXME + * This should be done in ocfs2_journal_init(), but unknown + * ordering issues will cause the filesystem to crash. + * If anyone wants to figure out what part of the code + * refers to osb->journal before ocfs2_journal_init() is run, + * be my guest. + */ + /* initialize our journal structure */ + + journal = kzalloc(sizeof(struct ocfs2_journal), GFP_KERNEL); + if (!journal) { + mlog(ML_ERROR, "unable to alloc journal\n"); + status = -ENOMEM; + goto bail; + } + osb->journal = journal; + journal->j_osb = osb; + + atomic_set(&journal->j_num_trans, 0); + init_rwsem(&journal->j_trans_barrier); + init_waitqueue_head(&journal->j_checkpointed); + spin_lock_init(&journal->j_lock); + journal->j_trans_id = (unsigned long) 1; + INIT_LIST_HEAD(&journal->j_la_cleanups); + INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); + journal->j_state = OCFS2_JOURNAL_FREE; + + /* get some pseudo constants for clustersize bits */ + osb->s_clustersize_bits = + le32_to_cpu(di->id2.i_super.s_clustersize_bits); + osb->s_clustersize = 1 << osb->s_clustersize_bits; + mlog(0, "clusterbits=%d\n", osb->s_clustersize_bits); + + if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE || + osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) { + mlog(ML_ERROR, "Volume has invalid cluster size (%d)\n", + osb->s_clustersize); + status = -EINVAL; + goto bail; + } + + if (ocfs2_clusters_to_blocks(osb->sb, le32_to_cpu(di->i_clusters) - 1) + > (u32)~0UL) { + mlog(ML_ERROR, "Volume might try to write to blocks beyond " + "what jbd can address in 32 bits.\n"); + status = -EINVAL; + goto bail; + } + + if (ocfs2_setup_osb_uuid(osb, di->id2.i_super.s_uuid, + sizeof(di->id2.i_super.s_uuid))) { + mlog(ML_ERROR, "Out of memory trying to setup our uuid.\n"); + status = -ENOMEM; + goto bail; + } + + memcpy(&uuid_net_key, di->id2.i_super.s_uuid, sizeof(uuid_net_key)); + + strncpy(osb->vol_label, di->id2.i_super.s_label, 63); + osb->vol_label[63] = '\0'; + osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno); + osb->system_dir_blkno = le64_to_cpu(di->id2.i_super.s_system_dir_blkno); + osb->first_cluster_group_blkno = + le64_to_cpu(di->id2.i_super.s_first_cluster_group); + osb->fs_generation = le32_to_cpu(di->i_fs_generation); + osb->uuid_hash = le32_to_cpu(di->id2.i_super.s_uuid_hash); + mlog(0, "vol_label: %s\n", osb->vol_label); + mlog(0, "uuid: %s\n", osb->uuid_str); + mlog(0, "root_blkno=%llu, system_dir_blkno=%llu\n", + (unsigned long long)osb->root_blkno, + (unsigned long long)osb->system_dir_blkno); + + osb->osb_dlm_debug = ocfs2_new_dlm_debug(); + if (!osb->osb_dlm_debug) { + status = -ENOMEM; + mlog_errno(status); + goto bail; + } + + atomic_set(&osb->vol_state, VOLUME_INIT); + + /* load root, system_dir, and all global system inodes */ + status = ocfs2_init_global_system_inodes(osb); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + /* + * global bitmap + */ + inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, + OCFS2_INVALID_SLOT); + if (!inode) { + status = -EINVAL; + mlog_errno(status); + goto bail; + } + + osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno; + iput(inode); + + osb->bitmap_cpg = 
ocfs2_group_bitmap_size(sb) * 8; + + status = ocfs2_init_slot_info(osb); + if (status < 0) { + mlog_errno(status); + goto bail; + } + +bail: + mlog_exit(status); + return status; +} + +/* + * will return: -EAGAIN if it is ok to keep searching for superblocks + * -EINVAL if there is a bad superblock + * 0 on success + */ +static int ocfs2_verify_volume(struct ocfs2_dinode *di, + struct buffer_head *bh, + u32 blksz) +{ + int status = -EAGAIN; + + mlog_entry_void(); + + if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, + strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { + status = -EINVAL; + if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) { + mlog(ML_ERROR, "found superblock with incorrect block " + "size: found %u, should be %u\n", + 1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits), + blksz); + } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) != + OCFS2_MAJOR_REV_LEVEL || + le16_to_cpu(di->id2.i_super.s_minor_rev_level) != + OCFS2_MINOR_REV_LEVEL) { + mlog(ML_ERROR, "found superblock with bad version: " + "found %u.%u, should be %u.%u\n", + le16_to_cpu(di->id2.i_super.s_major_rev_level), + le16_to_cpu(di->id2.i_super.s_minor_rev_level), + OCFS2_MAJOR_REV_LEVEL, + OCFS2_MINOR_REV_LEVEL); + } else if (bh->b_blocknr != le64_to_cpu(di->i_blkno)) { + mlog(ML_ERROR, "bad block number on superblock: " + "found %llu, should be %llu\n", + (unsigned long long)le64_to_cpu(di->i_blkno), + (unsigned long long)bh->b_blocknr); + } else if (le32_to_cpu(di->id2.i_super.s_clustersize_bits) < 12 || + le32_to_cpu(di->id2.i_super.s_clustersize_bits) > 20) { + mlog(ML_ERROR, "bad cluster size found: %u\n", + 1 << le32_to_cpu(di->id2.i_super.s_clustersize_bits)); + } else if (!le64_to_cpu(di->id2.i_super.s_root_blkno)) { + mlog(ML_ERROR, "bad root_blkno: 0\n"); + } else if (!le64_to_cpu(di->id2.i_super.s_system_dir_blkno)) { + mlog(ML_ERROR, "bad system_dir_blkno: 0\n"); + } else if (le16_to_cpu(di->id2.i_super.s_max_slots) > OCFS2_MAX_SLOTS) { + mlog(ML_ERROR, + "Superblock slots found greater than file system " + "maximum: found %u, max %u\n", + le16_to_cpu(di->id2.i_super.s_max_slots), + OCFS2_MAX_SLOTS); + } else { + /* found it! */ + status = 0; + } + } + + mlog_exit(status); + return status; +} + +static int ocfs2_check_volume(struct ocfs2_super *osb) +{ + int status; + int dirty; + int local; + struct ocfs2_dinode *local_alloc = NULL; /* only used if we + * recover + * ourselves. */ + + mlog_entry_void(); + + /* Init our journal object. */ + status = ocfs2_journal_init(osb->journal, &dirty); + if (status < 0) { + mlog(ML_ERROR, "Could not initialize journal!\n"); + goto finally; + } + + /* If the journal was unmounted cleanly then we don't want to + * recover anything. Otherwise, journal_load will do that + * dirty work for us :) */ + if (!dirty) { + status = ocfs2_journal_wipe(osb->journal, 0); + if (status < 0) { + mlog_errno(status); + goto finally; + } + } else { + mlog(ML_NOTICE, "File system was not unmounted cleanly, " + "recovering volume.\n"); + } + + local = ocfs2_mount_local(osb); + + /* will play back anything left in the journal. */ + status = ocfs2_journal_load(osb->journal, local, dirty); + if (status < 0) { + mlog(ML_ERROR, "ocfs2 journal load failed! %d\n", status); + goto finally; + } + + if (dirty) { + /* recover my local alloc if we didn't unmount cleanly. 
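+		 * This only snapshots the on-disk local alloc state;
+		 * the clusters it covers are handed back to the global
+		 * bitmap later, via osb->local_alloc_copy, once the
+		 * mount has progressed far enough (see below).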
*/ + status = ocfs2_begin_local_alloc_recovery(osb, + osb->slot_num, + &local_alloc); + if (status < 0) { + mlog_errno(status); + goto finally; + } + /* we complete the recovery process after we've marked + * ourselves as mounted. */ + } + + mlog(0, "Journal loaded.\n"); + + status = ocfs2_load_local_alloc(osb); + if (status < 0) { + mlog_errno(status); + goto finally; + } + + if (dirty) { + /* Recovery will be completed after we've mounted the + * rest of the volume. */ + osb->dirty = 1; + osb->local_alloc_copy = local_alloc; + local_alloc = NULL; + } + + /* go through each journal, trylock it and if you get the + * lock, and it's marked as dirty, set the bit in the recover + * map and launch a recovery thread for it. */ + status = ocfs2_mark_dead_nodes(osb); + if (status < 0) + mlog_errno(status); + +finally: + if (local_alloc) + kfree(local_alloc); + + mlog_exit(status); + return status; +} + +/* + * The routine gets called from dismount or close whenever a dismount on + * volume is requested and the osb open count becomes 1. + * It will remove the osb from the global list and also free up all the + * initialized resources and fileobject. + */ +static void ocfs2_delete_osb(struct ocfs2_super *osb) +{ + mlog_entry_void(); + + /* This function assumes that the caller has the main osb resource */ + + ocfs2_free_slot_info(osb); + + kfree(osb->osb_orphan_wipes); + kfree(osb->slot_recovery_generations); + /* FIXME + * This belongs in journal shutdown, but because we have to + * allocate osb->journal at the start of ocfs2_initalize_osb(), + * we free it here. + */ + kfree(osb->journal); + if (osb->local_alloc_copy) + kfree(osb->local_alloc_copy); + kfree(osb->uuid_str); + ocfs2_put_dlm_debug(osb->osb_dlm_debug); + memset(osb, 0, sizeof(struct ocfs2_super)); + + mlog_exit_void(); +} + +/* Put OCFS2 into a readonly state, or (if the user specifies it), + * panic(). We do not support continue-on-error operation. */ +static void ocfs2_handle_error(struct super_block *sb) +{ + struct ocfs2_super *osb = OCFS2_SB(sb); + + if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC) + panic("OCFS2: (device %s): panic forced after error\n", + sb->s_id); + + ocfs2_set_osb_flag(osb, OCFS2_OSB_ERROR_FS); + + if (sb->s_flags & MS_RDONLY && + (ocfs2_is_soft_readonly(osb) || + ocfs2_is_hard_readonly(osb))) + return; + + printk(KERN_CRIT "File system is now read-only due to the potential " + "of on-disk corruption. Please run fsck.ocfs2 once the file " + "system is unmounted.\n"); + sb->s_flags |= MS_RDONLY; + ocfs2_set_ro_flag(osb, 0); +} + +static char error_buf[1024]; + +void __ocfs2_error(struct super_block *sb, + const char *function, + const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vsnprintf(error_buf, sizeof(error_buf), fmt, args); + va_end(args); + + /* Not using mlog here because we want to show the actual + * function the error came from. */ + printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %s\n", + sb->s_id, function, error_buf); + + ocfs2_handle_error(sb); +} + +/* Handle critical errors. This is intentionally more drastic than + * ocfs2_handle_error, so we only use for things like journal errors, + * etc. */ +void __ocfs2_abort(struct super_block* sb, + const char *function, + const char *fmt, ...) 
+{ + va_list args; + + va_start(args, fmt); + vsnprintf(error_buf, sizeof(error_buf), fmt, args); + va_end(args); + + printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n", + sb->s_id, function, error_buf); + + /* We don't have the cluster support yet to go straight to + * hard readonly in here. Until then, we want to keep + * ocfs2_abort() so that we can at least mark critical + * errors. + * + * TODO: This should abort the journal and alert other nodes + * that our slot needs recovery. */ + + /* Force a panic(). This stinks, but it's better than letting + * things continue without having a proper hard readonly + * here. */ + OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; + ocfs2_handle_error(sb); +} + +module_init(ocfs2_init); +module_exit(ocfs2_exit); diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h new file mode 100644 index 0000000..783f527 --- /dev/null +++ b/fs/ocfs2/super.h @@ -0,0 +1,48 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * super.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_SUPER_H +#define OCFS2_SUPER_H + +extern struct workqueue_struct *ocfs2_wq; + +int ocfs2_publish_get_mount_state(struct ocfs2_super *osb, + int node_num); + +void __ocfs2_error(struct super_block *sb, + const char *function, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); + +#define ocfs2_error(sb, fmt, args...) __ocfs2_error(sb, __PRETTY_FUNCTION__, fmt, ##args) + +void __ocfs2_abort(struct super_block *sb, + const char *function, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); + +#define ocfs2_abort(sb, fmt, args...) __ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##args) + +#endif /* OCFS2_SUPER_H */ diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c new file mode 100644 index 0000000..cbd03df --- /dev/null +++ b/fs/ocfs2/symlink.c @@ -0,0 +1,181 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * linux/cluster/ssi/cfs/symlink.c + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE + * or NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Questions/Comments/Bugfixes to ssic-linux-devel@lists.sourceforge.net + * + * Copyright (C) 1992 Rick Sladkey + * + * Optimization changes Copyright (C) 1994 Florian La Roche + * + * Jun 7 1999, cache symlink lookups in the page cache. -DaveM + * + * Portions Copyright (C) 2001 Compaq Computer Corporation + * + * ocfs2 symlink handling code. + * + * Copyright (C) 2004, 2005 Oracle. + * + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/pagemap.h> +#include <linux/utsname.h> + +#define MLOG_MASK_PREFIX ML_NAMEI +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "file.h" +#include "inode.h" +#include "journal.h" +#include "symlink.h" +#include "xattr.h" + +#include "buffer_head_io.h" + +static char *ocfs2_page_getlink(struct dentry * dentry, + struct page **ppage); +static char *ocfs2_fast_symlink_getlink(struct inode *inode, + struct buffer_head **bh); + +/* get the link contents into pagecache */ +static char *ocfs2_page_getlink(struct dentry * dentry, + struct page **ppage) +{ + struct page * page; + struct address_space *mapping = dentry->d_inode->i_mapping; + page = read_mapping_page(mapping, 0, NULL); + if (IS_ERR(page)) + goto sync_fail; + *ppage = page; + return kmap(page); + +sync_fail: + return (char*)page; +} + +static char *ocfs2_fast_symlink_getlink(struct inode *inode, + struct buffer_head **bh) +{ + int status; + char *link = NULL; + struct ocfs2_dinode *fe; + + mlog_entry_void(); + + status = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, bh); + if (status < 0) { + mlog_errno(status); + link = ERR_PTR(status); + goto bail; + } + + fe = (struct ocfs2_dinode *) (*bh)->b_data; + link = (char *) fe->id2.i_symlink; +bail: + mlog_exit(status); + + return link; +} + +static int ocfs2_readlink(struct dentry *dentry, + char __user *buffer, + int buflen) +{ + int ret; + char *link; + struct buffer_head *bh = NULL; + struct inode *inode = dentry->d_inode; + + mlog_entry_void(); + + link = ocfs2_fast_symlink_getlink(inode, &bh); + if (IS_ERR(link)) { + ret = PTR_ERR(link); + goto out; + } + + /* + * Without vfsmount we can't update atime now, + * but we will update atime here ultimately. 
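+	 * (->readlink() is not passed a vfsmount in this kernel, and
+	 * touch_atime() needs one, so the atime update is deferred.)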
+ */ + ret = vfs_readlink(dentry, buffer, buflen, link); + + brelse(bh); +out: + mlog_exit(ret); + return ret; +} + +static void *ocfs2_follow_link(struct dentry *dentry, + struct nameidata *nd) +{ + int status; + char *link; + struct inode *inode = dentry->d_inode; + struct page *page = NULL; + struct buffer_head *bh = NULL; + + if (ocfs2_inode_is_fast_symlink(inode)) + link = ocfs2_fast_symlink_getlink(inode, &bh); + else + link = ocfs2_page_getlink(dentry, &page); + if (IS_ERR(link)) { + status = PTR_ERR(link); + mlog_errno(status); + goto bail; + } + + status = vfs_follow_link(nd, link); + +bail: + if (page) { + kunmap(page); + page_cache_release(page); + } + brelse(bh); + + return ERR_PTR(status); +} + +const struct inode_operations ocfs2_symlink_inode_operations = { + .readlink = page_readlink, + .follow_link = ocfs2_follow_link, + .getattr = ocfs2_getattr, + .setattr = ocfs2_setattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = ocfs2_listxattr, + .removexattr = generic_removexattr, +}; +const struct inode_operations ocfs2_fast_symlink_inode_operations = { + .readlink = ocfs2_readlink, + .follow_link = ocfs2_follow_link, + .getattr = ocfs2_getattr, + .setattr = ocfs2_setattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .listxattr = ocfs2_listxattr, + .removexattr = generic_removexattr, +}; diff --git a/fs/ocfs2/symlink.h b/fs/ocfs2/symlink.h new file mode 100644 index 0000000..65a6c9c --- /dev/null +++ b/fs/ocfs2/symlink.h @@ -0,0 +1,42 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * symlink.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_SYMLINK_H +#define OCFS2_SYMLINK_H + +extern const struct inode_operations ocfs2_symlink_inode_operations; +extern const struct inode_operations ocfs2_fast_symlink_inode_operations; + +/* + * Test whether an inode is a fast symlink. + */ +static inline int ocfs2_inode_is_fast_symlink(struct inode *inode) +{ + return (S_ISLNK(inode->i_mode) && + inode->i_blocks == 0); +} + + +#endif /* OCFS2_SYMLINK_H */ diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c new file mode 100644 index 0000000..ab713eb --- /dev/null +++ b/fs/ocfs2/sysfile.c @@ -0,0 +1,125 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * sysfile.c + * + * Initialize, read, write, etc. system files. + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> + +#define MLOG_MASK_PREFIX ML_INODE +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "alloc.h" +#include "dir.h" +#include "inode.h" +#include "journal.h" +#include "sysfile.h" + +#include "buffer_head_io.h" + +static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb, + int type, + u32 slot); + +static inline int is_global_system_inode(int type); +static inline int is_in_system_inode_array(struct ocfs2_super *osb, + int type, + u32 slot); + +static inline int is_global_system_inode(int type) +{ + return type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE && + type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE; +} + +static inline int is_in_system_inode_array(struct ocfs2_super *osb, + int type, + u32 slot) +{ + return slot == osb->slot_num || is_global_system_inode(type); +} + +struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb, + int type, + u32 slot) +{ + struct inode *inode = NULL; + struct inode **arr = NULL; + + /* avoid the lookup if cached in local system file array */ + if (is_in_system_inode_array(osb, type, slot)) + arr = &(osb->system_inodes[type]); + + if (arr && ((inode = *arr) != NULL)) { + /* get a ref in addition to the array ref */ + inode = igrab(inode); + BUG_ON(!inode); + + return inode; + } + + /* this gets one ref thru iget */ + inode = _ocfs2_get_system_file_inode(osb, type, slot); + + /* add one more if putting into array for first time */ + if (arr && inode) { + *arr = igrab(inode); + BUG_ON(!*arr); + } + return inode; +} + +static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb, + int type, + u32 slot) +{ + char namebuf[40]; + struct inode *inode = NULL; + u64 blkno; + int status = 0; + + ocfs2_sprintf_system_inode_name(namebuf, + sizeof(namebuf), + type, slot); + + status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf, + strlen(namebuf), &blkno); + if (status < 0) { + goto bail; + } + + inode = ocfs2_iget(osb, blkno, OCFS2_FI_FLAG_SYSFILE, type); + if (IS_ERR(inode)) { + mlog_errno(PTR_ERR(inode)); + inode = NULL; + goto bail; + } +bail: + + return inode; +} + diff --git a/fs/ocfs2/sysfile.h b/fs/ocfs2/sysfile.h new file mode 100644 index 0000000..cc9ea66 --- /dev/null +++ b/fs/ocfs2/sysfile.h @@ -0,0 +1,33 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * sysfile.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef OCFS2_SYSFILE_H +#define OCFS2_SYSFILE_H + +struct inode * ocfs2_get_system_file_inode(struct ocfs2_super *osb, + int type, + u32 slot); + +#endif /* OCFS2_SYSFILE_H */ diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c new file mode 100644 index 0000000..187b99f --- /dev/null +++ b/fs/ocfs2/uptodate.c @@ -0,0 +1,589 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * uptodate.c + * + * Tracking the up-to-date-ness of a local buffer_head with respect to + * the cluster. + * + * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + * Standard buffer head caching flags (uptodate, etc) are insufficient + * in a clustered environment - a buffer may be marked up to date on + * our local node but could have been modified by another cluster + * member. As a result an additional (and performant) caching scheme + * is required. A further requirement is that we consume as little + * memory as possible - we never pin buffer_head structures in order + * to cache them. + * + * We track the existence of up to date buffers on the inodes which + * are associated with them. Because we don't want to pin + * buffer_heads, this is only a (strong) hint and several other checks + * are made in the I/O path to ensure that we don't use a stale or + * invalid buffer without going to disk: + * - buffer_jbd is used liberally - if a bh is in the journal on + * this node then it *must* be up to date. 
+ * - the standard buffer_uptodate() macro is used to detect buffers + * which may be invalid (even if we have an up to date tracking + * item for them) + * + * For a full understanding of how this code works together, one + * should read the callers in dlmglue.c, the I/O functions in + * buffer_head_io.c and ocfs2_journal_access in journal.c + */ + +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/buffer_head.h> +#include <linux/rbtree.h> +#ifndef CONFIG_OCFS2_COMPAT_JBD +# include <linux/jbd2.h> +#else +# include <linux/jbd.h> +#endif + +#define MLOG_MASK_PREFIX ML_UPTODATE + +#include <cluster/masklog.h> + +#include "ocfs2.h" + +#include "inode.h" +#include "uptodate.h" + +struct ocfs2_meta_cache_item { + struct rb_node c_node; + sector_t c_block; +}; + +static struct kmem_cache *ocfs2_uptodate_cachep = NULL; + +void ocfs2_metadata_cache_init(struct inode *inode) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; + + oi->ip_flags |= OCFS2_INODE_CACHE_INLINE; + ci->ci_num_cached = 0; +} + +/* No lock taken here as 'root' is not expected to be visible to other + * processes. */ +static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root) +{ + unsigned int purged = 0; + struct rb_node *node; + struct ocfs2_meta_cache_item *item; + + while ((node = rb_last(root)) != NULL) { + item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); + + mlog(0, "Purge item %llu\n", + (unsigned long long) item->c_block); + + rb_erase(&item->c_node, root); + kmem_cache_free(ocfs2_uptodate_cachep, item); + + purged++; + } + return purged; +} + +/* Called from locking and called from ocfs2_clear_inode. Dump the + * cache for a given inode. + * + * This function is a few more lines longer than necessary due to some + * accounting done here, but I think it's worth tracking down those + * bugs sooner -- Mark */ +void ocfs2_metadata_cache_purge(struct inode *inode) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + unsigned int tree, to_purge, purged; + struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; + struct rb_root root = RB_ROOT; + + spin_lock(&oi->ip_lock); + tree = !(oi->ip_flags & OCFS2_INODE_CACHE_INLINE); + to_purge = ci->ci_num_cached; + + mlog(0, "Purge %u %s items from Inode %llu\n", to_purge, + tree ? "array" : "tree", (unsigned long long)oi->ip_blkno); + + /* If we're a tree, save off the root so that we can safely + * initialize the cache. We do the work to free tree members + * without the spinlock. */ + if (tree) + root = ci->ci_cache.ci_tree; + + ocfs2_metadata_cache_init(inode); + spin_unlock(&oi->ip_lock); + + purged = ocfs2_purge_copied_metadata_tree(&root); + /* If possible, track the number wiped so that we can more + * easily detect counting errors. Unfortunately, this is only + * meaningful for trees. */ + if (tree && purged != to_purge) + mlog(ML_ERROR, "Inode %llu, count = %u, purged = %u\n", + (unsigned long long)oi->ip_blkno, to_purge, purged); +} + +/* Returns the index in the cache array, -1 if not found. + * Requires ip_lock. */ +static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci, + sector_t item) +{ + int i; + + for (i = 0; i < ci->ci_num_cached; i++) { + if (item == ci->ci_cache.ci_array[i]) + return i; + } + + return -1; +} + +/* Returns the cache item if found, otherwise NULL. + * Requires ip_lock. 
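+ * This is a plain rbtree walk -- left for smaller block numbers,
+ * right for larger -- so a lookup costs O(log n) in the number of
+ * cached items.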
*/ +static struct ocfs2_meta_cache_item * +ocfs2_search_cache_tree(struct ocfs2_caching_info *ci, + sector_t block) +{ + struct rb_node * n = ci->ci_cache.ci_tree.rb_node; + struct ocfs2_meta_cache_item *item = NULL; + + while (n) { + item = rb_entry(n, struct ocfs2_meta_cache_item, c_node); + + if (block < item->c_block) + n = n->rb_left; + else if (block > item->c_block) + n = n->rb_right; + else + return item; + } + + return NULL; +} + +static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi, + struct buffer_head *bh) +{ + int index = -1; + struct ocfs2_meta_cache_item *item = NULL; + + spin_lock(&oi->ip_lock); + + mlog(0, "Inode %llu, query block %llu (inline = %u)\n", + (unsigned long long)oi->ip_blkno, + (unsigned long long) bh->b_blocknr, + !!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE)); + + if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) + index = ocfs2_search_cache_array(&oi->ip_metadata_cache, + bh->b_blocknr); + else + item = ocfs2_search_cache_tree(&oi->ip_metadata_cache, + bh->b_blocknr); + + spin_unlock(&oi->ip_lock); + + mlog(0, "index = %d, item = %p\n", index, item); + + return (index != -1) || (item != NULL); +} + +/* Warning: even if it returns true, this does *not* guarantee that + * the block is stored in our inode metadata cache. + * + * This can be called under lock_buffer() + */ +int ocfs2_buffer_uptodate(struct inode *inode, + struct buffer_head *bh) +{ + /* Doesn't matter if the bh is in our cache or not -- if it's + * not marked uptodate then we know it can't have correct + * data. */ + if (!buffer_uptodate(bh)) + return 0; + + /* OCFS2 does not allow multiple nodes to be changing the same + * block at the same time. */ + if (buffer_jbd(bh)) + return 1; + + /* Ok, locally the buffer is marked as up to date, now search + * our cache to see if we can trust that. */ + return ocfs2_buffer_cached(OCFS2_I(inode), bh); +} + +/* + * Determine whether a buffer is currently out on a read-ahead request. + * ip_io_sem should be held to serialize submitters with the logic here. + */ +int ocfs2_buffer_read_ahead(struct inode *inode, + struct buffer_head *bh) +{ + return buffer_locked(bh) && ocfs2_buffer_cached(OCFS2_I(inode), bh); +} + +/* Requires ip_lock */ +static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci, + sector_t block) +{ + BUG_ON(ci->ci_num_cached >= OCFS2_INODE_MAX_CACHE_ARRAY); + + mlog(0, "block %llu takes position %u\n", (unsigned long long) block, + ci->ci_num_cached); + + ci->ci_cache.ci_array[ci->ci_num_cached] = block; + ci->ci_num_cached++; +} + +/* By now the caller should have checked that the item does *not* + * exist in the tree. + * Requires ip_lock. */ +static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci, + struct ocfs2_meta_cache_item *new) +{ + sector_t block = new->c_block; + struct rb_node *parent = NULL; + struct rb_node **p = &ci->ci_cache.ci_tree.rb_node; + struct ocfs2_meta_cache_item *tmp; + + mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block, + ci->ci_num_cached); + + while(*p) { + parent = *p; + + tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node); + + if (block < tmp->c_block) + p = &(*p)->rb_left; + else if (block > tmp->c_block) + p = &(*p)->rb_right; + else { + /* This should never happen! 
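+			 * Duplicate inserts are screened out beforehand
+			 * by ocfs2_buffer_cached() and serialized by
+			 * ip_io_mutex, so a match here means the cache
+			 * has been corrupted.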
*/ + mlog(ML_ERROR, "Duplicate block %llu cached!\n", + (unsigned long long) block); + BUG(); + } + } + + rb_link_node(&new->c_node, parent, p); + rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree); + ci->ci_num_cached++; +} + +static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi, + struct ocfs2_caching_info *ci) +{ + assert_spin_locked(&oi->ip_lock); + + return (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) && + (ci->ci_num_cached < OCFS2_INODE_MAX_CACHE_ARRAY); +} + +/* tree should be exactly OCFS2_INODE_MAX_CACHE_ARRAY wide. NULL the + * pointers in tree after we use them - this allows caller to detect + * when to free in case of error. */ +static void ocfs2_expand_cache(struct ocfs2_inode_info *oi, + struct ocfs2_meta_cache_item **tree) +{ + int i; + struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; + + mlog_bug_on_msg(ci->ci_num_cached != OCFS2_INODE_MAX_CACHE_ARRAY, + "Inode %llu, num cached = %u, should be %u\n", + (unsigned long long)oi->ip_blkno, ci->ci_num_cached, + OCFS2_INODE_MAX_CACHE_ARRAY); + mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE), + "Inode %llu not marked as inline anymore!\n", + (unsigned long long)oi->ip_blkno); + assert_spin_locked(&oi->ip_lock); + + /* Be careful to initialize the tree members *first* because + * once the ci_tree is used, the array is junk... */ + for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) + tree[i]->c_block = ci->ci_cache.ci_array[i]; + + oi->ip_flags &= ~OCFS2_INODE_CACHE_INLINE; + ci->ci_cache.ci_tree = RB_ROOT; + /* this will be set again by __ocfs2_insert_cache_tree */ + ci->ci_num_cached = 0; + + for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { + __ocfs2_insert_cache_tree(ci, tree[i]); + tree[i] = NULL; + } + + mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n", + (unsigned long long)oi->ip_blkno, oi->ip_flags, ci->ci_num_cached); +} + +/* Slow path function - memory allocation is necessary. See the + * comment above ocfs2_set_buffer_uptodate for more information. */ +static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, + sector_t block, + int expand_tree) +{ + int i; + struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; + struct ocfs2_meta_cache_item *new = NULL; + struct ocfs2_meta_cache_item *tree[OCFS2_INODE_MAX_CACHE_ARRAY] = + { NULL, }; + + mlog(0, "Inode %llu, block %llu, expand = %d\n", + (unsigned long long)oi->ip_blkno, + (unsigned long long)block, expand_tree); + + new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); + if (!new) { + mlog_errno(-ENOMEM); + return; + } + new->c_block = block; + + if (expand_tree) { + /* Do *not* allocate an array here - the removal code + * has no way of tracking that. */ + for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { + tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, + GFP_NOFS); + if (!tree[i]) { + mlog_errno(-ENOMEM); + goto out_free; + } + + /* These are initialized in ocfs2_expand_cache! */ + } + } + + spin_lock(&oi->ip_lock); + if (ocfs2_insert_can_use_array(oi, ci)) { + mlog(0, "Someone cleared the tree underneath us\n"); + /* Ok, items were removed from the cache in between + * locks. Detect this and revert back to the fast path */ + ocfs2_append_cache_array(ci, block); + spin_unlock(&oi->ip_lock); + goto out_free; + } + + if (expand_tree) + ocfs2_expand_cache(oi, tree); + + __ocfs2_insert_cache_tree(ci, new); + spin_unlock(&oi->ip_lock); + + new = NULL; +out_free: + if (new) + kmem_cache_free(ocfs2_uptodate_cachep, new); + + /* If these were used, then ocfs2_expand_cache re-set them to + * NULL for us. 
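+	 * Checking tree[0] is enough to detect the "nothing to free"
+	 * cases: either no tree items were allocated at all, or
+	 * ocfs2_expand_cache() consumed them and NULLed every slot.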
*/
+	if (tree[0]) {
+		for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++)
+			if (tree[i])
+				kmem_cache_free(ocfs2_uptodate_cachep,
+						tree[i]);
+	}
+}
+
+/* Item insertion is guarded by ip_io_mutex, so the insertion path takes
+ * advantage of this by not rechecking for a duplicate insert during
+ * the slow case. Additionally, if the cache needs to be bumped up to
+ * a tree, the code will not recheck after acquiring the lock --
+ * multiple paths cannot be expanding to a tree at the same time.
+ *
+ * The slow path takes into account that items can be removed
+ * (including the whole tree wiped and reset) while this process is out
+ * allocating memory. In those cases, it reverts to the fast
+ * path.
+ *
+ * Note that this function may actually fail to insert the block if
+ * memory cannot be allocated. This is not fatal however (but may
+ * result in a performance penalty).
+ *
+ * Readahead buffers can be passed in here before the I/O request is
+ * completed.
+ */
+void ocfs2_set_buffer_uptodate(struct inode *inode,
+			       struct buffer_head *bh)
+{
+	int expand;
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+	struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+
+	/* The block may very well exist in our cache already, so avoid
+	 * doing any more work in that case. */
+	if (ocfs2_buffer_cached(oi, bh))
+		return;
+
+	mlog(0, "Inode %llu, inserting block %llu\n",
+	     (unsigned long long)oi->ip_blkno,
+	     (unsigned long long)bh->b_blocknr);
+
+	/* No need to recheck under spinlock - insertion is guarded by
+	 * ip_io_mutex */
+	spin_lock(&oi->ip_lock);
+	if (ocfs2_insert_can_use_array(oi, ci)) {
+		/* Fast case - it's an array and there's a free
+		 * spot. */
+		ocfs2_append_cache_array(ci, bh->b_blocknr);
+		spin_unlock(&oi->ip_lock);
+		return;
+	}
+
+	expand = 0;
+	if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) {
+		/* We need to bump things up to a tree. */
+		expand = 1;
+	}
+	spin_unlock(&oi->ip_lock);
+
+	__ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand);
+}
+
+/* Called against a newly allocated buffer. Most likely nobody should
+ * be able to read this sort of metadata while it's still being
+ * allocated, but we take ip_io_mutex anyway, to be careful. */
+void ocfs2_set_new_buffer_uptodate(struct inode *inode,
+				   struct buffer_head *bh)
+{
+	struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+	/* This should definitely *not* exist in our cache */
+	BUG_ON(ocfs2_buffer_cached(oi, bh));
+
+	set_buffer_uptodate(bh);
+
+	mutex_lock(&oi->ip_io_mutex);
+	ocfs2_set_buffer_uptodate(inode, bh);
+	mutex_unlock(&oi->ip_io_mutex);
+}
+
+/* Requires ip_lock. */
+static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
+					int index)
+{
+	sector_t *array = ci->ci_cache.ci_array;
+	int bytes;
+
+	BUG_ON(index < 0 || index >= OCFS2_INODE_MAX_CACHE_ARRAY);
+	BUG_ON(index >= ci->ci_num_cached);
+	BUG_ON(!ci->ci_num_cached);
+
+	mlog(0, "remove index %d (num_cached = %u)\n", index,
+	     ci->ci_num_cached);
+
+	ci->ci_num_cached--;
+
+	/* don't need to copy if the array is now empty, or if we
+	 * removed at the tail */
+	if (ci->ci_num_cached && index < ci->ci_num_cached) {
+		bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
+		memmove(&array[index], &array[index + 1], bytes);
+	}
+}
+
+/* Requires ip_lock.
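+ * Unlike the array flavor above, no memmove() is needed here --
+ * rb_erase() simply unlinks the node, and the caller frees it after
+ * dropping ip_lock.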
*/ +static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci, + struct ocfs2_meta_cache_item *item) +{ + mlog(0, "remove block %llu from tree\n", + (unsigned long long) item->c_block); + + rb_erase(&item->c_node, &ci->ci_cache.ci_tree); + ci->ci_num_cached--; +} + +static void ocfs2_remove_block_from_cache(struct inode *inode, + sector_t block) +{ + int index; + struct ocfs2_meta_cache_item *item = NULL; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; + + spin_lock(&oi->ip_lock); + mlog(0, "Inode %llu, remove %llu, items = %u, array = %u\n", + (unsigned long long)oi->ip_blkno, + (unsigned long long) block, ci->ci_num_cached, + oi->ip_flags & OCFS2_INODE_CACHE_INLINE); + + if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) { + index = ocfs2_search_cache_array(ci, block); + if (index != -1) + ocfs2_remove_metadata_array(ci, index); + } else { + item = ocfs2_search_cache_tree(ci, block); + if (item) + ocfs2_remove_metadata_tree(ci, item); + } + spin_unlock(&oi->ip_lock); + + if (item) + kmem_cache_free(ocfs2_uptodate_cachep, item); +} + +/* + * Called when we remove a chunk of metadata from an inode. We don't + * bother reverting things to an inlined array in the case of a remove + * which moves us back under the limit. + */ +void ocfs2_remove_from_cache(struct inode *inode, + struct buffer_head *bh) +{ + sector_t block = bh->b_blocknr; + + ocfs2_remove_block_from_cache(inode, block); +} + +/* Called when we remove xattr clusters from an inode. */ +void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode, + sector_t block, + u32 c_len) +{ + unsigned int i, b_len = ocfs2_clusters_to_blocks(inode->i_sb, 1) * c_len; + + for (i = 0; i < b_len; i++, block++) + ocfs2_remove_block_from_cache(inode, block); +} + +int __init init_ocfs2_uptodate_cache(void) +{ + ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate", + sizeof(struct ocfs2_meta_cache_item), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!ocfs2_uptodate_cachep) + return -ENOMEM; + + mlog(0, "%u inlined cache items per inode.\n", + OCFS2_INODE_MAX_CACHE_ARRAY); + + return 0; +} + +void exit_ocfs2_uptodate_cache(void) +{ + if (ocfs2_uptodate_cachep) + kmem_cache_destroy(ocfs2_uptodate_cachep); +} diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h new file mode 100644 index 0000000..531b4b3 --- /dev/null +++ b/fs/ocfs2/uptodate.h @@ -0,0 +1,49 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * uptodate.h + * + * Cluster uptodate tracking + * + * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef OCFS2_UPTODATE_H +#define OCFS2_UPTODATE_H + +int __init init_ocfs2_uptodate_cache(void); +void exit_ocfs2_uptodate_cache(void); + +void ocfs2_metadata_cache_init(struct inode *inode); +void ocfs2_metadata_cache_purge(struct inode *inode); + +int ocfs2_buffer_uptodate(struct inode *inode, + struct buffer_head *bh); +void ocfs2_set_buffer_uptodate(struct inode *inode, + struct buffer_head *bh); +void ocfs2_set_new_buffer_uptodate(struct inode *inode, + struct buffer_head *bh); +void ocfs2_remove_from_cache(struct inode *inode, + struct buffer_head *bh); +void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode, + sector_t block, + u32 c_len); +int ocfs2_buffer_read_ahead(struct inode *inode, + struct buffer_head *bh); + +#endif /* OCFS2_UPTODATE_H */ diff --git a/fs/ocfs2/ver.c b/fs/ocfs2/ver.c new file mode 100644 index 0000000..e2488f4 --- /dev/null +++ b/fs/ocfs2/ver.c @@ -0,0 +1,43 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ver.c + * + * version string + * + * Copyright (C) 2002, 2005 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include <linux/module.h> +#include <linux/string.h> +#include <linux/kernel.h> + +#include "ver.h" + +#define OCFS2_BUILD_VERSION "1.5.0" + +#define VERSION_STR "OCFS2 " OCFS2_BUILD_VERSION + +void ocfs2_print_version(void) +{ + printk(KERN_INFO "%s\n", VERSION_STR); +} + +MODULE_DESCRIPTION(VERSION_STR); + +MODULE_VERSION(OCFS2_BUILD_VERSION); diff --git a/fs/ocfs2/ver.h b/fs/ocfs2/ver.h new file mode 100644 index 0000000..d7395cb --- /dev/null +++ b/fs/ocfs2/ver.h @@ -0,0 +1,31 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * ver.h + * + * Function prototypes + * + * Copyright (C) 2002, 2004 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef OCFS2_VER_H +#define OCFS2_VER_H + +void ocfs2_print_version(void); + +#endif /* OCFS2_VER_H */ diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c new file mode 100644 index 0000000..74d7367 --- /dev/null +++ b/fs/ocfs2/xattr.c @@ -0,0 +1,4868 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * xattr.c + * + * Copyright (C) 2004, 2008 Oracle. All rights reserved. + * + * CREDITS: + * Lots of code in this file is copy from linux/fs/ext3/xattr.c. + * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include <linux/capability.h> +#include <linux/fs.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/pagemap.h> +#include <linux/uio.h> +#include <linux/sched.h> +#include <linux/splice.h> +#include <linux/mount.h> +#include <linux/writeback.h> +#include <linux/falloc.h> +#include <linux/sort.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/string.h> + +#define MLOG_MASK_PREFIX ML_XATTR +#include <cluster/masklog.h> + +#include "ocfs2.h" +#include "alloc.h" +#include "dlmglue.h" +#include "file.h" +#include "symlink.h" +#include "sysfile.h" +#include "inode.h" +#include "journal.h" +#include "ocfs2_fs.h" +#include "suballoc.h" +#include "uptodate.h" +#include "buffer_head_io.h" +#include "super.h" +#include "xattr.h" + + +struct ocfs2_xattr_def_value_root { + struct ocfs2_xattr_value_root xv; + struct ocfs2_extent_rec er; +}; + +struct ocfs2_xattr_bucket { + struct buffer_head *bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET]; + struct ocfs2_xattr_header *xh; +}; + +#define OCFS2_XATTR_ROOT_SIZE (sizeof(struct ocfs2_xattr_def_value_root)) +#define OCFS2_XATTR_INLINE_SIZE 80 + +static struct ocfs2_xattr_def_value_root def_xv = { + .xv.xr_list.l_count = cpu_to_le16(1), +}; + +struct xattr_handler *ocfs2_xattr_handlers[] = { + &ocfs2_xattr_user_handler, + &ocfs2_xattr_trusted_handler, + NULL +}; + +static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = { + [OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler, + [OCFS2_XATTR_INDEX_TRUSTED] = &ocfs2_xattr_trusted_handler, +}; + +struct ocfs2_xattr_info { + int name_index; + const char *name; + const void *value; + size_t value_len; +}; + +struct ocfs2_xattr_search { + struct buffer_head *inode_bh; + /* + * xattr_bh point to the block buffer head which has extended attribute + * when extended attribute in inode, xattr_bh is equal to inode_bh. 
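+	 * (Inline xattrs live in the inode block itself, so there is
+	 * no separate xattr block in that case.)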
+ */ + struct buffer_head *xattr_bh; + struct ocfs2_xattr_header *header; + struct ocfs2_xattr_bucket bucket; + void *base; + void *end; + struct ocfs2_xattr_entry *here; + int not_found; +}; + +static int ocfs2_xattr_bucket_get_name_value(struct inode *inode, + struct ocfs2_xattr_header *xh, + int index, + int *block_off, + int *new_offset); + +static int ocfs2_xattr_block_find(struct inode *inode, + int name_index, + const char *name, + struct ocfs2_xattr_search *xs); +static int ocfs2_xattr_index_block_find(struct inode *inode, + struct buffer_head *root_bh, + int name_index, + const char *name, + struct ocfs2_xattr_search *xs); + +static int ocfs2_xattr_tree_list_index_block(struct inode *inode, + struct ocfs2_xattr_tree_root *xt, + char *buffer, + size_t buffer_size); + +static int ocfs2_xattr_create_index_block(struct inode *inode, + struct ocfs2_xattr_search *xs); + +static int ocfs2_xattr_set_entry_index_block(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs); + +static int ocfs2_delete_xattr_index_block(struct inode *inode, + struct buffer_head *xb_bh); + +static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb) +{ + return (1 << osb->s_clustersize_bits) / OCFS2_XATTR_BUCKET_SIZE; +} + +static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb) +{ + return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits); +} + +static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) +{ + u16 len = sb->s_blocksize - + offsetof(struct ocfs2_xattr_header, xh_entries); + + return len / sizeof(struct ocfs2_xattr_entry); +} + +static inline const char *ocfs2_xattr_prefix(int name_index) +{ + struct xattr_handler *handler = NULL; + + if (name_index > 0 && name_index < OCFS2_XATTR_MAX) + handler = ocfs2_xattr_handler_map[name_index]; + + return handler ? handler->prefix : NULL; +} + +static u32 ocfs2_xattr_name_hash(struct inode *inode, + const char *name, + int name_len) +{ + /* Get hash value of uuid from super block */ + u32 hash = OCFS2_SB(inode->i_sb)->uuid_hash; + int i; + + /* hash extended attribute name */ + for (i = 0; i < name_len; i++) { + hash = (hash << OCFS2_HASH_SHIFT) ^ + (hash >> (8*sizeof(hash) - OCFS2_HASH_SHIFT)) ^ + *name++; + } + + return hash; +} + +/* + * ocfs2_xattr_hash_entry() + * + * Compute the hash of an extended attribute. 
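+ *
+ * The name hash computed by ocfs2_xattr_name_hash() above is the
+ * volume's uuid_hash folded with each byte of the name; as a sketch
+ * (hash is a u32, so the rotate width is 32):
+ *
+ *	hash = osb->uuid_hash;
+ *	for each byte c of the name:
+ *		hash = (hash << OCFS2_HASH_SHIFT) ^
+ *		       (hash >> (32 - OCFS2_HASH_SHIFT)) ^ c;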
+ */ +static void ocfs2_xattr_hash_entry(struct inode *inode, + struct ocfs2_xattr_header *header, + struct ocfs2_xattr_entry *entry) +{ + u32 hash = 0; + char *name = (char *)header + le16_to_cpu(entry->xe_name_offset); + + hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len); + entry->xe_name_hash = cpu_to_le32(hash); + + return; +} + +static int ocfs2_xattr_extend_allocation(struct inode *inode, + u32 clusters_to_add, + struct buffer_head *xattr_bh, + struct ocfs2_xattr_value_root *xv) +{ + int status = 0; + int restart_func = 0; + int credits = 0; + handle_t *handle = NULL; + struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *meta_ac = NULL; + enum ocfs2_alloc_restarted why; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u32 prev_clusters, logical_start = le32_to_cpu(xv->xr_clusters); + struct ocfs2_extent_tree et; + + mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add); + + ocfs2_init_xattr_value_extent_tree(&et, inode, xattr_bh, xv); + +restart_all: + + status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, + &data_ac, &meta_ac); + if (status) { + mlog_errno(status); + goto leave; + } + + credits = ocfs2_calc_extend_credits(osb->sb, et.et_root_el, + clusters_to_add); + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto leave; + } + +restarted_transaction: + status = ocfs2_journal_access(handle, inode, xattr_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + prev_clusters = le32_to_cpu(xv->xr_clusters); + status = ocfs2_add_clusters_in_btree(osb, + inode, + &logical_start, + clusters_to_add, + 0, + &et, + handle, + data_ac, + meta_ac, + &why); + if ((status < 0) && (status != -EAGAIN)) { + if (status != -ENOSPC) + mlog_errno(status); + goto leave; + } + + status = ocfs2_journal_dirty(handle, xattr_bh); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + clusters_to_add -= le32_to_cpu(xv->xr_clusters) - prev_clusters; + + if (why != RESTART_NONE && clusters_to_add) { + if (why == RESTART_META) { + mlog(0, "restarting function.\n"); + restart_func = 1; + } else { + BUG_ON(why != RESTART_TRANS); + + mlog(0, "restarting transaction.\n"); + /* TODO: This can be more intelligent. */ + credits = ocfs2_calc_extend_credits(osb->sb, + et.et_root_el, + clusters_to_add); + status = ocfs2_extend_trans(handle, credits); + if (status < 0) { + /* handle still has to be committed at + * this point. 
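+ * We report -ENOMEM and fall through to the leave: path so the
+ * clusters already added stay part of a committed transaction.
+ *
+ * The retry protocol of this function, roughly:
+ *
+ *	RESTART_NONE  -> done;
+ *	RESTART_TRANS -> extend this handle's credits and retry the
+ *	                 btree insert (restarted_transaction:);
+ *	RESTART_META  -> commit, free both allocators and redo the
+ *	                 reservations from restart_all:.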
*/ + status = -ENOMEM; + mlog_errno(status); + goto leave; + } + goto restarted_transaction; + } + } + +leave: + if (handle) { + ocfs2_commit_trans(osb, handle); + handle = NULL; + } + if (data_ac) { + ocfs2_free_alloc_context(data_ac); + data_ac = NULL; + } + if (meta_ac) { + ocfs2_free_alloc_context(meta_ac); + meta_ac = NULL; + } + if ((!status) && restart_func) { + restart_func = 0; + goto restart_all; + } + + return status; +} + +static int __ocfs2_remove_xattr_range(struct inode *inode, + struct buffer_head *root_bh, + struct ocfs2_xattr_value_root *xv, + u32 cpos, u32 phys_cpos, u32 len, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct inode *tl_inode = osb->osb_tl_inode; + handle_t *handle; + struct ocfs2_alloc_context *meta_ac = NULL; + struct ocfs2_extent_tree et; + + ocfs2_init_xattr_value_extent_tree(&et, inode, root_bh, xv); + + ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac); + if (ret) { + mlog_errno(ret); + return ret; + } + + mutex_lock(&tl_inode->i_mutex); + + if (ocfs2_truncate_log_needs_flush(osb)) { + ret = __ocfs2_flush_truncate_log(osb); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + + handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac, + dealloc); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + le32_add_cpu(&xv->xr_clusters, -len); + + ret = ocfs2_journal_dirty(handle, root_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len); + if (ret) + mlog_errno(ret); + +out_commit: + ocfs2_commit_trans(osb, handle); +out: + mutex_unlock(&tl_inode->i_mutex); + + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + + return ret; +} + +static int ocfs2_xattr_shrink_size(struct inode *inode, + u32 old_clusters, + u32 new_clusters, + struct buffer_head *root_bh, + struct ocfs2_xattr_value_root *xv) +{ + int ret = 0; + u32 trunc_len, cpos, phys_cpos, alloc_size; + u64 block; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_cached_dealloc_ctxt dealloc; + + ocfs2_init_dealloc_ctxt(&dealloc); + + if (old_clusters <= new_clusters) + return 0; + + cpos = new_clusters; + trunc_len = old_clusters - new_clusters; + while (trunc_len) { + ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos, + &alloc_size, &xv->xr_list); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (alloc_size > trunc_len) + alloc_size = trunc_len; + + ret = __ocfs2_remove_xattr_range(inode, root_bh, xv, cpos, + phys_cpos, alloc_size, + &dealloc); + if (ret) { + mlog_errno(ret); + goto out; + } + + block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); + ocfs2_remove_xattr_clusters_from_cache(inode, block, + alloc_size); + cpos += alloc_size; + trunc_len -= alloc_size; + } + +out: + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &dealloc); + + return ret; +} + +static int ocfs2_xattr_value_truncate(struct inode *inode, + struct buffer_head *root_bh, + struct ocfs2_xattr_value_root *xv, + int len) +{ + int ret; + u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len); + u32 old_clusters = le32_to_cpu(xv->xr_clusters); + + if 
(new_clusters == old_clusters) + return 0; + + if (new_clusters > old_clusters) + ret = ocfs2_xattr_extend_allocation(inode, + new_clusters - old_clusters, + root_bh, xv); + else + ret = ocfs2_xattr_shrink_size(inode, + old_clusters, new_clusters, + root_bh, xv); + + return ret; +} + +static int ocfs2_xattr_list_entry(char *buffer, size_t size, + size_t *result, const char *prefix, + const char *name, int name_len) +{ + char *p = buffer + *result; + int prefix_len = strlen(prefix); + int total_len = prefix_len + name_len + 1; + + *result += total_len; + + /* we are just looking for how big our buffer needs to be */ + if (!size) + return 0; + + if (*result > size) + return -ERANGE; + + memcpy(p, prefix, prefix_len); + memcpy(p + prefix_len, name, name_len); + p[prefix_len + name_len] = '\0'; + + return 0; +} + +static int ocfs2_xattr_list_entries(struct inode *inode, + struct ocfs2_xattr_header *header, + char *buffer, size_t buffer_size) +{ + size_t result = 0; + int i, type, ret; + const char *prefix, *name; + + for (i = 0 ; i < le16_to_cpu(header->xh_count); i++) { + struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; + type = ocfs2_xattr_get_type(entry); + prefix = ocfs2_xattr_prefix(type); + + if (prefix) { + name = (const char *)header + + le16_to_cpu(entry->xe_name_offset); + + ret = ocfs2_xattr_list_entry(buffer, buffer_size, + &result, prefix, name, + entry->xe_name_len); + if (ret) + return ret; + } + } + + return result; +} + +static int ocfs2_xattr_ibody_list(struct inode *inode, + struct ocfs2_dinode *di, + char *buffer, + size_t buffer_size) +{ + struct ocfs2_xattr_header *header = NULL; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + int ret = 0; + + if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) + return ret; + + header = (struct ocfs2_xattr_header *) + ((void *)di + inode->i_sb->s_blocksize - + le16_to_cpu(di->i_xattr_inline_size)); + + ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size); + + return ret; +} + +static int ocfs2_xattr_block_list(struct inode *inode, + struct ocfs2_dinode *di, + char *buffer, + size_t buffer_size) +{ + struct buffer_head *blk_bh = NULL; + struct ocfs2_xattr_block *xb; + int ret = 0; + + if (!di->i_xattr_loc) + return ret; + + ret = ocfs2_read_block(inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + xb = (struct ocfs2_xattr_block *)blk_bh->b_data; + if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { + ret = -EIO; + goto cleanup; + } + + if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { + struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header; + ret = ocfs2_xattr_list_entries(inode, header, + buffer, buffer_size); + } else { + struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; + ret = ocfs2_xattr_tree_list_index_block(inode, xt, + buffer, buffer_size); + } +cleanup: + brelse(blk_bh); + + return ret; +} + +ssize_t ocfs2_listxattr(struct dentry *dentry, + char *buffer, + size_t size) +{ + int ret = 0, i_ret = 0, b_ret = 0; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di = NULL; + struct ocfs2_inode_info *oi = OCFS2_I(dentry->d_inode); + + if (!ocfs2_supports_xattr(OCFS2_SB(dentry->d_sb))) + return -EOPNOTSUPP; + + if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) + return ret; + + ret = ocfs2_inode_lock(dentry->d_inode, &di_bh, 0); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + di = (struct ocfs2_dinode *)di_bh->b_data; + + down_read(&oi->ip_xattr_sem); + i_ret = ocfs2_xattr_ibody_list(dentry->d_inode, di, buffer, size); + if (i_ret < 
0) + b_ret = 0; + else { + if (buffer) { + buffer += i_ret; + size -= i_ret; + } + b_ret = ocfs2_xattr_block_list(dentry->d_inode, di, + buffer, size); + if (b_ret < 0) + i_ret = 0; + } + up_read(&oi->ip_xattr_sem); + ocfs2_inode_unlock(dentry->d_inode, 0); + + brelse(di_bh); + + return i_ret + b_ret; +} + +static int ocfs2_xattr_find_entry(int name_index, + const char *name, + struct ocfs2_xattr_search *xs) +{ + struct ocfs2_xattr_entry *entry; + size_t name_len; + int i, cmp = 1; + + if (name == NULL) + return -EINVAL; + + name_len = strlen(name); + entry = xs->here; + for (i = 0; i < le16_to_cpu(xs->header->xh_count); i++) { + cmp = name_index - ocfs2_xattr_get_type(entry); + if (!cmp) + cmp = name_len - entry->xe_name_len; + if (!cmp) + cmp = memcmp(name, (xs->base + + le16_to_cpu(entry->xe_name_offset)), + name_len); + if (cmp == 0) + break; + entry += 1; + } + xs->here = entry; + + return cmp ? -ENODATA : 0; +} + +static int ocfs2_xattr_get_value_outside(struct inode *inode, + struct ocfs2_xattr_value_root *xv, + void *buffer, + size_t len) +{ + u32 cpos, p_cluster, num_clusters, bpc, clusters; + u64 blkno; + int i, ret = 0; + size_t cplen, blocksize; + struct buffer_head *bh = NULL; + struct ocfs2_extent_list *el; + + el = &xv->xr_list; + clusters = le32_to_cpu(xv->xr_clusters); + bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + blocksize = inode->i_sb->s_blocksize; + + cpos = 0; + while (cpos < clusters) { + ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, + &num_clusters, el); + if (ret) { + mlog_errno(ret); + goto out; + } + + blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); + /* Copy ocfs2_xattr_value */ + for (i = 0; i < num_clusters * bpc; i++, blkno++) { + ret = ocfs2_read_block(inode, blkno, &bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + cplen = len >= blocksize ? 
blocksize : len; + memcpy(buffer, bh->b_data, cplen); + len -= cplen; + buffer += cplen; + + brelse(bh); + bh = NULL; + if (len == 0) + break; + } + cpos += num_clusters; + } +out: + return ret; +} + +static int ocfs2_xattr_ibody_get(struct inode *inode, + int name_index, + const char *name, + void *buffer, + size_t buffer_size, + struct ocfs2_xattr_search *xs) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; + struct ocfs2_xattr_value_root *xv; + size_t size; + int ret = 0; + + if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) + return -ENODATA; + + xs->end = (void *)di + inode->i_sb->s_blocksize; + xs->header = (struct ocfs2_xattr_header *) + (xs->end - le16_to_cpu(di->i_xattr_inline_size)); + xs->base = (void *)xs->header; + xs->here = xs->header->xh_entries; + + ret = ocfs2_xattr_find_entry(name_index, name, xs); + if (ret) + return ret; + size = le64_to_cpu(xs->here->xe_value_size); + if (buffer) { + if (size > buffer_size) + return -ERANGE; + if (ocfs2_xattr_is_local(xs->here)) { + memcpy(buffer, (void *)xs->base + + le16_to_cpu(xs->here->xe_name_offset) + + OCFS2_XATTR_SIZE(xs->here->xe_name_len), size); + } else { + xv = (struct ocfs2_xattr_value_root *) + (xs->base + le16_to_cpu( + xs->here->xe_name_offset) + + OCFS2_XATTR_SIZE(xs->here->xe_name_len)); + ret = ocfs2_xattr_get_value_outside(inode, xv, + buffer, size); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + } + } + + return size; +} + +static int ocfs2_xattr_block_get(struct inode *inode, + int name_index, + const char *name, + void *buffer, + size_t buffer_size, + struct ocfs2_xattr_search *xs) +{ + struct ocfs2_xattr_block *xb; + struct ocfs2_xattr_value_root *xv; + size_t size; + int ret = -ENODATA, name_offset, name_len, block_off, i; + + memset(&xs->bucket, 0, sizeof(xs->bucket)); + + ret = ocfs2_xattr_block_find(inode, name_index, name, xs); + if (ret) { + mlog_errno(ret); + goto cleanup; + } + + if (xs->not_found) { + ret = -ENODATA; + goto cleanup; + } + + xb = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; + size = le64_to_cpu(xs->here->xe_value_size); + if (buffer) { + ret = -ERANGE; + if (size > buffer_size) + goto cleanup; + + name_offset = le16_to_cpu(xs->here->xe_name_offset); + name_len = OCFS2_XATTR_SIZE(xs->here->xe_name_len); + i = xs->here - xs->header->xh_entries; + + if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { + ret = ocfs2_xattr_bucket_get_name_value(inode, + xs->bucket.xh, + i, + &block_off, + &name_offset); + xs->base = xs->bucket.bhs[block_off]->b_data; + } + if (ocfs2_xattr_is_local(xs->here)) { + memcpy(buffer, (void *)xs->base + + name_offset + name_len, size); + } else { + xv = (struct ocfs2_xattr_value_root *) + (xs->base + name_offset + name_len); + ret = ocfs2_xattr_get_value_outside(inode, xv, + buffer, size); + if (ret < 0) { + mlog_errno(ret); + goto cleanup; + } + } + } + ret = size; +cleanup: + for (i = 0; i < OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET; i++) + brelse(xs->bucket.bhs[i]); + memset(&xs->bucket, 0, sizeof(xs->bucket)); + + brelse(xs->xattr_bh); + xs->xattr_bh = NULL; + return ret; +} + +/* ocfs2_xattr_get() + * + * Copy an extended attribute into the buffer provided. + * Buffer is NULL to compute the size of buffer required. 
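+ *
+ * The usual two-pass pattern, sketched (error handling elided and
+ * the kmalloc() flags only illustrative):
+ *
+ *	int sz = ocfs2_xattr_get(inode, idx, name, NULL, 0);
+ *	void *buf = kmalloc(sz, GFP_NOFS);
+ *	sz = ocfs2_xattr_get(inode, idx, name, buf, sz);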
+ */ +static int ocfs2_xattr_get(struct inode *inode, + int name_index, + const char *name, + void *buffer, + size_t buffer_size) +{ + int ret; + struct ocfs2_dinode *di = NULL; + struct buffer_head *di_bh = NULL; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_xattr_search xis = { + .not_found = -ENODATA, + }; + struct ocfs2_xattr_search xbs = { + .not_found = -ENODATA, + }; + + if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) + return -EOPNOTSUPP; + + if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) + ret = -ENODATA; + + ret = ocfs2_inode_lock(inode, &di_bh, 0); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + xis.inode_bh = xbs.inode_bh = di_bh; + di = (struct ocfs2_dinode *)di_bh->b_data; + + down_read(&oi->ip_xattr_sem); + ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, + buffer_size, &xis); + if (ret == -ENODATA && di->i_xattr_loc) + ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, + buffer_size, &xbs); + up_read(&oi->ip_xattr_sem); + ocfs2_inode_unlock(inode, 0); + + brelse(di_bh); + + return ret; +} + +static int __ocfs2_xattr_set_value_outside(struct inode *inode, + struct ocfs2_xattr_value_root *xv, + const void *value, + int value_len) +{ + int ret = 0, i, cp_len, credits; + u16 blocksize = inode->i_sb->s_blocksize; + u32 p_cluster, num_clusters; + u32 cpos = 0, bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len); + u64 blkno; + struct buffer_head *bh = NULL; + handle_t *handle; + + BUG_ON(clusters > le32_to_cpu(xv->xr_clusters)); + + credits = clusters * bpc; + handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + while (cpos < clusters) { + ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, + &num_clusters, &xv->xr_list); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); + + for (i = 0; i < num_clusters * bpc; i++, blkno++) { + ret = ocfs2_read_block(inode, blkno, &bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_journal_access(handle, + inode, + bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + cp_len = value_len > blocksize ? blocksize : value_len; + memcpy(bh->b_data, value, cp_len); + value_len -= cp_len; + value += cp_len; + if (cp_len < blocksize) + memset(bh->b_data + cp_len, 0, + blocksize - cp_len); + + ret = ocfs2_journal_dirty(handle, bh); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + brelse(bh); + bh = NULL; + + /* + * XXX: do we need to empty all the following + * blocks in this cluster? 
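+ * As written, only the tail of the last block actually copied into
+ * is zeroed (the cp_len < blocksize memset above); blocks after it
+ * inside the allocated cluster keep their stale contents, which is
+ * what the question refers to.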
+ */ + if (!value_len) + break; + } + cpos += num_clusters; + } +out_commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + brelse(bh); + + return ret; +} + +static int ocfs2_xattr_cleanup(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + size_t offs) +{ + handle_t *handle = NULL; + int ret = 0; + size_t name_len = strlen(xi->name); + void *val = xs->base + offs; + size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), + OCFS2_XATTR_BLOCK_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + /* Decrease xattr count */ + le16_add_cpu(&xs->header->xh_count, -1); + /* Remove the xattr entry and tree root which has already be set*/ + memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry)); + memset(val, 0, size); + + ret = ocfs2_journal_dirty(handle, xs->xattr_bh); + if (ret < 0) + mlog_errno(ret); +out_commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + return ret; +} + +static int ocfs2_xattr_update_entry(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + size_t offs) +{ + handle_t *handle = NULL; + int ret = 0; + + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), + OCFS2_XATTR_BLOCK_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + xs->here->xe_name_offset = cpu_to_le16(offs); + xs->here->xe_value_size = cpu_to_le64(xi->value_len); + if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE) + ocfs2_xattr_set_local(xs->here, 1); + else + ocfs2_xattr_set_local(xs->here, 0); + ocfs2_xattr_hash_entry(inode, xs->header, xs->here); + + ret = ocfs2_journal_dirty(handle, xs->xattr_bh); + if (ret < 0) + mlog_errno(ret); +out_commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + return ret; +} + +/* + * ocfs2_xattr_set_value_outside() + * + * Set large size value in B tree. + */ +static int ocfs2_xattr_set_value_outside(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + size_t offs) +{ + size_t name_len = strlen(xi->name); + void *val = xs->base + offs; + struct ocfs2_xattr_value_root *xv = NULL; + size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + int ret = 0; + + memset(val, 0, size); + memcpy(val, xi->name, name_len); + xv = (struct ocfs2_xattr_value_root *) + (val + OCFS2_XATTR_SIZE(name_len)); + xv->xr_clusters = 0; + xv->xr_last_eb_blk = 0; + xv->xr_list.l_tree_depth = 0; + xv->xr_list.l_count = cpu_to_le16(1); + xv->xr_list.l_next_free_rec = 0; + + ret = ocfs2_xattr_value_truncate(inode, xs->xattr_bh, xv, + xi->value_len); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + ret = __ocfs2_xattr_set_value_outside(inode, xv, xi->value, + xi->value_len); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + ret = ocfs2_xattr_update_entry(inode, xi, xs, offs); + if (ret < 0) + mlog_errno(ret); + + return ret; +} + +/* + * ocfs2_xattr_set_entry_local() + * + * Set, replace or remove extended attribute in local. 
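+ *
+ * The inline region is laid out with the entry array growing forward
+ * from the header and name/value blobs packed backward from the end:
+ *
+ *	[ xh | xh_entries --> ... free ... <-- name+value pairs ]
+ *
+ * A local pair occupies OCFS2_XATTR_SIZE(name_len) +
+ * OCFS2_XATTR_SIZE(value_len) bytes; a non-local one stores
+ * OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE instead.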
+ */ +static void ocfs2_xattr_set_entry_local(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_entry *last, + size_t min_offs) +{ + size_t name_len = strlen(xi->name); + int i; + + if (xi->value && xs->not_found) { + /* Insert the new xattr entry. */ + le16_add_cpu(&xs->header->xh_count, 1); + ocfs2_xattr_set_type(last, xi->name_index); + ocfs2_xattr_set_local(last, 1); + last->xe_name_len = name_len; + } else { + void *first_val; + void *val; + size_t offs, size; + + first_val = xs->base + min_offs; + offs = le16_to_cpu(xs->here->xe_name_offset); + val = xs->base + offs; + + if (le64_to_cpu(xs->here->xe_value_size) > + OCFS2_XATTR_INLINE_SIZE) + size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_ROOT_SIZE; + else + size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); + + if (xi->value && size == OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(xi->value_len)) { + /* The old and the new value have the + same size. Just replace the value. */ + ocfs2_xattr_set_local(xs->here, 1); + xs->here->xe_value_size = cpu_to_le64(xi->value_len); + /* Clear value bytes. */ + memset(val + OCFS2_XATTR_SIZE(name_len), + 0, + OCFS2_XATTR_SIZE(xi->value_len)); + memcpy(val + OCFS2_XATTR_SIZE(name_len), + xi->value, + xi->value_len); + return; + } + /* Remove the old name+value. */ + memmove(first_val + size, first_val, val - first_val); + memset(first_val, 0, size); + xs->here->xe_name_hash = 0; + xs->here->xe_name_offset = 0; + ocfs2_xattr_set_local(xs->here, 1); + xs->here->xe_value_size = 0; + + min_offs += size; + + /* Adjust all value offsets. */ + last = xs->header->xh_entries; + for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) { + size_t o = le16_to_cpu(last->xe_name_offset); + + if (o < offs) + last->xe_name_offset = cpu_to_le16(o + size); + last += 1; + } + + if (!xi->value) { + /* Remove the old entry. */ + last -= 1; + memmove(xs->here, xs->here + 1, + (void *)last - (void *)xs->here); + memset(last, 0, sizeof(struct ocfs2_xattr_entry)); + le16_add_cpu(&xs->header->xh_count, -1); + } + } + if (xi->value) { + /* Insert the new name+value. */ + size_t size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(xi->value_len); + void *val = xs->base + min_offs - size; + + xs->here->xe_name_offset = cpu_to_le16(min_offs - size); + memset(val, 0, size); + memcpy(val, xi->name, name_len); + memcpy(val + OCFS2_XATTR_SIZE(name_len), + xi->value, + xi->value_len); + xs->here->xe_value_size = cpu_to_le64(xi->value_len); + ocfs2_xattr_set_local(xs->here, 1); + ocfs2_xattr_hash_entry(inode, xs->header, xs->here); + } + + return; +} + +/* + * ocfs2_xattr_set_entry() + * + * Set extended attribute entry into inode or block. + * + * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE, + * We first insert tree root(ocfs2_xattr_value_root) with set_entry_local(), + * then set value in B tree with set_value_outside(). 
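+ *
+ * Sketched:
+ *
+ *	1) set_entry_local() writes the name plus an empty value root
+ *	   (def_xv, OCFS2_XATTR_ROOT_SIZE bytes) where the value would
+ *	   normally sit;
+ *	2) set_value_outside() grows that root's extent list to the
+ *	   needed clusters and copies the real value into them.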
+ */ +static int ocfs2_xattr_set_entry(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + int flag) +{ + struct ocfs2_xattr_entry *last; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; + size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name); + size_t size_l = 0; + handle_t *handle = NULL; + int free, i, ret; + struct ocfs2_xattr_info xi_l = { + .name_index = xi->name_index, + .name = xi->name, + .value = xi->value, + .value_len = xi->value_len, + }; + + /* Compute min_offs, last and free space. */ + last = xs->header->xh_entries; + + for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) { + size_t offs = le16_to_cpu(last->xe_name_offset); + if (offs < min_offs) + min_offs = offs; + last += 1; + } + + free = min_offs - ((void *)last - xs->base) - sizeof(__u32); + if (free < 0) + return -EIO; + + if (!xs->not_found) { + size_t size = 0; + if (ocfs2_xattr_is_local(xs->here)) + size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); + else + size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_ROOT_SIZE; + free += (size + sizeof(struct ocfs2_xattr_entry)); + } + /* Check free space in inode or block */ + if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + if (free < sizeof(struct ocfs2_xattr_entry) + + OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_ROOT_SIZE) { + ret = -ENOSPC; + goto out; + } + size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + xi_l.value = (void *)&def_xv; + xi_l.value_len = OCFS2_XATTR_ROOT_SIZE; + } else if (xi->value) { + if (free < sizeof(struct ocfs2_xattr_entry) + + OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(xi->value_len)) { + ret = -ENOSPC; + goto out; + } + } + + if (!xs->not_found) { + /* For existing extended attribute */ + size_t size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); + size_t offs = le16_to_cpu(xs->here->xe_name_offset); + void *val = xs->base + offs; + + if (ocfs2_xattr_is_local(xs->here) && size == size_l) { + /* Replace existing local xattr with tree root */ + ret = ocfs2_xattr_set_value_outside(inode, xi, xs, + offs); + if (ret < 0) + mlog_errno(ret); + goto out; + } else if (!ocfs2_xattr_is_local(xs->here)) { + /* For existing xattr which has value outside */ + struct ocfs2_xattr_value_root *xv = NULL; + xv = (struct ocfs2_xattr_value_root *)(val + + OCFS2_XATTR_SIZE(name_len)); + + if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + /* + * If new value need set outside also, + * first truncate old value to new value, + * then set new value with set_value_outside(). + */ + ret = ocfs2_xattr_value_truncate(inode, + xs->xattr_bh, + xv, + xi->value_len); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = __ocfs2_xattr_set_value_outside(inode, + xv, + xi->value, + xi->value_len); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_xattr_update_entry(inode, + xi, + xs, + offs); + if (ret < 0) + mlog_errno(ret); + goto out; + } else { + /* + * If new value need set in local, + * just trucate old value to zero. 
+ */ + ret = ocfs2_xattr_value_truncate(inode, + xs->xattr_bh, + xv, + 0); + if (ret < 0) + mlog_errno(ret); + } + } + } + + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), + OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, xs->inode_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + if (!(flag & OCFS2_INLINE_XATTR_FL)) { + /* set extended attribute in external block. */ + ret = ocfs2_extend_trans(handle, + OCFS2_INODE_UPDATE_CREDITS + + OCFS2_XATTR_BLOCK_UPDATE_CREDITS); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + ret = ocfs2_journal_access(handle, inode, xs->xattr_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + } + + /* + * Set value in local, include set tree root in local. + * This is the first step for value size >INLINE_SIZE. + */ + ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs); + + if (!(flag & OCFS2_INLINE_XATTR_FL)) { + ret = ocfs2_journal_dirty(handle, xs->xattr_bh); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + } + + if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) && + (flag & OCFS2_INLINE_XATTR_FL)) { + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + unsigned int xattrsize = osb->s_xattr_inline_size; + + /* + * Adjust extent record count or inline data size + * to reserve space for extended attribute. + */ + if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + struct ocfs2_inline_data *idata = &di->id2.i_data; + le16_add_cpu(&idata->id_count, -xattrsize); + } else if (!(ocfs2_inode_is_fast_symlink(inode))) { + struct ocfs2_extent_list *el = &di->id2.i_list; + le16_add_cpu(&el->l_count, -(xattrsize / + sizeof(struct ocfs2_extent_rec))); + } + di->i_xattr_inline_size = cpu_to_le16(xattrsize); + } + /* Update xattr flag */ + spin_lock(&oi->ip_lock); + oi->ip_dyn_features |= flag; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + spin_unlock(&oi->ip_lock); + /* Update inode ctime */ + inode->i_ctime = CURRENT_TIME; + di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); + di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); + + ret = ocfs2_journal_dirty(handle, xs->inode_bh); + if (ret < 0) + mlog_errno(ret); + +out_commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); + + if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + /* + * Set value outside in B tree. + * This is the second step for value size > INLINE_SIZE. + */ + size_t offs = le16_to_cpu(xs->here->xe_name_offset); + ret = ocfs2_xattr_set_value_outside(inode, xi, xs, offs); + if (ret < 0) { + int ret2; + + mlog_errno(ret); + /* + * If set value outside failed, we have to clean + * the junk tree root we have already set in local. 
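+ * (ocfs2_xattr_cleanup() below drops xh_count by one and zeroes the
+ * entry along with the name + tree root bytes it had reserved.)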
+ */ + ret2 = ocfs2_xattr_cleanup(inode, xi, xs, offs); + if (ret2 < 0) + mlog_errno(ret2); + } + } +out: + return ret; + +} + +static int ocfs2_remove_value_outside(struct inode*inode, + struct buffer_head *bh, + struct ocfs2_xattr_header *header) +{ + int ret = 0, i; + + for (i = 0; i < le16_to_cpu(header->xh_count); i++) { + struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; + + if (!ocfs2_xattr_is_local(entry)) { + struct ocfs2_xattr_value_root *xv; + void *val; + + val = (void *)header + + le16_to_cpu(entry->xe_name_offset); + xv = (struct ocfs2_xattr_value_root *) + (val + OCFS2_XATTR_SIZE(entry->xe_name_len)); + ret = ocfs2_xattr_value_truncate(inode, bh, xv, 0); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + } + } + + return ret; +} + +static int ocfs2_xattr_ibody_remove(struct inode *inode, + struct buffer_head *di_bh) +{ + + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_xattr_header *header; + int ret; + + header = (struct ocfs2_xattr_header *) + ((void *)di + inode->i_sb->s_blocksize - + le16_to_cpu(di->i_xattr_inline_size)); + + ret = ocfs2_remove_value_outside(inode, di_bh, header); + + return ret; +} + +static int ocfs2_xattr_block_remove(struct inode *inode, + struct buffer_head *blk_bh) +{ + struct ocfs2_xattr_block *xb; + int ret = 0; + + xb = (struct ocfs2_xattr_block *)blk_bh->b_data; + if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { + struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header); + ret = ocfs2_remove_value_outside(inode, blk_bh, header); + } else + ret = ocfs2_delete_xattr_index_block(inode, blk_bh); + + return ret; +} + +static int ocfs2_xattr_free_block(struct inode *inode, + u64 block) +{ + struct inode *xb_alloc_inode; + struct buffer_head *xb_alloc_bh = NULL; + struct buffer_head *blk_bh = NULL; + struct ocfs2_xattr_block *xb; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + handle_t *handle; + int ret = 0; + u64 blk, bg_blkno; + u16 bit; + + ret = ocfs2_read_block(inode, block, &blk_bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + xb = (struct ocfs2_xattr_block *)blk_bh->b_data; + if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { + ret = -EIO; + goto out; + } + + ret = ocfs2_xattr_block_remove(inode, blk_bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + blk = le64_to_cpu(xb->xb_blkno); + bit = le16_to_cpu(xb->xb_suballoc_bit); + bg_blkno = ocfs2_which_suballoc_group(blk, bit); + + xb_alloc_inode = ocfs2_get_system_file_inode(osb, + EXTENT_ALLOC_SYSTEM_INODE, + le16_to_cpu(xb->xb_suballoc_slot)); + if (!xb_alloc_inode) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + mutex_lock(&xb_alloc_inode->i_mutex); + + ret = ocfs2_inode_lock(xb_alloc_inode, &xb_alloc_bh, 1); + if (ret < 0) { + mlog_errno(ret); + goto out_mutex; + } + + handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out_unlock; + } + + ret = ocfs2_free_suballoc_bits(handle, xb_alloc_inode, xb_alloc_bh, + bit, bg_blkno, 1); + if (ret < 0) + mlog_errno(ret); + + ocfs2_commit_trans(osb, handle); +out_unlock: + ocfs2_inode_unlock(xb_alloc_inode, 1); + brelse(xb_alloc_bh); +out_mutex: + mutex_unlock(&xb_alloc_inode->i_mutex); + iput(xb_alloc_inode); +out: + brelse(blk_bh); + return ret; +} + +/* + * ocfs2_xattr_remove() + * + * Free extended attribute resources associated with this inode. 
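+ *
+ * The order of teardown below, in brief:
+ *
+ *	1) truncate any out-of-inode values referenced from the
+ *	   inline area (ocfs2_xattr_ibody_remove());
+ *	2) free the external xattr block, indexed or not
+ *	   (ocfs2_xattr_free_block());
+ *	3) clear i_xattr_loc and the INLINE_XATTR/HAS_XATTR dinode
+ *	   flags in one small journalled update.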
+ */ +int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + handle_t *handle; + int ret; + + if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) + return 0; + + if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) + return 0; + + if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { + ret = ocfs2_xattr_ibody_remove(inode, di_bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + + if (di->i_xattr_loc) { + ret = ocfs2_xattr_free_block(inode, + le64_to_cpu(di->i_xattr_loc)); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), + OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + ret = ocfs2_journal_access(handle, inode, di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + di->i_xattr_loc = 0; + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features &= ~(OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL); + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + spin_unlock(&oi->ip_lock); + + ret = ocfs2_journal_dirty(handle, di_bh); + if (ret < 0) + mlog_errno(ret); +out_commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + return ret; +} + +static int ocfs2_xattr_has_space_inline(struct inode *inode, + struct ocfs2_dinode *di) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + unsigned int xattrsize = OCFS2_SB(inode->i_sb)->s_xattr_inline_size; + int free; + + if (xattrsize < OCFS2_MIN_XATTR_INLINE_SIZE) + return 0; + + if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + struct ocfs2_inline_data *idata = &di->id2.i_data; + free = le16_to_cpu(idata->id_count) - le64_to_cpu(di->i_size); + } else if (ocfs2_inode_is_fast_symlink(inode)) { + free = ocfs2_fast_symlink_chars(inode->i_sb) - + le64_to_cpu(di->i_size); + } else { + struct ocfs2_extent_list *el = &di->id2.i_list; + free = (le16_to_cpu(el->l_count) - + le16_to_cpu(el->l_next_free_rec)) * + sizeof(struct ocfs2_extent_rec); + } + if (free >= xattrsize) + return 1; + + return 0; +} + +/* + * ocfs2_xattr_ibody_find() + * + * Find extended attribute in inode block and + * fill search info into struct ocfs2_xattr_search. + */ +static int ocfs2_xattr_ibody_find(struct inode *inode, + int name_index, + const char *name, + struct ocfs2_xattr_search *xs) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; + int ret; + int has_space = 0; + + if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) + return 0; + + if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { + down_read(&oi->ip_alloc_sem); + has_space = ocfs2_xattr_has_space_inline(inode, di); + up_read(&oi->ip_alloc_sem); + if (!has_space) + return 0; + } + + xs->xattr_bh = xs->inode_bh; + xs->end = (void *)di + inode->i_sb->s_blocksize; + if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) + xs->header = (struct ocfs2_xattr_header *) + (xs->end - le16_to_cpu(di->i_xattr_inline_size)); + else + xs->header = (struct ocfs2_xattr_header *) + (xs->end - OCFS2_SB(inode->i_sb)->s_xattr_inline_size); + xs->base = (void *)xs->header; + xs->here = xs->header->xh_entries; + + /* Find the named attribute. 
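+ * xs->header was placed i_xattr_inline_size bytes back from the end
+ * of the dinode block, so the lookup treats the inline area exactly
+ * like an external block's header.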
*/ + if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { + ret = ocfs2_xattr_find_entry(name_index, name, xs); + if (ret && ret != -ENODATA) + return ret; + xs->not_found = ret; + } + + return 0; +} + +/* + * ocfs2_xattr_ibody_set() + * + * Set, replace or remove an extended attribute into inode block. + * + */ +static int ocfs2_xattr_ibody_set(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs) +{ + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; + int ret; + + if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) + return -ENOSPC; + + down_write(&oi->ip_alloc_sem); + if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { + if (!ocfs2_xattr_has_space_inline(inode, di)) { + ret = -ENOSPC; + goto out; + } + } + + ret = ocfs2_xattr_set_entry(inode, xi, xs, + (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL)); +out: + up_write(&oi->ip_alloc_sem); + + return ret; +} + +/* + * ocfs2_xattr_block_find() + * + * Find extended attribute in external block and + * fill search info into struct ocfs2_xattr_search. + */ +static int ocfs2_xattr_block_find(struct inode *inode, + int name_index, + const char *name, + struct ocfs2_xattr_search *xs) +{ + struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; + struct buffer_head *blk_bh = NULL; + struct ocfs2_xattr_block *xb; + int ret = 0; + + if (!di->i_xattr_loc) + return ret; + + ret = ocfs2_read_block(inode, le64_to_cpu(di->i_xattr_loc), &blk_bh); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + xb = (struct ocfs2_xattr_block *)blk_bh->b_data; + if (!OCFS2_IS_VALID_XATTR_BLOCK(xb)) { + ret = -EIO; + goto cleanup; + } + + xs->xattr_bh = blk_bh; + + if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { + xs->header = &xb->xb_attrs.xb_header; + xs->base = (void *)xs->header; + xs->end = (void *)(blk_bh->b_data) + blk_bh->b_size; + xs->here = xs->header->xh_entries; + + ret = ocfs2_xattr_find_entry(name_index, name, xs); + } else + ret = ocfs2_xattr_index_block_find(inode, blk_bh, + name_index, + name, xs); + + if (ret && ret != -ENODATA) { + xs->xattr_bh = NULL; + goto cleanup; + } + xs->not_found = ret; + return 0; +cleanup: + brelse(blk_bh); + + return ret; +} + +/* + * ocfs2_xattr_block_set() + * + * Set, replace or remove an extended attribute into external block. + * + */ +static int ocfs2_xattr_block_set(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs) +{ + struct buffer_head *new_bh = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; + struct ocfs2_alloc_context *meta_ac = NULL; + handle_t *handle = NULL; + struct ocfs2_xattr_block *xblk = NULL; + u16 suballoc_bit_start; + u32 num_got; + u64 first_blkno; + int ret; + + if (!xs->xattr_bh) { + /* + * Alloc one external block for extended attribute + * outside of inode. 
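+ * In brief: reserve one metadata block, start a transaction with
+ * OCFS2_XATTR_BLOCK_CREATE_CREDITS, claim the block, stamp it with
+ * OCFS2_XATTR_BLOCK_SIGNATURE and its suballocator coordinates,
+ * then point di->i_xattr_loc at the new block number.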
+ */ + ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + handle = ocfs2_start_trans(osb, + OCFS2_XATTR_BLOCK_CREATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + ret = ocfs2_journal_access(handle, inode, xs->inode_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, + &suballoc_bit_start, &num_got, + &first_blkno); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + new_bh = sb_getblk(inode->i_sb, first_blkno); + ocfs2_set_new_buffer_uptodate(inode, new_bh); + + ret = ocfs2_journal_access(handle, inode, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + /* Initialize ocfs2_xattr_block */ + xs->xattr_bh = new_bh; + xblk = (struct ocfs2_xattr_block *)new_bh->b_data; + memset(xblk, 0, inode->i_sb->s_blocksize); + strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE); + xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num); + xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start); + xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation); + xblk->xb_blkno = cpu_to_le64(first_blkno); + + xs->header = &xblk->xb_attrs.xb_header; + xs->base = (void *)xs->header; + xs->end = (void *)xblk + inode->i_sb->s_blocksize; + xs->here = xs->header->xh_entries; + + + ret = ocfs2_journal_dirty(handle, new_bh); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + di->i_xattr_loc = cpu_to_le64(first_blkno); + ret = ocfs2_journal_dirty(handle, xs->inode_bh); + if (ret < 0) + mlog_errno(ret); +out_commit: + ocfs2_commit_trans(osb, handle); +out: + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + if (ret < 0) + return ret; + } else + xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; + + if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) { + /* Set extended attribute into external block */ + ret = ocfs2_xattr_set_entry(inode, xi, xs, OCFS2_HAS_XATTR_FL); + if (!ret || ret != -ENOSPC) + goto end; + + ret = ocfs2_xattr_create_index_block(inode, xs); + if (ret) + goto end; + } + + ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs); + +end: + + return ret; +} + +/* + * ocfs2_xattr_set() + * + * Set, replace or remove an extended attribute for this inode. + * value is NULL to remove an existing extended attribute, else either + * create or replace an extended attribute. + */ +int ocfs2_xattr_set(struct inode *inode, + int name_index, + const char *name, + const void *value, + size_t value_len, + int flags) +{ + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di; + int ret; + u16 i, blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + struct ocfs2_xattr_info xi = { + .name_index = name_index, + .name = name, + .value = value, + .value_len = value_len, + }; + + struct ocfs2_xattr_search xis = { + .not_found = -ENODATA, + }; + + struct ocfs2_xattr_search xbs = { + .not_found = -ENODATA, + }; + + if (!ocfs2_supports_xattr(OCFS2_SB(inode->i_sb))) + return -EOPNOTSUPP; + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + xis.inode_bh = xbs.inode_bh = di_bh; + di = (struct ocfs2_dinode *)di_bh->b_data; + + down_write(&OCFS2_I(inode)->ip_xattr_sem); + /* + * Scan inode and external block to find the same name + * extended attribute and collect search infomation. 
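+ *
+ * For illustration, a call such as
+ *
+ *	setxattr("/mnt/file", "user.foo", "bar", 3, 0);
+ *
+ * reaches this function through the generic xattr handlers with
+ * name_index == OCFS2_XATTR_INDEX_USER and name == "foo" (the
+ * "user." prefix already stripped).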
+ */
+	ret = ocfs2_xattr_ibody_find(inode, name_index, name, &xis);
+	if (ret)
+		goto cleanup;
+	if (xis.not_found) {
+		ret = ocfs2_xattr_block_find(inode, name_index, name, &xbs);
+		if (ret)
+			goto cleanup;
+	}
+
+	if (xis.not_found && xbs.not_found) {
+		ret = -ENODATA;
+		if (flags & XATTR_REPLACE)
+			goto cleanup;
+		ret = 0;
+		if (!value)
+			goto cleanup;
+	} else {
+		ret = -EEXIST;
+		if (flags & XATTR_CREATE)
+			goto cleanup;
+	}
+
+	if (!value) {
+		/* Remove the existing extended attribute. */
+		if (!xis.not_found)
+			ret = ocfs2_xattr_ibody_set(inode, &xi, &xis);
+		else if (!xbs.not_found)
+			ret = ocfs2_xattr_block_set(inode, &xi, &xbs);
+	} else {
+		/* We always try to set the extended attribute in the
+		 * inode first. */
+		ret = ocfs2_xattr_ibody_set(inode, &xi, &xis);
+		if (!ret && !xbs.not_found) {
+			/*
+			 * That succeeded, and the attribute also exists
+			 * in the external block, so remove the external
+			 * copy.
+			 */
+			xi.value = NULL;
+			xi.value_len = 0;
+			ret = ocfs2_xattr_block_set(inode, &xi, &xbs);
+		} else if (ret == -ENOSPC) {
+			if (di->i_xattr_loc && !xbs.xattr_bh) {
+				ret = ocfs2_xattr_block_find(inode, name_index,
+							     name, &xbs);
+				if (ret)
+					goto cleanup;
+			}
+			/*
+			 * There is no space left in the inode, so set
+			 * the extended attribute in the external block.
+			 */
+			ret = ocfs2_xattr_block_set(inode, &xi, &xbs);
+			if (ret)
+				goto cleanup;
+			if (!xis.not_found) {
+				/*
+				 * That succeeded, and the attribute still
+				 * exists in the inode, so remove the inode
+				 * copy.
+				 */
+				xi.value = NULL;
+				xi.value_len = 0;
+				ret = ocfs2_xattr_ibody_set(inode, &xi, &xis);
+			}
+		}
+	}
+cleanup:
+	up_write(&OCFS2_I(inode)->ip_xattr_sem);
+	ocfs2_inode_unlock(inode, 1);
+	brelse(di_bh);
+	brelse(xbs.xattr_bh);
+	for (i = 0; i < blk_per_bucket; i++)
+		brelse(xbs.bucket.bhs[i]);
+
+	return ret;
+}
+
+/*
+ * Find the xattr extent rec which may contain name_hash.
+ * e_cpos will be the first name hash of the xattr rec.
+ * el must be the ocfs2_xattr_block.xb_attrs.xb_root.xt_list.
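+ *
+ * Given a name hash, walk the extent list down to a leaf and pick
+ * the record covering that hash:
+ *
+ *	scan l_recs[] from the last used rec downward and take the
+ *	first one with le32_to_cpu(rec->e_cpos) <= name_hash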
+ */ +static int ocfs2_xattr_get_rec(struct inode *inode, + u32 name_hash, + u64 *p_blkno, + u32 *e_cpos, + u32 *num_clusters, + struct ocfs2_extent_list *el) +{ + int ret = 0, i; + struct buffer_head *eb_bh = NULL; + struct ocfs2_extent_block *eb; + struct ocfs2_extent_rec *rec = NULL; + u64 e_blkno = 0; + + if (el->l_tree_depth) { + ret = ocfs2_find_leaf(inode, el, name_hash, &eb_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + + if (el->l_tree_depth) { + ocfs2_error(inode->i_sb, + "Inode %lu has non zero tree depth in " + "xattr tree block %llu\n", inode->i_ino, + (unsigned long long)eb_bh->b_blocknr); + ret = -EROFS; + goto out; + } + } + + for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { + rec = &el->l_recs[i]; + + if (le32_to_cpu(rec->e_cpos) <= name_hash) { + e_blkno = le64_to_cpu(rec->e_blkno); + break; + } + } + + if (!e_blkno) { + ocfs2_error(inode->i_sb, "Inode %lu has bad extent " + "record (%u, %u, 0) in xattr", inode->i_ino, + le32_to_cpu(rec->e_cpos), + ocfs2_rec_clusters(el, rec)); + ret = -EROFS; + goto out; + } + + *p_blkno = le64_to_cpu(rec->e_blkno); + *num_clusters = le16_to_cpu(rec->e_leaf_clusters); + if (e_cpos) + *e_cpos = le32_to_cpu(rec->e_cpos); +out: + brelse(eb_bh); + return ret; +} + +typedef int (xattr_bucket_func)(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + void *para); + +static int ocfs2_find_xe_in_bucket(struct inode *inode, + struct buffer_head *header_bh, + int name_index, + const char *name, + u32 name_hash, + u16 *xe_index, + int *found) +{ + int i, ret = 0, cmp = 1, block_off, new_offset; + struct ocfs2_xattr_header *xh = + (struct ocfs2_xattr_header *)header_bh->b_data; + size_t name_len = strlen(name); + struct ocfs2_xattr_entry *xe = NULL; + struct buffer_head *name_bh = NULL; + char *xe_name; + + /* + * We don't use binary search in the bucket because there + * may be multiple entries with the same name hash. + */ + for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { + xe = &xh->xh_entries[i]; + + if (name_hash > le32_to_cpu(xe->xe_name_hash)) + continue; + else if (name_hash < le32_to_cpu(xe->xe_name_hash)) + break; + + cmp = name_index - ocfs2_xattr_get_type(xe); + if (!cmp) + cmp = name_len - xe->xe_name_len; + if (cmp) + continue; + + ret = ocfs2_xattr_bucket_get_name_value(inode, + xh, + i, + &block_off, + &new_offset); + if (ret) { + mlog_errno(ret); + break; + } + + ret = ocfs2_read_block(inode, header_bh->b_blocknr + block_off, + &name_bh); + if (ret) { + mlog_errno(ret); + break; + } + xe_name = name_bh->b_data + new_offset; + + cmp = memcmp(name, xe_name, name_len); + brelse(name_bh); + name_bh = NULL; + + if (cmp == 0) { + *xe_index = i; + *found = 1; + ret = 0; + break; + } + } + + return ret; +} + +/* + * Find the specified xattr entry in a series of buckets. + * This series start from p_blkno and last for num_clusters. + * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains + * the num of the valid buckets. + * + * Return the buffer_head this xattr should reside in. And if the xattr's + * hash is in the gap of 2 buckets, return the lower bucket. 
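+ *
+ * The search over buckets [0, xh_num_buckets), sketched:
+ *
+ *	while (low_bucket <= high_bucket):
+ *		bucket = (low_bucket + high_bucket) / 2;
+ *		hash < first hash in bucket -> high_bucket = bucket - 1
+ *		hash > last hash in bucket  -> low_bucket = bucket + 1,
+ *		                               remember bucket (lower_bh)
+ *		otherwise                   -> scan entries in bucket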
+ */ +static int ocfs2_xattr_bucket_find(struct inode *inode, + int name_index, + const char *name, + u32 name_hash, + u64 p_blkno, + u32 first_hash, + u32 num_clusters, + struct ocfs2_xattr_search *xs) +{ + int ret, found = 0; + struct buffer_head *bh = NULL; + struct buffer_head *lower_bh = NULL; + struct ocfs2_xattr_header *xh = NULL; + struct ocfs2_xattr_entry *xe = NULL; + u16 index = 0; + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + int low_bucket = 0, bucket, high_bucket; + u32 last_hash; + u64 blkno; + + ret = ocfs2_read_block(inode, p_blkno, &bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + xh = (struct ocfs2_xattr_header *)bh->b_data; + high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1; + + while (low_bucket <= high_bucket) { + brelse(bh); + bh = NULL; + bucket = (low_bucket + high_bucket) / 2; + + blkno = p_blkno + bucket * blk_per_bucket; + + ret = ocfs2_read_block(inode, blkno, &bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + xh = (struct ocfs2_xattr_header *)bh->b_data; + xe = &xh->xh_entries[0]; + if (name_hash < le32_to_cpu(xe->xe_name_hash)) { + high_bucket = bucket - 1; + continue; + } + + /* + * Check whether the hash of the last entry in our + * bucket is larger than the search one. for an empty + * bucket, the last one is also the first one. + */ + if (xh->xh_count) + xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1]; + + last_hash = le32_to_cpu(xe->xe_name_hash); + + /* record lower_bh which may be the insert place. */ + brelse(lower_bh); + lower_bh = bh; + bh = NULL; + + if (name_hash > le32_to_cpu(xe->xe_name_hash)) { + low_bucket = bucket + 1; + continue; + } + + /* the searched xattr should reside in this bucket if exists. */ + ret = ocfs2_find_xe_in_bucket(inode, lower_bh, + name_index, name, name_hash, + &index, &found); + if (ret) { + mlog_errno(ret); + goto out; + } + break; + } + + /* + * Record the bucket we have found. + * When the xattr's hash value is in the gap of 2 buckets, we will + * always set it to the previous bucket. + */ + if (!lower_bh) { + /* + * We can't find any bucket whose first name_hash is less + * than the find name_hash. + */ + BUG_ON(bh->b_blocknr != p_blkno); + lower_bh = bh; + bh = NULL; + } + xs->bucket.bhs[0] = lower_bh; + xs->bucket.xh = (struct ocfs2_xattr_header *) + xs->bucket.bhs[0]->b_data; + lower_bh = NULL; + + xs->header = xs->bucket.xh; + xs->base = xs->bucket.bhs[0]->b_data; + xs->end = xs->base + inode->i_sb->s_blocksize; + + if (found) { + /* + * If we have found the xattr enty, read all the blocks in + * this bucket. 
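+ * A bucket is OCFS2_XATTR_BUCKET_SIZE bytes, i.e. blk_per_bucket =
+ * OCFS2_XATTR_BUCKET_SIZE / blocksize buffer heads; only bhs[0] was
+ * needed for the binary search, the rest are pulled in here.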
+ */ + ret = ocfs2_read_blocks(inode, xs->bucket.bhs[0]->b_blocknr + 1, + blk_per_bucket - 1, &xs->bucket.bhs[1], + 0); + if (ret) { + mlog_errno(ret); + goto out; + } + + xs->here = &xs->header->xh_entries[index]; + mlog(0, "find xattr %s in bucket %llu, entry = %u\n", name, + (unsigned long long)xs->bucket.bhs[0]->b_blocknr, index); + } else + ret = -ENODATA; + +out: + brelse(bh); + brelse(lower_bh); + return ret; +} + +static int ocfs2_xattr_index_block_find(struct inode *inode, + struct buffer_head *root_bh, + int name_index, + const char *name, + struct ocfs2_xattr_search *xs) +{ + int ret; + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)root_bh->b_data; + struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root; + struct ocfs2_extent_list *el = &xb_root->xt_list; + u64 p_blkno = 0; + u32 first_hash, num_clusters = 0; + u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name)); + + if (le16_to_cpu(el->l_next_free_rec) == 0) + return -ENODATA; + + mlog(0, "find xattr %s, hash = %u, index = %d in xattr tree\n", + name, name_hash, name_index); + + ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &first_hash, + &num_clusters, el); + if (ret) { + mlog_errno(ret); + goto out; + } + + BUG_ON(p_blkno == 0 || num_clusters == 0 || first_hash > name_hash); + + mlog(0, "find xattr extent rec %u clusters from %llu, the first hash " + "in the rec is %u\n", num_clusters, (unsigned long long)p_blkno, + first_hash); + + ret = ocfs2_xattr_bucket_find(inode, name_index, name, name_hash, + p_blkno, first_hash, num_clusters, xs); + +out: + return ret; +} + +static int ocfs2_iterate_xattr_buckets(struct inode *inode, + u64 blkno, + u32 clusters, + xattr_bucket_func *func, + void *para) +{ + int i, j, ret = 0; + int blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)); + u32 num_buckets = clusters * bpc; + struct ocfs2_xattr_bucket bucket; + + memset(&bucket, 0, sizeof(bucket)); + + mlog(0, "iterating xattr buckets in %u clusters starting from %llu\n", + clusters, (unsigned long long)blkno); + + for (i = 0; i < num_buckets; i++, blkno += blk_per_bucket) { + ret = ocfs2_read_blocks(inode, blkno, blk_per_bucket, + bucket.bhs, 0); + if (ret) { + mlog_errno(ret); + goto out; + } + + bucket.xh = (struct ocfs2_xattr_header *)bucket.bhs[0]->b_data; + /* + * The real bucket num in this series of blocks is stored + * in the 1st bucket. 
+ */ + if (i == 0) + num_buckets = le16_to_cpu(bucket.xh->xh_num_buckets); + + mlog(0, "iterating xattr bucket %llu, first hash %u\n", + (unsigned long long)blkno, + le32_to_cpu(bucket.xh->xh_entries[0].xe_name_hash)); + if (func) { + ret = func(inode, &bucket, para); + if (ret) { + mlog_errno(ret); + break; + } + } + + for (j = 0; j < blk_per_bucket; j++) + brelse(bucket.bhs[j]); + memset(&bucket, 0, sizeof(bucket)); + } + +out: + for (j = 0; j < blk_per_bucket; j++) + brelse(bucket.bhs[j]); + + return ret; +} + +struct ocfs2_xattr_tree_list { + char *buffer; + size_t buffer_size; + size_t result; +}; + +static int ocfs2_xattr_bucket_get_name_value(struct inode *inode, + struct ocfs2_xattr_header *xh, + int index, + int *block_off, + int *new_offset) +{ + u16 name_offset; + + if (index < 0 || index >= le16_to_cpu(xh->xh_count)) + return -EINVAL; + + name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset); + + *block_off = name_offset >> inode->i_sb->s_blocksize_bits; + *new_offset = name_offset % inode->i_sb->s_blocksize; + + return 0; +} + +static int ocfs2_list_xattr_bucket(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + void *para) +{ + int ret = 0, type; + struct ocfs2_xattr_tree_list *xl = (struct ocfs2_xattr_tree_list *)para; + int i, block_off, new_offset; + const char *prefix, *name; + + for (i = 0 ; i < le16_to_cpu(bucket->xh->xh_count); i++) { + struct ocfs2_xattr_entry *entry = &bucket->xh->xh_entries[i]; + type = ocfs2_xattr_get_type(entry); + prefix = ocfs2_xattr_prefix(type); + + if (prefix) { + ret = ocfs2_xattr_bucket_get_name_value(inode, + bucket->xh, + i, + &block_off, + &new_offset); + if (ret) + break; + + name = (const char *)bucket->bhs[block_off]->b_data + + new_offset; + ret = ocfs2_xattr_list_entry(xl->buffer, + xl->buffer_size, + &xl->result, + prefix, name, + entry->xe_name_len); + if (ret) + break; + } + } + + return ret; +} + +static int ocfs2_xattr_tree_list_index_block(struct inode *inode, + struct ocfs2_xattr_tree_root *xt, + char *buffer, + size_t buffer_size) +{ + struct ocfs2_extent_list *el = &xt->xt_list; + int ret = 0; + u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0; + u64 p_blkno = 0; + struct ocfs2_xattr_tree_list xl = { + .buffer = buffer, + .buffer_size = buffer_size, + .result = 0, + }; + + if (le16_to_cpu(el->l_next_free_rec) == 0) + return 0; + + while (name_hash > 0) { + ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, + &e_cpos, &num_clusters, el); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_iterate_xattr_buckets(inode, p_blkno, num_clusters, + ocfs2_list_xattr_bucket, + &xl); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (e_cpos == 0) + break; + + name_hash = e_cpos - 1; + } + + ret = xl.result; +out: + return ret; +} + +static int cmp_xe(const void *a, const void *b) +{ + const struct ocfs2_xattr_entry *l = a, *r = b; + u32 l_hash = le32_to_cpu(l->xe_name_hash); + u32 r_hash = le32_to_cpu(r->xe_name_hash); + + if (l_hash > r_hash) + return 1; + if (l_hash < r_hash) + return -1; + return 0; +} + +static void swap_xe(void *a, void *b, int size) +{ + struct ocfs2_xattr_entry *l = a, *r = b, tmp; + + tmp = *l; + memcpy(l, r, sizeof(struct ocfs2_xattr_entry)); + memcpy(r, &tmp, sizeof(struct ocfs2_xattr_entry)); +} + +/* + * When the ocfs2_xattr_block is filled up, new bucket will be created + * and all the xattr entries will be moved to the new bucket. + * Note: we need to sort the entries since they are not saved in order + * in the ocfs2_xattr_block. 
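+ *
+ * The sort matters because bucket lookups rely on hash order: the
+ * bucket binary search and ocfs2_find_xe_in_bucket() both assume
+ * xh_entries[] is ordered by xe_name_hash, which is what the
+ * sort(cmp_xe, swap_xe) call at the end of this function restores.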
+ */ +static void ocfs2_cp_xattr_block_to_bucket(struct inode *inode, + struct buffer_head *xb_bh, + struct buffer_head *xh_bh, + struct buffer_head *data_bh) +{ + int i, blocksize = inode->i_sb->s_blocksize; + u16 offset, size, off_change; + struct ocfs2_xattr_entry *xe; + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)xb_bh->b_data; + struct ocfs2_xattr_header *xb_xh = &xb->xb_attrs.xb_header; + struct ocfs2_xattr_header *xh = + (struct ocfs2_xattr_header *)xh_bh->b_data; + u16 count = le16_to_cpu(xb_xh->xh_count); + char *target = xh_bh->b_data, *src = xb_bh->b_data; + + mlog(0, "cp xattr from block %llu to bucket %llu\n", + (unsigned long long)xb_bh->b_blocknr, + (unsigned long long)xh_bh->b_blocknr); + + memset(xh_bh->b_data, 0, blocksize); + if (data_bh) + memset(data_bh->b_data, 0, blocksize); + /* + * Since the xe_name_offset is based on ocfs2_xattr_header, + * there is a offset change corresponding to the change of + * ocfs2_xattr_header's position. + */ + off_change = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header); + xe = &xb_xh->xh_entries[count - 1]; + offset = le16_to_cpu(xe->xe_name_offset) + off_change; + size = blocksize - offset; + + /* copy all the names and values. */ + if (data_bh) + target = data_bh->b_data; + memcpy(target + offset, src + offset, size); + + /* Init new header now. */ + xh->xh_count = xb_xh->xh_count; + xh->xh_num_buckets = cpu_to_le16(1); + xh->xh_name_value_len = cpu_to_le16(size); + xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size); + + /* copy all the entries. */ + target = xh_bh->b_data; + offset = offsetof(struct ocfs2_xattr_header, xh_entries); + size = count * sizeof(struct ocfs2_xattr_entry); + memcpy(target + offset, (char *)xb_xh + offset, size); + + /* Change the xe offset for all the xe because of the move. */ + off_change = OCFS2_XATTR_BUCKET_SIZE - blocksize + + offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header); + for (i = 0; i < count; i++) + le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change); + + mlog(0, "copy entry: start = %u, size = %u, offset_change = %u\n", + offset, size, off_change); + + sort(target + offset, count, sizeof(struct ocfs2_xattr_entry), + cmp_xe, swap_xe); +} + +/* + * After we move xattr from block to index btree, we have to + * update ocfs2_xattr_search to the new xe and base. + * + * When the entry is in xattr block, xattr_bh indicates the storage place. + * While if the entry is in index b-tree, "bucket" indicates the + * real place of the xattr. 
+ */ +static int ocfs2_xattr_update_xattr_search(struct inode *inode, + struct ocfs2_xattr_search *xs, + struct buffer_head *old_bh, + struct buffer_head *new_bh) +{ + int ret = 0; + char *buf = old_bh->b_data; + struct ocfs2_xattr_block *old_xb = (struct ocfs2_xattr_block *)buf; + struct ocfs2_xattr_header *old_xh = &old_xb->xb_attrs.xb_header; + int i, blocksize = inode->i_sb->s_blocksize; + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + xs->bucket.bhs[0] = new_bh; + get_bh(new_bh); + xs->bucket.xh = (struct ocfs2_xattr_header *)xs->bucket.bhs[0]->b_data; + xs->header = xs->bucket.xh; + + xs->base = new_bh->b_data; + xs->end = xs->base + inode->i_sb->s_blocksize; + + if (!xs->not_found) { + if (OCFS2_XATTR_BUCKET_SIZE != blocksize) { + ret = ocfs2_read_blocks(inode, + xs->bucket.bhs[0]->b_blocknr + 1, + blk_per_bucket - 1, &xs->bucket.bhs[1], + 0); + if (ret) { + mlog_errno(ret); + return ret; + } + + } + i = xs->here - old_xh->xh_entries; + xs->here = &xs->header->xh_entries[i]; + } + + return ret; +} + +static int ocfs2_xattr_create_index_block(struct inode *inode, + struct ocfs2_xattr_search *xs) +{ + int ret, credits = OCFS2_SUBALLOC_ALLOC; + u32 bit_off, len; + u64 blkno; + handle_t *handle; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_alloc_context *data_ac; + struct buffer_head *xh_bh = NULL, *data_bh = NULL; + struct buffer_head *xb_bh = xs->xattr_bh; + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)xb_bh->b_data; + struct ocfs2_xattr_tree_root *xr; + u16 xb_flags = le16_to_cpu(xb->xb_flags); + u16 bpb = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + mlog(0, "create xattr index block for %llu\n", + (unsigned long long)xb_bh->b_blocknr); + + BUG_ON(xb_flags & OCFS2_XATTR_INDEXED); + + ret = ocfs2_reserve_clusters(osb, 1, &data_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * XXX: + * We can use this lock for now, and maybe move to a dedicated mutex + * if performance becomes a problem later. + */ + down_write(&oi->ip_alloc_sem); + + /* + * 3 more credits, one for xattr block update, one for the 1st block + * of the new xattr bucket and one for the value/data. + */ + credits += 3; + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out_sem; + } + + ret = ocfs2_journal_access(handle, inode, xb_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, &len); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* + * The bucket may spread in many blocks, and + * we will only touch the 1st block and the last block + * in the whole bucket(one for entry and one for data). 
+ */ + blkno = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); + + mlog(0, "allocate 1 cluster from %llu to xattr block\n", + (unsigned long long)blkno); + + xh_bh = sb_getblk(inode->i_sb, blkno); + if (!xh_bh) { + ret = -EIO; + mlog_errno(ret); + goto out_commit; + } + + ocfs2_set_new_buffer_uptodate(inode, xh_bh); + + ret = ocfs2_journal_access(handle, inode, xh_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + if (bpb > 1) { + data_bh = sb_getblk(inode->i_sb, blkno + bpb - 1); + if (!data_bh) { + ret = -EIO; + mlog_errno(ret); + goto out_commit; + } + + ocfs2_set_new_buffer_uptodate(inode, data_bh); + + ret = ocfs2_journal_access(handle, inode, data_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + } + + ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xh_bh, data_bh); + + ocfs2_journal_dirty(handle, xh_bh); + if (data_bh) + ocfs2_journal_dirty(handle, data_bh); + + ret = ocfs2_xattr_update_xattr_search(inode, xs, xb_bh, xh_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* Change from ocfs2_xattr_header to ocfs2_xattr_tree_root */ + memset(&xb->xb_attrs, 0, inode->i_sb->s_blocksize - + offsetof(struct ocfs2_xattr_block, xb_attrs)); + + xr = &xb->xb_attrs.xb_root; + xr->xt_clusters = cpu_to_le32(1); + xr->xt_last_eb_blk = 0; + xr->xt_list.l_tree_depth = 0; + xr->xt_list.l_count = cpu_to_le16(ocfs2_xattr_recs_per_xb(inode->i_sb)); + xr->xt_list.l_next_free_rec = cpu_to_le16(1); + + xr->xt_list.l_recs[0].e_cpos = 0; + xr->xt_list.l_recs[0].e_blkno = cpu_to_le64(blkno); + xr->xt_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1); + + xb->xb_flags = cpu_to_le16(xb_flags | OCFS2_XATTR_INDEXED); + + ret = ocfs2_journal_dirty(handle, xb_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + +out_commit: + ocfs2_commit_trans(osb, handle); + +out_sem: + up_write(&oi->ip_alloc_sem); + +out: + if (data_ac) + ocfs2_free_alloc_context(data_ac); + + brelse(xh_bh); + brelse(data_bh); + + return ret; +} + +static int cmp_xe_offset(const void *a, const void *b) +{ + const struct ocfs2_xattr_entry *l = a, *r = b; + u32 l_name_offset = le16_to_cpu(l->xe_name_offset); + u32 r_name_offset = le16_to_cpu(r->xe_name_offset); + + if (l_name_offset < r_name_offset) + return 1; + if (l_name_offset > r_name_offset) + return -1; + return 0; +} + +/* + * defrag a xattr bucket if we find that the bucket has some + * holes beteen name/value pairs. + * We will move all the name/value pairs to the end of the bucket + * so that we can spare some space for insertion. + */ +static int ocfs2_defrag_xattr_bucket(struct inode *inode, + struct ocfs2_xattr_bucket *bucket) +{ + int ret, i; + size_t end, offset, len, value_len; + struct ocfs2_xattr_header *xh; + char *entries, *buf, *bucket_buf = NULL; + u64 blkno = bucket->bhs[0]->b_blocknr; + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + u16 xh_free_start; + size_t blocksize = inode->i_sb->s_blocksize; + handle_t *handle; + struct buffer_head **bhs; + struct ocfs2_xattr_entry *xe; + + bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket, + GFP_NOFS); + if (!bhs) + return -ENOMEM; + + ret = ocfs2_read_blocks(inode, blkno, blk_per_bucket, bhs, 0); + if (ret) + goto out; + + /* + * In order to make the operation more efficient and generic, + * we copy all the blocks into a contiguous memory and do the + * defragment there, so if anything is error, we will not touch + * the real block. 
+ */ + bucket_buf = kmalloc(OCFS2_XATTR_BUCKET_SIZE, GFP_NOFS); + if (!bucket_buf) { + ret = -EIO; + goto out; + } + + buf = bucket_buf; + for (i = 0; i < blk_per_bucket; i++, buf += blocksize) + memcpy(buf, bhs[i]->b_data, blocksize); + + handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), blk_per_bucket); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + handle = NULL; + mlog_errno(ret); + goto out; + } + + for (i = 0; i < blk_per_bucket; i++) { + ret = ocfs2_journal_access(handle, inode, bhs[i], + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto commit; + } + } + + xh = (struct ocfs2_xattr_header *)bucket_buf; + entries = (char *)xh->xh_entries; + xh_free_start = le16_to_cpu(xh->xh_free_start); + + mlog(0, "adjust xattr bucket in %llu, count = %u, " + "xh_free_start = %u, xh_name_value_len = %u.\n", + (unsigned long long)blkno, le16_to_cpu(xh->xh_count), + xh_free_start, le16_to_cpu(xh->xh_name_value_len)); + + /* + * sort all the entries by their offset. + * the largest will be the first, so that we can + * move them to the end one by one. + */ + sort(entries, le16_to_cpu(xh->xh_count), + sizeof(struct ocfs2_xattr_entry), + cmp_xe_offset, swap_xe); + + /* Move all name/values to the end of the bucket. */ + xe = xh->xh_entries; + end = OCFS2_XATTR_BUCKET_SIZE; + for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) { + offset = le16_to_cpu(xe->xe_name_offset); + if (ocfs2_xattr_is_local(xe)) + value_len = OCFS2_XATTR_SIZE( + le64_to_cpu(xe->xe_value_size)); + else + value_len = OCFS2_XATTR_ROOT_SIZE; + len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len; + + /* + * We must make sure that the name/value pair + * exist in the same block. So adjust end to + * the previous block end if needed. + */ + if (((end - len) / blocksize != + (end - 1) / blocksize)) + end = end - end % blocksize; + + if (end > offset + len) { + memmove(bucket_buf + end - len, + bucket_buf + offset, len); + xe->xe_name_offset = cpu_to_le16(end - len); + } + + mlog_bug_on_msg(end < offset + len, "Defrag check failed for " + "bucket %llu\n", (unsigned long long)blkno); + + end -= len; + } + + mlog_bug_on_msg(xh_free_start > end, "Defrag check failed for " + "bucket %llu\n", (unsigned long long)blkno); + + if (xh_free_start == end) + goto commit; + + memset(bucket_buf + xh_free_start, 0, end - xh_free_start); + xh->xh_free_start = cpu_to_le16(end); + + /* sort the entries by their name_hash. */ + sort(entries, le16_to_cpu(xh->xh_count), + sizeof(struct ocfs2_xattr_entry), + cmp_xe, swap_xe); + + buf = bucket_buf; + for (i = 0; i < blk_per_bucket; i++, buf += blocksize) { + memcpy(bhs[i]->b_data, buf, blocksize); + ocfs2_journal_dirty(handle, bhs[i]); + } + +commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + + if (bhs) { + for (i = 0; i < blk_per_bucket; i++) + brelse(bhs[i]); + } + kfree(bhs); + + kfree(bucket_buf); + return ret; +} + +/* + * Move half nums of the xattr bucket in the previous cluster to this new + * cluster. We only touch the last cluster of the previous extend record. + * + * first_bh is the first buffer_head of a series of bucket in the same + * extent rec and header_bh is the header of one bucket in this cluster. + * They will be updated if we move the data header_bh contains to the new + * cluster. first_hash will be set as the 1st xe's name_hash of the new cluster. 
+ */ +static int ocfs2_mv_xattr_bucket_cross_cluster(struct inode *inode, + handle_t *handle, + struct buffer_head **first_bh, + struct buffer_head **header_bh, + u64 new_blkno, + u64 prev_blkno, + u32 num_clusters, + u32 *first_hash) +{ + int i, ret, credits; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); + int blocksize = inode->i_sb->s_blocksize; + struct buffer_head *old_bh, *new_bh, *prev_bh, *new_first_bh = NULL; + struct ocfs2_xattr_header *new_xh; + struct ocfs2_xattr_header *xh = + (struct ocfs2_xattr_header *)((*first_bh)->b_data); + + BUG_ON(le16_to_cpu(xh->xh_num_buckets) < num_buckets); + BUG_ON(OCFS2_XATTR_BUCKET_SIZE == osb->s_clustersize); + + prev_bh = *first_bh; + get_bh(prev_bh); + xh = (struct ocfs2_xattr_header *)prev_bh->b_data; + + prev_blkno += (num_clusters - 1) * bpc + bpc / 2; + + mlog(0, "move half of xattrs in cluster %llu to %llu\n", + (unsigned long long)prev_blkno, (unsigned long long)new_blkno); + + /* + * We need to update the 1st half of the new cluster and + * 1 more for the update of the 1st bucket of the previous + * extent record. + */ + credits = bpc / 2 + 1; + ret = ocfs2_extend_trans(handle, credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, prev_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + for (i = 0; i < bpc / 2; i++, prev_blkno++, new_blkno++) { + old_bh = new_bh = NULL; + new_bh = sb_getblk(inode->i_sb, new_blkno); + if (!new_bh) { + ret = -EIO; + mlog_errno(ret); + goto out; + } + + ocfs2_set_new_buffer_uptodate(inode, new_bh); + + ret = ocfs2_journal_access(handle, inode, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret < 0) { + mlog_errno(ret); + brelse(new_bh); + goto out; + } + + ret = ocfs2_read_block(inode, prev_blkno, &old_bh); + if (ret < 0) { + mlog_errno(ret); + brelse(new_bh); + goto out; + } + + memcpy(new_bh->b_data, old_bh->b_data, blocksize); + + if (i == 0) { + new_xh = (struct ocfs2_xattr_header *)new_bh->b_data; + new_xh->xh_num_buckets = cpu_to_le16(num_buckets / 2); + + if (first_hash) + *first_hash = le32_to_cpu( + new_xh->xh_entries[0].xe_name_hash); + new_first_bh = new_bh; + get_bh(new_first_bh); + } + + ocfs2_journal_dirty(handle, new_bh); + + if (*header_bh == old_bh) { + brelse(*header_bh); + *header_bh = new_bh; + get_bh(*header_bh); + + brelse(*first_bh); + *first_bh = new_first_bh; + get_bh(*first_bh); + } + brelse(new_bh); + brelse(old_bh); + } + + le16_add_cpu(&xh->xh_num_buckets, -(num_buckets / 2)); + + ocfs2_journal_dirty(handle, prev_bh); +out: + brelse(prev_bh); + brelse(new_first_bh); + return ret; +} + +static int ocfs2_read_xattr_bucket(struct inode *inode, + u64 blkno, + struct buffer_head **bhs, + int new) +{ + int ret = 0; + u16 i, blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + if (!new) + return ocfs2_read_blocks(inode, blkno, + blk_per_bucket, bhs, 0); + + for (i = 0; i < blk_per_bucket; i++) { + bhs[i] = sb_getblk(inode->i_sb, blkno + i); + if (bhs[i] == NULL) { + ret = -EIO; + mlog_errno(ret); + break; + } + ocfs2_set_new_buffer_uptodate(inode, bhs[i]); + } + + return ret; +} + +/* + * Find the suitable pos when we divide a bucket into 2. + * We have to make sure the xattrs with the same hash value exist + * in the same bucket. + * + * If this ocfs2_xattr_header covers more than one hash value, find a + * place where the hash value changes. 
Try to find the most even split.
+ * The most common case is that all entries have different hash values,
+ * and the first check we make will find a place to split.
+ */
+static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
+{
+	struct ocfs2_xattr_entry *entries = xh->xh_entries;
+	int count = le16_to_cpu(xh->xh_count);
+	int delta, middle = count / 2;
+
+	/*
+	 * We start at the middle.  Each step gets farther away in both
+	 * directions.  We therefore hit the change in hash value
+	 * nearest to the middle.  Note that this loop does not execute for
+	 * count < 2.
+	 */
+	for (delta = 0; delta < middle; delta++) {
+		/* Let's check delta earlier than middle */
+		if (cmp_xe(&entries[middle - delta - 1],
+			   &entries[middle - delta]))
+			return middle - delta;
+
+		/* For even counts, don't walk off the end */
+		if ((middle + delta + 1) == count)
+			continue;
+
+		/* Now try delta past middle */
+		if (cmp_xe(&entries[middle + delta],
+			   &entries[middle + delta + 1]))
+			return middle + delta + 1;
+	}
+
+	/* Every entry had the same hash */
+	return count;
+}
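+
+/*
+ * A worked example with hypothetical hash values (not from a real
+ * bucket): suppose the sorted entries carry the hashes
+ *
+ *	index:	0   1   2   3   4   5
+ *	hash:	10  10  20  20  20  30
+ *
+ * count = 6, middle = 3.  delta = 0 compares entries (2,3) and (3,4):
+ * both pairs are equal.  delta = 1 compares (1,2): 10 != 20, so we
+ * return middle - delta = 2.  Entries 0-1 stay in the old bucket and
+ * entries 2-5 move, which is the split closest to the middle that still
+ * keeps equal hashes together.
+ */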
+ */ + for (i = 0; i < blk_per_bucket; i++) + memset(t_bhs[i]->b_data, 0, blocksize); + + xh = (struct ocfs2_xattr_header *)t_bhs[0]->b_data; + xh->xh_free_start = cpu_to_le16(blocksize); + xh->xh_entries[0].xe_name_hash = xe->xe_name_hash; + le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1); + + goto set_num_buckets; + } + + /* copy the whole bucket to the new first. */ + for (i = 0; i < blk_per_bucket; i++) + memcpy(t_bhs[i]->b_data, s_bhs[i]->b_data, blocksize); + + /* update the new bucket. */ + xh = (struct ocfs2_xattr_header *)t_bhs[0]->b_data; + + /* + * Calculate the total name/value len and xh_free_start for + * the old bucket first. + */ + name_offset = OCFS2_XATTR_BUCKET_SIZE; + name_value_len = 0; + for (i = 0; i < start; i++) { + xe = &xh->xh_entries[i]; + xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len); + if (ocfs2_xattr_is_local(xe)) + xe_len += + OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); + else + xe_len += OCFS2_XATTR_ROOT_SIZE; + name_value_len += xe_len; + if (le16_to_cpu(xe->xe_name_offset) < name_offset) + name_offset = le16_to_cpu(xe->xe_name_offset); + } + + /* + * Now begin the modification to the new bucket. + * + * In the new bucket, We just move the xattr entry to the beginning + * and don't touch the name/value. So there will be some holes in the + * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is + * called. + */ + xe = &xh->xh_entries[start]; + len = sizeof(struct ocfs2_xattr_entry) * (count - start); + mlog(0, "mv xattr entry len %d from %d to %d\n", len, + (int)((char *)xe - (char *)xh), + (int)((char *)xh->xh_entries - (char *)xh)); + memmove((char *)xh->xh_entries, (char *)xe, len); + xe = &xh->xh_entries[count - start]; + len = sizeof(struct ocfs2_xattr_entry) * start; + memset((char *)xe, 0, len); + + le16_add_cpu(&xh->xh_count, -start); + le16_add_cpu(&xh->xh_name_value_len, -name_value_len); + + /* Calculate xh_free_start for the new bucket. */ + xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE); + for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { + xe = &xh->xh_entries[i]; + xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len); + if (ocfs2_xattr_is_local(xe)) + xe_len += + OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); + else + xe_len += OCFS2_XATTR_ROOT_SIZE; + if (le16_to_cpu(xe->xe_name_offset) < + le16_to_cpu(xh->xh_free_start)) + xh->xh_free_start = xe->xe_name_offset; + } + +set_num_buckets: + /* set xh->xh_num_buckets for the new xh. */ + if (new_bucket_head) + xh->xh_num_buckets = cpu_to_le16(1); + else + xh->xh_num_buckets = 0; + + for (i = 0; i < blk_per_bucket; i++) { + ocfs2_journal_dirty(handle, t_bhs[i]); + if (ret) + mlog_errno(ret); + } + + /* store the first_hash of the new bucket. */ + if (first_hash) + *first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash); + + /* + * Now only update the 1st block of the old bucket. If we + * just added a new empty bucket, there is no need to modify + * it. 
+ */ + if (start == count) + goto out; + + xh = (struct ocfs2_xattr_header *)s_bhs[0]->b_data; + memset(&xh->xh_entries[start], 0, + sizeof(struct ocfs2_xattr_entry) * (count - start)); + xh->xh_count = cpu_to_le16(start); + xh->xh_free_start = cpu_to_le16(name_offset); + xh->xh_name_value_len = cpu_to_le16(name_value_len); + + ocfs2_journal_dirty(handle, s_bhs[0]); + if (ret) + mlog_errno(ret); + +out: + if (s_bhs) { + for (i = 0; i < blk_per_bucket; i++) + brelse(s_bhs[i]); + } + kfree(s_bhs); + + if (t_bhs) { + for (i = 0; i < blk_per_bucket; i++) + brelse(t_bhs[i]); + } + kfree(t_bhs); + + return ret; +} + +/* + * Copy xattr from one bucket to another bucket. + * + * The caller must make sure that the journal transaction + * has enough space for journaling. + */ +static int ocfs2_cp_xattr_bucket(struct inode *inode, + handle_t *handle, + u64 s_blkno, + u64 t_blkno, + int t_is_new) +{ + int ret, i; + int blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + int blocksize = inode->i_sb->s_blocksize; + struct buffer_head **s_bhs, **t_bhs = NULL; + + BUG_ON(s_blkno == t_blkno); + + mlog(0, "cp bucket %llu to %llu, target is %d\n", + (unsigned long long)s_blkno, (unsigned long long)t_blkno, + t_is_new); + + s_bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket, + GFP_NOFS); + if (!s_bhs) + return -ENOMEM; + + ret = ocfs2_read_xattr_bucket(inode, s_blkno, s_bhs, 0); + if (ret) + goto out; + + t_bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket, + GFP_NOFS); + if (!t_bhs) { + ret = -ENOMEM; + goto out; + } + + ret = ocfs2_read_xattr_bucket(inode, t_blkno, t_bhs, t_is_new); + if (ret) + goto out; + + for (i = 0; i < blk_per_bucket; i++) { + ret = ocfs2_journal_access(handle, inode, t_bhs[i], + t_is_new ? + OCFS2_JOURNAL_ACCESS_CREATE : + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) + goto out; + } + + for (i = 0; i < blk_per_bucket; i++) { + memcpy(t_bhs[i]->b_data, s_bhs[i]->b_data, blocksize); + ocfs2_journal_dirty(handle, t_bhs[i]); + } + +out: + if (s_bhs) { + for (i = 0; i < blk_per_bucket; i++) + brelse(s_bhs[i]); + } + kfree(s_bhs); + + if (t_bhs) { + for (i = 0; i < blk_per_bucket; i++) + brelse(t_bhs[i]); + } + kfree(t_bhs); + + return ret; +} + +/* + * Copy one xattr cluster from src_blk to to_blk. + * The to_blk will become the first bucket header of the cluster, so its + * xh_num_buckets will be initialized as the bucket num in the cluster. + */ +static int ocfs2_cp_xattr_cluster(struct inode *inode, + handle_t *handle, + struct buffer_head *first_bh, + u64 src_blk, + u64 to_blk, + u32 *first_hash) +{ + int i, ret, credits; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + int num_buckets = ocfs2_xattr_buckets_per_cluster(osb); + struct buffer_head *bh = NULL; + struct ocfs2_xattr_header *xh; + u64 to_blk_start = to_blk; + + mlog(0, "cp xattrs from cluster %llu to %llu\n", + (unsigned long long)src_blk, (unsigned long long)to_blk); + + /* + * We need to update the new cluster and 1 more for the update of + * the 1st bucket of the previous extent rec. 
+ */ + credits = bpc + 1; + ret = ocfs2_extend_trans(handle, credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, first_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + for (i = 0; i < num_buckets; i++) { + ret = ocfs2_cp_xattr_bucket(inode, handle, + src_blk, to_blk, 1); + if (ret) { + mlog_errno(ret); + goto out; + } + + src_blk += ocfs2_blocks_per_xattr_bucket(inode->i_sb); + to_blk += ocfs2_blocks_per_xattr_bucket(inode->i_sb); + } + + /* update the old bucket header. */ + xh = (struct ocfs2_xattr_header *)first_bh->b_data; + le16_add_cpu(&xh->xh_num_buckets, -num_buckets); + + ocfs2_journal_dirty(handle, first_bh); + + /* update the new bucket header. */ + ret = ocfs2_read_block(inode, to_blk_start, &bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + xh = (struct ocfs2_xattr_header *)bh->b_data; + xh->xh_num_buckets = cpu_to_le16(num_buckets); + + ocfs2_journal_dirty(handle, bh); + + if (first_hash) + *first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash); +out: + brelse(bh); + return ret; +} + +/* + * Move some xattrs in this cluster to the new cluster. + * This function should only be called when bucket size == cluster size. + * Otherwise ocfs2_mv_xattr_bucket_cross_cluster should be used instead. + */ +static int ocfs2_divide_xattr_cluster(struct inode *inode, + handle_t *handle, + u64 prev_blk, + u64 new_blk, + u32 *first_hash) +{ + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + int ret, credits = 2 * blk_per_bucket; + + BUG_ON(OCFS2_XATTR_BUCKET_SIZE < OCFS2_SB(inode->i_sb)->s_clustersize); + + ret = ocfs2_extend_trans(handle, credits); + if (ret) { + mlog_errno(ret); + return ret; + } + + /* Move half of the xattr in start_blk to the next bucket. */ + return ocfs2_divide_xattr_bucket(inode, handle, prev_blk, + new_blk, first_hash, 1); +} + +/* + * Move some xattrs from the old cluster to the new one since they are not + * contiguous in ocfs2 xattr tree. + * + * new_blk starts a new separate cluster, and we will move some xattrs from + * prev_blk to it. v_start will be set as the first name hash value in this + * new cluster so that it can be used as e_cpos during tree insertion and + * don't collide with our original b-tree operations. first_bh and header_bh + * will also be updated since they will be used in ocfs2_extend_xattr_bucket + * to extend the insert bucket. + * + * The problem is how much xattr should we move to the new one and when should + * we update first_bh and header_bh? + * 1. If cluster size > bucket size, that means the previous cluster has more + * than 1 bucket, so just move half nums of bucket into the new cluster and + * update the first_bh and header_bh if the insert bucket has been moved + * to the new cluster. + * 2. If cluster_size == bucket_size: + * a) If the previous extent rec has more than one cluster and the insert + * place isn't in the last cluster, copy the entire last cluster to the + * new one. This time, we don't need to upate the first_bh and header_bh + * since they will not be moved into the new cluster. + * b) Otherwise, move the bottom half of the xattrs in the last cluster into + * the new one. And we set the extend flag to zero if the insert place is + * moved into the new allocated cluster since no extend is needed. 
+ */ +static int ocfs2_adjust_xattr_cross_cluster(struct inode *inode, + handle_t *handle, + struct buffer_head **first_bh, + struct buffer_head **header_bh, + u64 new_blk, + u64 prev_blk, + u32 prev_clusters, + u32 *v_start, + int *extend) +{ + int ret = 0; + int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + + mlog(0, "adjust xattrs from cluster %llu len %u to %llu\n", + (unsigned long long)prev_blk, prev_clusters, + (unsigned long long)new_blk); + + if (ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)) > 1) + ret = ocfs2_mv_xattr_bucket_cross_cluster(inode, + handle, + first_bh, + header_bh, + new_blk, + prev_blk, + prev_clusters, + v_start); + else { + u64 last_blk = prev_blk + bpc * (prev_clusters - 1); + + if (prev_clusters > 1 && (*header_bh)->b_blocknr != last_blk) + ret = ocfs2_cp_xattr_cluster(inode, handle, *first_bh, + last_blk, new_blk, + v_start); + else { + ret = ocfs2_divide_xattr_cluster(inode, handle, + last_blk, new_blk, + v_start); + + if ((*header_bh)->b_blocknr == last_blk && extend) + *extend = 0; + } + } + + return ret; +} + +/* + * Add a new cluster for xattr storage. + * + * If the new cluster is contiguous with the previous one, it will be + * appended to the same extent record, and num_clusters will be updated. + * If not, we will insert a new extent for it and move some xattrs in + * the last cluster into the new allocated one. + * We also need to limit the maximum size of a btree leaf, otherwise we'll + * lose the benefits of hashing because we'll have to search large leaves. + * So now the maximum size is OCFS2_MAX_XATTR_TREE_LEAF_SIZE(or clustersize, + * if it's bigger). + * + * first_bh is the first block of the previous extent rec and header_bh + * indicates the bucket we will insert the new xattrs. They will be updated + * when the header_bh is moved into the new cluster. 
+ */ +static int ocfs2_add_new_xattr_cluster(struct inode *inode, + struct buffer_head *root_bh, + struct buffer_head **first_bh, + struct buffer_head **header_bh, + u32 *num_clusters, + u32 prev_cpos, + u64 prev_blkno, + int *extend) +{ + int ret, credits; + u16 bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); + u32 prev_clusters = *num_clusters; + u32 clusters_to_add = 1, bit_off, num_bits, v_start = 0; + u64 block; + handle_t *handle = NULL; + struct ocfs2_alloc_context *data_ac = NULL; + struct ocfs2_alloc_context *meta_ac = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_extent_tree et; + + mlog(0, "Add new xattr cluster for %llu, previous xattr hash = %u, " + "previous xattr blkno = %llu\n", + (unsigned long long)OCFS2_I(inode)->ip_blkno, + prev_cpos, (unsigned long long)prev_blkno); + + ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); + + ret = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, + &data_ac, &meta_ac); + if (ret) { + mlog_errno(ret); + goto leave; + } + + credits = ocfs2_calc_extend_credits(osb->sb, et.et_root_el, + clusters_to_add); + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + handle = NULL; + mlog_errno(ret); + goto leave; + } + + ret = ocfs2_journal_access(handle, inode, root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto leave; + } + + ret = __ocfs2_claim_clusters(osb, handle, data_ac, 1, + clusters_to_add, &bit_off, &num_bits); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto leave; + } + + BUG_ON(num_bits > clusters_to_add); + + block = ocfs2_clusters_to_blocks(osb->sb, bit_off); + mlog(0, "Allocating %u clusters at block %u for xattr in inode %llu\n", + num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); + + if (prev_blkno + prev_clusters * bpc == block && + (prev_clusters + num_bits) << osb->s_clustersize_bits <= + OCFS2_MAX_XATTR_TREE_LEAF_SIZE) { + /* + * If this cluster is contiguous with the old one and + * adding this new cluster, we don't surpass the limit of + * OCFS2_MAX_XATTR_TREE_LEAF_SIZE, cool. We will let it be + * initialized and used like other buckets in the previous + * cluster. + * So add it as a contiguous one. The caller will handle + * its init process. + */ + v_start = prev_cpos + prev_clusters; + *num_clusters = prev_clusters + num_bits; + mlog(0, "Add contiguous %u clusters to previous extent rec.\n", + num_bits); + } else { + ret = ocfs2_adjust_xattr_cross_cluster(inode, + handle, + first_bh, + header_bh, + block, + prev_blkno, + prev_clusters, + &v_start, + extend); + if (ret) { + mlog_errno(ret); + goto leave; + } + } + + if (handle->h_buffer_credits < credits) { + /* + * The journal has been restarted before, and don't + * have enough space for the insertion, so extend it + * here. 
+ */ + ret = ocfs2_extend_trans(handle, credits); + if (ret) { + mlog_errno(ret); + goto leave; + } + } + mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", + num_bits, (unsigned long long)block, v_start); + ret = ocfs2_insert_extent(osb, handle, inode, &et, v_start, block, + num_bits, 0, meta_ac); + if (ret < 0) { + mlog_errno(ret); + goto leave; + } + + ret = ocfs2_journal_dirty(handle, root_bh); + if (ret < 0) { + mlog_errno(ret); + goto leave; + } + +leave: + if (handle) + ocfs2_commit_trans(osb, handle); + if (data_ac) + ocfs2_free_alloc_context(data_ac); + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + + return ret; +} + +/* + * Extend a new xattr bucket and move xattrs to the end one by one until + * We meet with start_bh. Only move half of the xattrs to the bucket after it. + */ +static int ocfs2_extend_xattr_bucket(struct inode *inode, + struct buffer_head *first_bh, + struct buffer_head *start_bh, + u32 num_clusters) +{ + int ret, credits; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + u64 start_blk = start_bh->b_blocknr, end_blk; + u32 num_buckets = num_clusters * ocfs2_xattr_buckets_per_cluster(osb); + handle_t *handle; + struct ocfs2_xattr_header *first_xh = + (struct ocfs2_xattr_header *)first_bh->b_data; + u16 bucket = le16_to_cpu(first_xh->xh_num_buckets); + + mlog(0, "extend xattr bucket in %llu, xattr extend rec starting " + "from %llu, len = %u\n", (unsigned long long)start_blk, + (unsigned long long)first_bh->b_blocknr, num_clusters); + + BUG_ON(bucket >= num_buckets); + + end_blk = first_bh->b_blocknr + (bucket - 1) * blk_per_bucket; + + /* + * We will touch all the buckets after the start_bh(include it). + * Add one more bucket and modify the first_bh. + */ + credits = end_blk - start_blk + 2 * blk_per_bucket + 1; + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + handle = NULL; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, first_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto commit; + } + + while (end_blk != start_blk) { + ret = ocfs2_cp_xattr_bucket(inode, handle, end_blk, + end_blk + blk_per_bucket, 0); + if (ret) + goto commit; + end_blk -= blk_per_bucket; + } + + /* Move half of the xattr in start_blk to the next bucket. */ + ret = ocfs2_divide_xattr_bucket(inode, handle, start_blk, + start_blk + blk_per_bucket, NULL, 0); + + le16_add_cpu(&first_xh->xh_num_buckets, 1); + ocfs2_journal_dirty(handle, first_bh); + +commit: + ocfs2_commit_trans(osb, handle); +out: + return ret; +} + +/* + * Add new xattr bucket in an extent record and adjust the buckets accordingly. + * xb_bh is the ocfs2_xattr_block. + * We will move all the buckets starting from header_bh to the next place. As + * for this one, half num of its xattrs will be moved to the next one. + * + * We will allocate a new cluster if current cluster is full and adjust + * header_bh and first_bh if the insert place is moved to the new cluster. 
+ */ +static int ocfs2_add_new_xattr_bucket(struct inode *inode, + struct buffer_head *xb_bh, + struct buffer_head *header_bh) +{ + struct ocfs2_xattr_header *first_xh = NULL; + struct buffer_head *first_bh = NULL; + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)xb_bh->b_data; + struct ocfs2_xattr_tree_root *xb_root = &xb->xb_attrs.xb_root; + struct ocfs2_extent_list *el = &xb_root->xt_list; + struct ocfs2_xattr_header *xh = + (struct ocfs2_xattr_header *)header_bh->b_data; + u32 name_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash); + struct super_block *sb = inode->i_sb; + struct ocfs2_super *osb = OCFS2_SB(sb); + int ret, num_buckets, extend = 1; + u64 p_blkno; + u32 e_cpos, num_clusters; + + mlog(0, "Add new xattr bucket starting form %llu\n", + (unsigned long long)header_bh->b_blocknr); + + /* + * Add refrence for header_bh here because it may be + * changed in ocfs2_add_new_xattr_cluster and we need + * to free it in the end. + */ + get_bh(header_bh); + + ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, &e_cpos, + &num_clusters, el); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_block(inode, p_blkno, &first_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + num_buckets = ocfs2_xattr_buckets_per_cluster(osb) * num_clusters; + first_xh = (struct ocfs2_xattr_header *)first_bh->b_data; + + if (num_buckets == le16_to_cpu(first_xh->xh_num_buckets)) { + ret = ocfs2_add_new_xattr_cluster(inode, + xb_bh, + &first_bh, + &header_bh, + &num_clusters, + e_cpos, + p_blkno, + &extend); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + if (extend) + ret = ocfs2_extend_xattr_bucket(inode, + first_bh, + header_bh, + num_clusters); + if (ret) + mlog_errno(ret); +out: + brelse(first_bh); + brelse(header_bh); + return ret; +} + +static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + int offs) +{ + int block_off = offs >> inode->i_sb->s_blocksize_bits; + + offs = offs % inode->i_sb->s_blocksize; + return bucket->bhs[block_off]->b_data + offs; +} + +/* + * Handle the normal xattr set, including replace, delete and new. + * + * Note: "local" indicates the real data's locality. So we can't + * just its bucket locality by its length. + */ +static void ocfs2_xattr_set_entry_normal(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + u32 name_hash, + int local) +{ + struct ocfs2_xattr_entry *last, *xe; + int name_len = strlen(xi->name); + struct ocfs2_xattr_header *xh = xs->header; + u16 count = le16_to_cpu(xh->xh_count), start; + size_t blocksize = inode->i_sb->s_blocksize; + char *val; + size_t offs, size, new_size; + + last = &xh->xh_entries[count]; + if (!xs->not_found) { + xe = xs->here; + offs = le16_to_cpu(xe->xe_name_offset); + if (ocfs2_xattr_is_local(xe)) + size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); + else + size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE); + + /* + * If the new value will be stored outside, xi->value has been + * initalized as an empty ocfs2_xattr_value_root, and the same + * goes with xi->value_len, so we can set new_size safely here. + * See ocfs2_xattr_set_in_bucket. + */ + new_size = OCFS2_XATTR_SIZE(name_len) + + OCFS2_XATTR_SIZE(xi->value_len); + + le16_add_cpu(&xh->xh_name_value_len, -size); + if (xi->value) { + if (new_size > size) + goto set_new_name_value; + + /* Now replace the old value with new one. 
			 */
+			if (local)
+				xe->xe_value_size = cpu_to_le64(xi->value_len);
+			else
+				xe->xe_value_size = 0;
+
+			val = ocfs2_xattr_bucket_get_val(inode,
+							 &xs->bucket, offs);
+			memset(val + OCFS2_XATTR_SIZE(name_len), 0,
+			       size - OCFS2_XATTR_SIZE(name_len));
+			if (OCFS2_XATTR_SIZE(xi->value_len) > 0)
+				memcpy(val + OCFS2_XATTR_SIZE(name_len),
+				       xi->value, xi->value_len);
+
+			le16_add_cpu(&xh->xh_name_value_len, new_size);
+			ocfs2_xattr_set_local(xe, local);
+			return;
+		} else {
+			/*
+			 * Remove the old entry if there is more than one.
+			 * We don't remove the last entry so that we can
+			 * use it to indicate the hash value of the empty
+			 * bucket.
+			 */
+			last -= 1;
+			le16_add_cpu(&xh->xh_count, -1);
+			if (xh->xh_count) {
+				memmove(xe, xe + 1,
+					(void *)last - (void *)xe);
+				memset(last, 0,
+				       sizeof(struct ocfs2_xattr_entry));
+			} else
+				xh->xh_free_start =
+					cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
+
+			return;
+		}
+	} else {
+		/* find a new entry for insert. */
+		int low = 0, high = count - 1, tmp;
+		struct ocfs2_xattr_entry *tmp_xe;
+
+		while (low <= high && count) {
+			tmp = (low + high) / 2;
+			tmp_xe = &xh->xh_entries[tmp];
+
+			if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash))
+				low = tmp + 1;
+			else if (name_hash <
+				 le32_to_cpu(tmp_xe->xe_name_hash))
+				high = tmp - 1;
+			else {
+				low = tmp;
+				break;
+			}
+		}
+
+		xe = &xh->xh_entries[low];
+		if (low != count)
+			memmove(xe + 1, xe, (void *)last - (void *)xe);
+
+		le16_add_cpu(&xh->xh_count, 1);
+		memset(xe, 0, sizeof(struct ocfs2_xattr_entry));
+		xe->xe_name_hash = cpu_to_le32(name_hash);
+		xe->xe_name_len = name_len;
+		ocfs2_xattr_set_type(xe, xi->name_index);
+	}
+
+set_new_name_value:
+	/* Insert the new name+value. */
+	size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len);
+
+	/*
+	 * We must make sure that the name/value pair
+	 * exists in the same block.
+	 */
+	offs = le16_to_cpu(xh->xh_free_start);
+	start = offs - size;
+
+	if (start >> inode->i_sb->s_blocksize_bits !=
+	    (offs - 1) >> inode->i_sb->s_blocksize_bits) {
+		offs = offs - offs % blocksize;
+		xh->xh_free_start = cpu_to_le16(offs);
+	}
+
+	val = ocfs2_xattr_bucket_get_val(inode,
+					 &xs->bucket, offs - size);
+	xe->xe_name_offset = cpu_to_le16(offs - size);
+
+	memset(val, 0, size);
+	memcpy(val, xi->name, name_len);
+	memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len);
+
+	xe->xe_value_size = cpu_to_le64(xi->value_len);
+	ocfs2_xattr_set_local(xe, local);
+	xs->here = xe;
+	le16_add_cpu(&xh->xh_free_start, -size);
+	le16_add_cpu(&xh->xh_name_value_len, size);
+
+	return;
+}
+
+static int ocfs2_xattr_bucket_handle_journal(struct inode *inode,
+					     handle_t *handle,
+					     struct ocfs2_xattr_search *xs,
+					     struct buffer_head **bhs,
+					     u16 bh_num)
+{
+	int ret = 0, off, block_off;
+	struct ocfs2_xattr_entry *xe = xs->here;
+
+	/*
+	 * First calculate all the blocks we should journal_access
+	 * and journal_dirty.  The first block should always be touched.
+	 */
+	ret = ocfs2_journal_dirty(handle, bhs[0]);
+	if (ret)
+		mlog_errno(ret);
+
+	/* calc the data. */
+	off = le16_to_cpu(xe->xe_name_offset);
+	block_off = off >> inode->i_sb->s_blocksize_bits;
+	ret = ocfs2_journal_dirty(handle, bhs[block_off]);
+	if (ret)
+		mlog_errno(ret);
+
+	return ret;
+}
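+
+/*
+ * A worked example of the offset-to-block mapping used above and in
+ * ocfs2_xattr_bucket_get_val() (hypothetical geometry, not a requirement):
+ * with 512-byte blocks (s_blocksize_bits = 9) and xe_name_offset = 1300,
+ *
+ *	block_off = 1300 >> 9  = 2
+ *	offs      = 1300 % 512 = 276
+ *
+ * so the name/value pair lives 276 bytes into the third block of the
+ * bucket, and only bhs[0] (the entry header) and bhs[2] need to be
+ * dirtied after a set.
+ */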
+ */ +static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + u32 name_hash, + int local) +{ + int i, ret; + handle_t *handle = NULL; + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", + (unsigned long)xi->value_len, xi->name_index, + (unsigned long long)xs->bucket.bhs[0]->b_blocknr); + + if (!xs->bucket.bhs[1]) { + ret = ocfs2_read_blocks(inode, + xs->bucket.bhs[0]->b_blocknr + 1, + blk_per_bucket - 1, &xs->bucket.bhs[1], + 0); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + handle = ocfs2_start_trans(osb, blk_per_bucket); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + handle = NULL; + mlog_errno(ret); + goto out; + } + + for (i = 0; i < blk_per_bucket; i++) { + ret = ocfs2_journal_access(handle, inode, xs->bucket.bhs[i], + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + } + + ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local); + + /*Only dirty the blocks we have touched in set xattr. */ + ret = ocfs2_xattr_bucket_handle_journal(inode, handle, xs, + xs->bucket.bhs, blk_per_bucket); + if (ret) + mlog_errno(ret); +out: + ocfs2_commit_trans(osb, handle); + + return ret; +} + +static int ocfs2_xattr_value_update_size(struct inode *inode, + struct buffer_head *xe_bh, + struct ocfs2_xattr_entry *xe, + u64 new_size) +{ + int ret; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + handle_t *handle = NULL; + + handle = ocfs2_start_trans(osb, 1); + if (IS_ERR(handle)) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access(handle, inode, xe_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out_commit; + } + + xe->xe_value_size = cpu_to_le64(new_size); + + ret = ocfs2_journal_dirty(handle, xe_bh); + if (ret < 0) + mlog_errno(ret); + +out_commit: + ocfs2_commit_trans(osb, handle); +out: + return ret; +} + +/* + * Truncate the specified xe_off entry in xattr bucket. + * bucket is indicated by header_bh and len is the new length. + * Both the ocfs2_xattr_value_root and the entry will be updated here. + * + * Copy the new updated xe and xe_value_root to new_xe and new_xv if needed. + */ +static int ocfs2_xattr_bucket_value_truncate(struct inode *inode, + struct buffer_head *header_bh, + int xe_off, + int len) +{ + int ret, offset; + u64 value_blk; + struct buffer_head *value_bh = NULL; + struct ocfs2_xattr_value_root *xv; + struct ocfs2_xattr_entry *xe; + struct ocfs2_xattr_header *xh = + (struct ocfs2_xattr_header *)header_bh->b_data; + size_t blocksize = inode->i_sb->s_blocksize; + + xe = &xh->xh_entries[xe_off]; + + BUG_ON(!xe || ocfs2_xattr_is_local(xe)); + + offset = le16_to_cpu(xe->xe_name_offset) + + OCFS2_XATTR_SIZE(xe->xe_name_len); + + value_blk = offset / blocksize; + + /* We don't allow ocfs2_xattr_value to be stored in different block. 
	 */
+	BUG_ON(value_blk != (offset + OCFS2_XATTR_ROOT_SIZE - 1) / blocksize);
+	value_blk += header_bh->b_blocknr;
+
+	ret = ocfs2_read_block(inode, value_blk, &value_bh);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	xv = (struct ocfs2_xattr_value_root *)
+		(value_bh->b_data + offset % blocksize);
+
+	mlog(0, "truncate %u in xattr bucket %llu to %d bytes.\n",
+	     xe_off, (unsigned long long)header_bh->b_blocknr, len);
+	ret = ocfs2_xattr_value_truncate(inode, value_bh, xv, len);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_xattr_value_update_size(inode, header_bh, xe, len);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+out:
+	brelse(value_bh);
+	return ret;
+}
+
+static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode,
+						struct ocfs2_xattr_search *xs,
+						int len)
+{
+	int ret, offset;
+	struct ocfs2_xattr_entry *xe = xs->here;
+	struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base;
+
+	BUG_ON(!xs->bucket.bhs[0] || !xe || ocfs2_xattr_is_local(xe));
+
+	offset = xe - xh->xh_entries;
+	ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket.bhs[0],
+						offset, len);
+	if (ret)
+		mlog_errno(ret);
+
+	return ret;
+}
+
+static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode,
+						struct ocfs2_xattr_search *xs,
+						char *val,
+						int value_len)
+{
+	int offset;
+	struct ocfs2_xattr_value_root *xv;
+	struct ocfs2_xattr_entry *xe = xs->here;
+
+	BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe));
+
+	offset = le16_to_cpu(xe->xe_name_offset) +
+		 OCFS2_XATTR_SIZE(xe->xe_name_len);
+
+	xv = (struct ocfs2_xattr_value_root *)(xs->base + offset);
+
+	return __ocfs2_xattr_set_value_outside(inode, xv, val, value_len);
+}
+
+static int ocfs2_rm_xattr_cluster(struct inode *inode,
+				  struct buffer_head *root_bh,
+				  u64 blkno,
+				  u32 cpos,
+				  u32 len)
+{
+	int ret;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct inode *tl_inode = osb->osb_tl_inode;
+	handle_t *handle;
+	struct ocfs2_xattr_block *xb =
+			(struct ocfs2_xattr_block *)root_bh->b_data;
+	struct ocfs2_alloc_context *meta_ac = NULL;
+	struct ocfs2_cached_dealloc_ctxt dealloc;
+	struct ocfs2_extent_tree et;
+
+	ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh);
+
+	ocfs2_init_dealloc_ctxt(&dealloc);
+
+	mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n",
+	     cpos, len, (unsigned long long)blkno);
+
+	ocfs2_remove_xattr_clusters_from_cache(inode, blkno, len);
+
+	ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac);
+	if (ret) {
+		mlog_errno(ret);
+		return ret;
+	}
+
+	mutex_lock(&tl_inode->i_mutex);
+
+	if (ocfs2_truncate_log_needs_flush(osb)) {
+		ret = __ocfs2_flush_truncate_log(osb);
+		if (ret < 0) {
+			mlog_errno(ret);
+			goto out;
+		}
+	}
+
+	handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_journal_access(handle, inode, root_bh,
+				   OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac,
+				  &dealloc);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, -len);
+
+	ret = ocfs2_journal_dirty(handle, root_bh);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	ret = ocfs2_truncate_log_append(osb, handle, blkno, len);
+	if (ret)
+		mlog_errno(ret);
+
+out_commit:
+	ocfs2_commit_trans(osb, handle);
+out:
+	ocfs2_schedule_truncate_log_flush(osb, 1);
+
+	mutex_unlock(&tl_inode->i_mutex);
+
+	if (meta_ac)
+		ocfs2_free_alloc_context(meta_ac);
+
+	ocfs2_run_deallocs(osb, &dealloc);
+
+	return ret;
+}
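+
+/*
+ * Note the teardown order in ocfs2_rm_xattr_cluster() above: the extent
+ * is removed from the xattr tree inside the transaction, the freed
+ * clusters are recorded in the truncate log, and the actual deallocation
+ * (ocfs2_run_deallocs) only happens after the transaction has been
+ * committed and the truncate log flush has been scheduled.
+ */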
+
+static void ocfs2_xattr_bucket_remove_xs(struct inode *inode,
+					 struct ocfs2_xattr_search *xs)
+{
+	handle_t *handle = NULL;
+	struct ocfs2_xattr_header *xh = xs->bucket.xh;
+	struct ocfs2_xattr_entry *last = &xh->xh_entries[
+						le16_to_cpu(xh->xh_count) - 1];
+	int ret = 0;
+
+	handle = ocfs2_start_trans((OCFS2_SB(inode->i_sb)), 1);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		return;
+	}
+
+	ret = ocfs2_journal_access(handle, inode, xs->bucket.bhs[0],
+				   OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	/* Remove the old entry. */
+	memmove(xs->here, xs->here + 1,
+		(void *)last - (void *)xs->here);
+	memset(last, 0, sizeof(struct ocfs2_xattr_entry));
+	le16_add_cpu(&xh->xh_count, -1);
+
+	ret = ocfs2_journal_dirty(handle, xs->bucket.bhs[0]);
+	if (ret < 0)
+		mlog_errno(ret);
+out_commit:
+	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+}
+
+/*
+ * Set the xattr name/value in the bucket specified in xs.
+ *
+ * As the new value in xi may be stored either in the bucket or in an
+ * outside cluster, we divide the whole process into these steps:
+ * 1. Insert the name/value into the bucket
+ *    (ocfs2_xattr_set_entry_in_bucket).
+ * 2. Truncate the outside cluster
+ *    (ocfs2_xattr_bucket_value_truncate_xs).
+ * 3. Set the value in the outside cluster
+ *    (ocfs2_xattr_bucket_set_value_outside).
+ * 4. If the clusters for the new outside value can't be allocated,
+ *    free the xattr we allocated during the set.
+ */
+static int ocfs2_xattr_set_in_bucket(struct inode *inode,
+				     struct ocfs2_xattr_info *xi,
+				     struct ocfs2_xattr_search *xs)
+{
+	int ret, local = 1;
+	size_t value_len;
+	char *val = (char *)xi->value;
+	struct ocfs2_xattr_entry *xe = xs->here;
+	u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name,
+					      strlen(xi->name));
+
+	if (!xs->not_found && !ocfs2_xattr_is_local(xe)) {
+		/*
+		 * We need to truncate the xattr storage first.
+		 *
+		 * If both the old and new value are stored in
+		 * an outside block, we only need to truncate
+		 * the storage and then set the value outside.
+		 *
+		 * If the new value should be stored within the block,
+		 * we should free all the outside blocks first and
+		 * the modification to the xattr block will be done
+		 * by the following steps.
+		 */
+		if (xi->value_len > OCFS2_XATTR_INLINE_SIZE)
+			value_len = xi->value_len;
+		else
+			value_len = 0;
+
+		ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
+							   value_len);
+		if (ret)
+			goto out;
+
+		if (value_len)
+			goto set_value_outside;
+	}
+
+	value_len = xi->value_len;
+	/* So we have to handle the inside block change now. */
+	if (value_len > OCFS2_XATTR_INLINE_SIZE) {
+		/*
+		 * If the new value will be stored outside of the block,
+		 * initialize a new empty value root and insert it first.
+		 */
+		local = 0;
+		xi->value = &def_xv;
+		xi->value_len = OCFS2_XATTR_ROOT_SIZE;
+	}
+
+	ret = ocfs2_xattr_set_entry_in_bucket(inode, xi, xs, name_hash, local);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (value_len <= OCFS2_XATTR_INLINE_SIZE)
+		goto out;
+
+	/* allocate the space now for the outside block storage. */
+	ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs,
+						   value_len);
+	if (ret) {
+		mlog_errno(ret);
+
+		if (xs->not_found) {
+			/*
+			 * We can't allocate enough clusters for the outside
+			 * storage and we have already allocated the xattr,
+			 * so we need to remove it.
+ */ + ocfs2_xattr_bucket_remove_xs(inode, xs); + } + goto out; + } + +set_value_outside: + ret = ocfs2_xattr_bucket_set_value_outside(inode, xs, val, value_len); +out: + return ret; +} + +/* + * check whether the xattr bucket is filled up with the same hash value. + * If we want to insert the xattr with the same hash, return -ENOSPC. + * If we want to insert a xattr with different hash value, go ahead + * and ocfs2_divide_xattr_bucket will handle this. + */ +static int ocfs2_check_xattr_bucket_collision(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + const char *name) +{ + struct ocfs2_xattr_header *xh = bucket->xh; + u32 name_hash = ocfs2_xattr_name_hash(inode, name, strlen(name)); + + if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash)) + return 0; + + if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash == + xh->xh_entries[0].xe_name_hash) { + mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, " + "hash = %u\n", + (unsigned long long)bucket->bhs[0]->b_blocknr, + le32_to_cpu(xh->xh_entries[0].xe_name_hash)); + return -ENOSPC; + } + + return 0; +} + +static int ocfs2_xattr_set_entry_index_block(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs) +{ + struct ocfs2_xattr_header *xh; + struct ocfs2_xattr_entry *xe; + u16 count, header_size, xh_free_start; + int i, free, max_free, need, old; + size_t value_size = 0, name_len = strlen(xi->name); + size_t blocksize = inode->i_sb->s_blocksize; + int ret, allocation = 0; + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb); + + mlog_entry("Set xattr %s in xattr index block\n", xi->name); + +try_again: + xh = xs->header; + count = le16_to_cpu(xh->xh_count); + xh_free_start = le16_to_cpu(xh->xh_free_start); + header_size = sizeof(struct ocfs2_xattr_header) + + count * sizeof(struct ocfs2_xattr_entry); + max_free = OCFS2_XATTR_BUCKET_SIZE - + le16_to_cpu(xh->xh_name_value_len) - header_size; + + mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size " + "of %u which exceed block size\n", + (unsigned long long)xs->bucket.bhs[0]->b_blocknr, + header_size); + + if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) + value_size = OCFS2_XATTR_ROOT_SIZE; + else if (xi->value) + value_size = OCFS2_XATTR_SIZE(xi->value_len); + + if (xs->not_found) + need = sizeof(struct ocfs2_xattr_entry) + + OCFS2_XATTR_SIZE(name_len) + value_size; + else { + need = value_size + OCFS2_XATTR_SIZE(name_len); + + /* + * We only replace the old value if the new length is smaller + * than the old one. Otherwise we will allocate new space in the + * bucket to store it. + */ + xe = xs->here; + if (ocfs2_xattr_is_local(xe)) + old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); + else + old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE); + + if (old >= value_size) + need = 0; + } + + free = xh_free_start - header_size; + /* + * We need to make sure the new name/value pair + * can exist in the same block. 
+ */ + if (xh_free_start % blocksize < need) + free -= xh_free_start % blocksize; + + mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, " + "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len =" + " %u\n", xs->not_found, + (unsigned long long)xs->bucket.bhs[0]->b_blocknr, + free, need, max_free, le16_to_cpu(xh->xh_free_start), + le16_to_cpu(xh->xh_name_value_len)); + + if (free < need || count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) { + if (need <= max_free && + count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) { + /* + * We can create the space by defragment. Since only the + * name/value will be moved, the xe shouldn't be changed + * in xs. + */ + ret = ocfs2_defrag_xattr_bucket(inode, &xs->bucket); + if (ret) { + mlog_errno(ret); + goto out; + } + + xh_free_start = le16_to_cpu(xh->xh_free_start); + free = xh_free_start - header_size; + if (xh_free_start % blocksize < need) + free -= xh_free_start % blocksize; + + if (free >= need) + goto xattr_set; + + mlog(0, "Can't get enough space for xattr insert by " + "defragment. Need %u bytes, but we have %d, so " + "allocate new bucket for it.\n", need, free); + } + + /* + * We have to add new buckets or clusters and one + * allocation should leave us enough space for insert. + */ + BUG_ON(allocation); + + /* + * We do not allow for overlapping ranges between buckets. And + * the maximum number of collisions we will allow for then is + * one bucket's worth, so check it here whether we need to + * add a new bucket for the insert. + */ + ret = ocfs2_check_xattr_bucket_collision(inode, + &xs->bucket, + xi->name); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_add_new_xattr_bucket(inode, + xs->xattr_bh, + xs->bucket.bhs[0]); + if (ret) { + mlog_errno(ret); + goto out; + } + + for (i = 0; i < blk_per_bucket; i++) + brelse(xs->bucket.bhs[i]); + + memset(&xs->bucket, 0, sizeof(xs->bucket)); + + ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, + xi->name_index, + xi->name, xs); + if (ret && ret != -ENODATA) + goto out; + xs->not_found = ret; + allocation = 1; + goto try_again; + } + +xattr_set: + ret = ocfs2_xattr_set_in_bucket(inode, xi, xs); +out: + mlog_exit(ret); + return ret; +} + +static int ocfs2_delete_xattr_in_bucket(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + void *para) +{ + int ret = 0; + struct ocfs2_xattr_header *xh = bucket->xh; + u16 i; + struct ocfs2_xattr_entry *xe; + + for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { + xe = &xh->xh_entries[i]; + if (ocfs2_xattr_is_local(xe)) + continue; + + ret = ocfs2_xattr_bucket_value_truncate(inode, + bucket->bhs[0], + i, 0); + if (ret) { + mlog_errno(ret); + break; + } + } + + return ret; +} + +static int ocfs2_delete_xattr_index_block(struct inode *inode, + struct buffer_head *xb_bh) +{ + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)xb_bh->b_data; + struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list; + int ret = 0; + u32 name_hash = UINT_MAX, e_cpos, num_clusters; + u64 p_blkno; + + if (le16_to_cpu(el->l_next_free_rec) == 0) + return 0; + + while (name_hash > 0) { + ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, + &e_cpos, &num_clusters, el); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_iterate_xattr_buckets(inode, p_blkno, num_clusters, + ocfs2_delete_xattr_in_bucket, + NULL); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_rm_xattr_cluster(inode, xb_bh, + p_blkno, e_cpos, num_clusters); + if (ret) { + mlog_errno(ret); + break; + } + + if (e_cpos == 0) + 
+
+/*
+ * 'trusted' attributes support
+ */
+static size_t ocfs2_xattr_trusted_list(struct inode *inode, char *list,
+				       size_t list_size, const char *name,
+				       size_t name_len)
+{
+	const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
+	const size_t total_len = prefix_len + name_len + 1;
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
+		memcpy(list + prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int ocfs2_xattr_trusted_get(struct inode *inode, const char *name,
+				   void *buffer, size_t size)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED, name,
+			       buffer, size);
+}
+
+static int ocfs2_xattr_trusted_set(struct inode *inode, const char *name,
+				   const void *value, size_t size, int flags)
+{
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+
+	return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED, name, value,
+			       size, flags);
+}
+
+struct xattr_handler ocfs2_xattr_trusted_handler = {
+	.prefix	= XATTR_TRUSTED_PREFIX,
+	.list	= ocfs2_xattr_trusted_list,
+	.get	= ocfs2_xattr_trusted_get,
+	.set	= ocfs2_xattr_trusted_set,
+};
+
+/*
+ * 'user' attributes support
+ */
+static size_t ocfs2_xattr_user_list(struct inode *inode, char *list,
+				    size_t list_size, const char *name,
+				    size_t name_len)
+{
+	const size_t prefix_len = XATTR_USER_PREFIX_LEN;
+	const size_t total_len = prefix_len + name_len + 1;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
+		return 0;
+
+	if (list && total_len <= list_size) {
+		memcpy(list, XATTR_USER_PREFIX, prefix_len);
+		memcpy(list + prefix_len, name, name_len);
+		list[prefix_len + name_len] = '\0';
+	}
+	return total_len;
+}
+
+static int ocfs2_xattr_user_get(struct inode *inode, const char *name,
+				void *buffer, size_t size)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
+		return -EOPNOTSUPP;
+	return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_USER, name,
+			       buffer, size);
+}
+
+static int ocfs2_xattr_user_set(struct inode *inode, const char *name,
+				const void *value, size_t size, int flags)
+{
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+	if (strcmp(name, "") == 0)
+		return -EINVAL;
+	if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
+		return -EOPNOTSUPP;
+
+	return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER, name, value,
+			       size, flags);
+}
+
+struct xattr_handler ocfs2_xattr_user_handler = {
+	.prefix	= XATTR_USER_PREFIX,
+	.list	= ocfs2_xattr_user_list,
+	.get	= ocfs2_xattr_user_get,
+	.set	= ocfs2_xattr_user_set,
+};
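The two handler tables above only take effect once they are hung off the superblock, which is where the generic VFS xattr code looks for them. A minimal sketch of that registration pattern follows; the NULL-terminated array is declared in xattr.h below, but exactly where this tree assigns it (presumably in super.c during mount) is outside this excerpt, so treat the fragment as illustrative rather than a quote from the tree:

	/* Sketch only, not part of the diff: NULL-terminated handler table. */
	struct xattr_handler *ocfs2_xattr_handlers[] = {
		&ocfs2_xattr_user_handler,
		&ocfs2_xattr_trusted_handler,
		NULL	/* terminator required by the VFS scan */
	};

	/* At mount time, e.g. somewhere in the fill_super path: */
	sb->s_xattr = ocfs2_xattr_handlers;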
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
new file mode 100644
index 0000000..1d8314c
--- /dev/null
+++ b/fs/ocfs2/xattr.h
@@ -0,0 +1,42 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * xattr.h
+ *
+ * Copyright (C) 2004, 2008 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_XATTR_H
+#define OCFS2_XATTR_H
+
+#include <linux/init.h>
+#include <linux/xattr.h>
+
+enum ocfs2_xattr_type {
+	OCFS2_XATTR_INDEX_USER = 1,
+	OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS,
+	OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
+	OCFS2_XATTR_INDEX_TRUSTED,
+	OCFS2_XATTR_INDEX_SECURITY,
+	OCFS2_XATTR_MAX
+};
+
+extern struct xattr_handler ocfs2_xattr_user_handler;
+extern struct xattr_handler ocfs2_xattr_trusted_handler;
+extern struct xattr_handler *ocfs2_xattr_handlers[];
+
+ssize_t ocfs2_listxattr(struct dentry *, char *, size_t);
+int ocfs2_xattr_set(struct inode *, int, const char *, const void *,
+		    size_t, int);
+int ocfs2_xattr_remove(struct inode *, struct buffer_head *);
+
+#endif /* OCFS2_XATTR_H */
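
The .prefix fields in the handlers above drive a simple first-match scan over the NULL-terminated table: the generic xattr code finds the handler whose prefix matches the attribute name and hands the handler only the remainder of the name, which is why the ocfs2 callbacks reject an empty suffix with -EINVAL. A self-contained userspace sketch of that dispatch pattern, with all names here illustrative rather than taken from this tree:

#include <stdio.h>
#include <string.h>

/* Mirrors the shape of struct xattr_handler as used above. */
struct handler {
	const char *prefix;
};

static const struct handler user_h    = { "user." };
static const struct handler trusted_h = { "trusted." };
static const struct handler *handlers[] = { &user_h, &trusted_h, NULL };

/* First handler whose prefix matches the attribute name wins. */
static const struct handler *find_handler(const char *name)
{
	const struct handler **h;

	for (h = handlers; *h; h++)
		if (!strncmp(name, (*h)->prefix, strlen((*h)->prefix)))
			return *h;
	return NULL;	/* the VFS maps this case to -EOPNOTSUPP */
}

int main(void)
{
	const struct handler *h = find_handler("user.comment");

	/* The real handler would then be called with the suffix, "comment". */
	printf("matched prefix: %s\n", h ? h->prefix : "(none)");
	return 0;
}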