cluster: RHEL60 - gfs2_convert: corrupts file system when directory has di_height 3
by Lon Hohberger
Gitweb: http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=...
Commit: 69084346f4f4fc656853578c04b039a964fdc041
Parent: add3241e26811a0c98524add304312609166f7ae
Author: Abhijith Das <adas(a)redhat.com>
AuthorDate: Thu Oct 14 10:47:56 2010 -0500
Committer: Lon Hohberger <lhh(a)redhat.com>
CommitterDate: Fri Nov 5 09:39:06 2010 -0400
gfs2_convert: corrupts file system when directory has di_height 3
This patch fixes the conversion of directory inodes with height >= 2.
It moves a lot of code around, mainly because two functions
(adjust_indirect_blocks) and (adjust_jdata_inode) were doing a lot of the same
things and the latter function was too long. Adding support for directory
conversion would've added a third function that would duplicate a lot of the
same code... so I consolidated the common things into two smaller functions,
now called adjust_indirect_blocks() and get_inode_metablocks(). The file-type
specific functions are now in two separate functions fix_ind_jdata() and
fix_ind_reg_or_dir().
Resolves: rhbz#630005
Signed-off-by: Abhi Das <adas(a)redhat.com>
---
gfs2/convert/gfs2_convert.c | 458 +++++++++++++++++--------------------------
1 files changed, 177 insertions(+), 281 deletions(-)
diff --git a/gfs2/convert/gfs2_convert.c b/gfs2/convert/gfs2_convert.c
index 6a754b1..e75fafe 100644
--- a/gfs2/convert/gfs2_convert.c
+++ b/gfs2/convert/gfs2_convert.c
@@ -416,180 +416,6 @@ static void fix_metatree(struct gfs2_sbd *sbp, struct gfs2_inode *ip,
/* */
/* Adapted from gfs2_fsck metawalk.c's build_and_check_metalist */
/* ------------------------------------------------------------------------- */
-static int adjust_indirect_blocks(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
-{
- uint32_t gfs2_hgt;
- struct gfs2_buffer_head *bh;
- osi_list_t *tmp, *x;
- int h, header_size, bufsize, ptrnum;
- uint64_t *ptr1, block;
- uint64_t dinode_size;
- int error = 0, di_height;
- struct blocklist blocks, *blk, *newblk;
- struct metapath gfs2mp;
- struct gfs2_buffer_head *dibh = ip->i_bh;
-
- /* if there are no indirect blocks to check */
- if (ip->i_di.di_height <= 1)
- return 0;
-
- osi_list_init(&blocks.list);
-
- /* Add the dinode block to the blocks list */
- blk = malloc(sizeof(struct blocklist));
- if (!blk) {
- log_crit("Error: Can't allocate memory"
- " for indirect block fix.\n");
- return -1;
- }
- memset(blk, 0, sizeof(*blk));
- /* allocate a buffer to hold the pointers */
- bufsize = sbp->sd_inptrs * sizeof(uint64_t);
- blk->block = dibh->b_blocknr;
- blk->ptrbuf = malloc(bufsize);
- if (!blk->ptrbuf) {
- log_crit("Error: Can't allocate memory"
- " for file conversion.\n");
- free(blk);
- return -1;
- }
- memset(blk->ptrbuf, 0, bufsize);
- /* Fill in the pointers from the dinode buffer */
- memcpy(blk->ptrbuf, dibh->b_data + sizeof(struct gfs_dinode),
- sbp->bsize - sizeof(struct gfs_dinode));
- /* Zero out the pointers so we can fill them in later. */
- memset(dibh->b_data + sizeof(struct gfs_dinode), 0,
- sbp->bsize - sizeof(struct gfs_dinode));
- osi_list_add_prev(&blk->list, &blocks.list);
-
- /* Now run the metadata chain and build lists of all metadata blocks */
- osi_list_foreach(tmp, &blocks.list) {
- blk = osi_list_entry(tmp, struct blocklist, list);
-
- if (blk->height >= ip->i_di.di_height - 1)
- continue;
- header_size = (blk->height > 0 ? sizeof(struct gfs_indirect) :
- sizeof(struct gfs_dinode));
- for (ptr1 = (uint64_t *)blk->ptrbuf, ptrnum = 0;
- ptrnum < sbp->sd_inptrs; ptr1++, ptrnum++) {
- if (!*ptr1)
- continue;
-
- block = be64_to_cpu(*ptr1);
-
- newblk = malloc(sizeof(struct blocklist));
- if (!newblk) {
- log_crit("Error: Can't allocate memory"
- " for indirect block fix.\n");
- error = -1;
- goto out;
- }
- memset(newblk, 0, sizeof(*newblk));
- newblk->ptrbuf = malloc(bufsize);
- if (!newblk->ptrbuf) {
- log_crit("Error: Can't allocate memory"
- " for file conversion.\n");
- free(newblk);
- goto out;
- }
- memset(newblk->ptrbuf, 0, bufsize);
- newblk->block = block;
- newblk->height = blk->height + 1;
- /* Build the metapointer list from our predecessors */
- for (h = 0; h < blk->height; h++)
- newblk->mp.mp_list[h] = blk->mp.mp_list[h];
- newblk->mp.mp_list[h] = ptrnum;
- /* Queue it to be processed later on in the loop. */
- osi_list_add_prev(&newblk->list, &blocks.list);
-
- /* read the new metadata block's pointers */
- bh = bread(sbp, block);
- memcpy(newblk->ptrbuf, bh->b_data +
- sizeof(struct gfs_indirect), bufsize);
- /* Zero the buffer so we can fill it in later */
- memset(bh->b_data + sizeof(struct gfs_indirect), 0,
- bufsize);
- bmodified(bh);
- brelse(bh);
- /* Free the metadata block so we can reuse it.
- This allows us to convert a "full" file system. */
- ip->i_di.di_blocks--;
- gfs2_free_block(sbp, block);
- }
- }
-
- /* The gfs2 height may be different. We need to rebuild the
- metadata tree to the gfs2 height. */
- gfs2_hgt = calc_gfs2_tree_height(ip, ip->i_di.di_size);
- /* Save off the size because we're going to empty the contents
- and add the data blocks back in later. */
- dinode_size = ip->i_di.di_size;
- ip->i_di.di_size = 0ULL;
- di_height = ip->i_di.di_height;
- ip->i_di.di_height = 0;
-
- /* Now run through the block list a second time. If the block
- is the highest for metadata, rewrite the data to the gfs2
- offset. */
- osi_list_foreach_safe(tmp, &blocks.list, x) {
- unsigned int len;
- uint64_t *ptr2;
-
- blk = osi_list_entry(tmp, struct blocklist, list);
- /* If it's not metadata that holds data block pointers
- (i.e. metadata pointing to other metadata) */
- if (blk->height != di_height - 1) {
- osi_list_del(tmp);
- free(blk->ptrbuf);
- free(blk);
- continue;
- }
- /* Skip zero pointers at the start of the buffer. This may
- seem pointless, but the gfs1 blocks won't align with the
- gfs2 blocks. That means that a single block write of
- gfs1's pointers is likely to span two blocks on gfs2.
- That's a problem if the file system is full.
- So I'm trying to truncate the data at the start and end
- of the buffers (i.e. write only what we need to). */
- len = bufsize;
- for (ptr1 = (uint64_t *)blk->ptrbuf, ptrnum = 0;
- ptrnum < sbp->sd_inptrs; ptr1++, ptrnum++) {
- if (*ptr1 != 0x00)
- break;
- len -= sizeof(uint64_t);
- }
- /* Skip zero bytes at the end of the buffer */
- ptr2 = (uint64_t *)(blk->ptrbuf + bufsize) - 1;
- while (len > 0 && *ptr2 == 0) {
- ptr2--;
- len -= sizeof(uint64_t);
- }
- blk->mp.mp_list[di_height - 1] = ptrnum;
- mp_gfs1_to_gfs2(sbp, di_height, gfs2_hgt, &blk->mp, &gfs2mp);
- memcpy(&blk->mp, &gfs2mp, sizeof(struct metapath));
- blk->height -= di_height - gfs2_hgt;
- if (len)
- fix_metatree(sbp, ip, blk, ptr1, len);
- osi_list_del(tmp);
- free(blk->ptrbuf);
- free(blk);
- }
- ip->i_di.di_size = dinode_size;
-
- /* Set the new dinode height, which may or may not have changed. */
- /* The caller will take it from the ip and write it to the buffer */
- ip->i_di.di_height = gfs2_hgt;
- return 0;
-
-out:
- while (!osi_list_empty(&blocks.list)) {
- blk = osi_list_entry(tmp, struct blocklist, list);
- osi_list_del(&blocks.list);
- free(blk->ptrbuf);
- free(blk);
- }
- return error;
-}
static void jdata_mp_gfs1_to_gfs2(struct gfs2_sbd *sbp, int gfs1_h, int gfs2_h,
struct metapath *gfs1mp, struct metapath *gfs2mp,
@@ -696,42 +522,23 @@ static void fix_jdatatree(struct gfs2_sbd *sbp, struct gfs2_inode *ip,
}
}
-static int adjust_jdata_inode(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
+static int get_inode_metablocks(struct gfs2_sbd *sbp, struct gfs2_inode *ip, struct blocklist *blocks)
{
- uint32_t gfs2_hgt;
- struct gfs2_buffer_head *bh;
- osi_list_t *tmp, *x;
- int h, header_size, bufsize, ptrnum;
+ struct blocklist *blk, *newblk;
+ struct gfs2_buffer_head *bh, *dibh = ip->i_bh;
+ osi_list_t *tmp;
uint64_t *ptr1, block;
- uint64_t dinode_size;
- int error = 0, di_height;
- struct blocklist blocks, *blk, *newblk;
- struct metapath gfs2mp;
- struct gfs2_buffer_head *dibh = ip->i_bh;
-
- /* Don't have to worry about things with stuffed inodes */
- if (ip->i_di.di_height == 0)
- return 0;
+ int h, header_size, ptrnum;
+ int bufsize = sbp->bsize - sizeof(struct gfs_indirect);
- osi_list_init(&blocks.list);
-
- /* Add the dinode block to the blocks list */
+ /* Add dinode block to the list */
blk = malloc(sizeof(struct blocklist));
if (!blk) {
- log_crit("Error: Can't allocate memory"
- " for indirect block fix.\n");
+ log_crit("Error: Can't allocate memory for indirect block fix\n");
return -1;
}
memset(blk, 0, sizeof(*blk));
- /* allocate a buffer to hold the pointers or data */
- bufsize = sbp->bsize - sizeof(struct gfs2_meta_header);
blk->block = dibh->b_blocknr;
- /*
- * blk->ptrbuf either contains
- * a) diptrs (for height=0)
- * b) inptrs (for height=1 to di_height - 1)
- * c) data for height = di_height
- */
blk->ptrbuf = malloc(bufsize);
if (!blk->ptrbuf) {
log_crit("Error: Can't allocate memory"
@@ -746,10 +553,10 @@ static int adjust_jdata_inode(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
/* Zero out the pointers so we can fill them in later. */
memset(dibh->b_data + sizeof(struct gfs_dinode), 0,
sbp->bsize - sizeof(struct gfs_dinode));
- osi_list_add_prev(&blk->list, &blocks.list);
+ osi_list_add_prev(&blk->list, &blocks->list);
/* Now run the metadata chain and build lists of all metadata blocks */
- osi_list_foreach(tmp, &blocks.list) {
+ osi_list_foreach(tmp, &blocks->list) {
blk = osi_list_entry(tmp, struct blocklist, list);
if (blk->height >= ip->i_di.di_height - 1)
@@ -760,23 +567,19 @@ static int adjust_jdata_inode(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
ptrnum < sbp->sd_inptrs; ptr1++, ptrnum++) {
if (!*ptr1)
continue;
-
block = be64_to_cpu(*ptr1);
newblk = malloc(sizeof(struct blocklist));
if (!newblk) {
- log_crit("Error: Can't allocate memory"
- " for indirect block fix.\n");
- error = -1;
- goto out;
+ log_crit("Error: Can't allocate memory for indirect block fix.\n");
+ return -1;
}
memset(newblk, 0, sizeof(*newblk));
newblk->ptrbuf = malloc(bufsize);
if (!newblk->ptrbuf) {
- log_crit("Error: Can't allocate memory"
- " for file conversion.\n");
+ log_crit("Error: Can't allocate memory for file conversion.\n");
free(newblk);
- goto out;
+ return -1;
}
memset(newblk->ptrbuf, 0, bufsize);
newblk->block = block;
@@ -786,15 +589,12 @@ static int adjust_jdata_inode(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
newblk->mp.mp_list[h] = blk->mp.mp_list[h];
newblk->mp.mp_list[h] = ptrnum;
/* Queue it to be processed later on in the loop. */
- osi_list_add_prev(&newblk->list, &blocks.list);
-
+ osi_list_add_prev(&newblk->list, &blocks->list);
/* read the new metadata block's pointers */
bh = bread(sbp, block);
- memcpy(newblk->ptrbuf, bh->b_data + sizeof(struct gfs_indirect),
- sbp->bsize - sizeof(struct gfs_indirect));
+ memcpy(newblk->ptrbuf, bh->b_data + sizeof(struct gfs_indirect), bufsize);
/* Zero the buffer so we can fill it in later */
- memset(bh->b_data + sizeof(struct gfs_indirect), 0,
- sbp->bsize - sizeof(struct gfs_indirect));
+ memset(bh->b_data + sizeof(struct gfs_indirect), 0, bufsize);
bmodified(bh);
brelse(bh);
/* Free the block so we can reuse it. This allows us to
@@ -803,6 +603,140 @@ static int adjust_jdata_inode(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
gfs2_free_block(sbp, block);
}
}
+ return 0;
+}
+
+static int fix_ind_reg_or_dir(struct gfs2_sbd *sbp, struct gfs2_inode *ip, uint32_t di_height,
+ uint32_t gfs2_hgt, struct blocklist *blk, struct blocklist *blocks)
+{
+ unsigned int len, bufsize;
+ uint64_t *ptr1, *ptr2;
+ int ptrnum;
+ struct metapath gfs2mp;
+
+ bufsize = sbp->bsize - sizeof(struct gfs_indirect);
+ len = bufsize;
+
+ /* Skip zero pointers at the start of the buffer. This may
+ seem pointless, but the gfs1 blocks won't align with the
+ gfs2 blocks. That means that a single block write of
+ gfs1's pointers is likely to span two blocks on gfs2.
+ That's a problem if the file system is full.
+ So I'm trying to truncate the data at the start and end
+ of the buffers (i.e. write only what we need to). */
+ for (ptr1 = (uint64_t *)blk->ptrbuf, ptrnum = 0;
+ ptrnum < sbp->sd_inptrs; ptr1++, ptrnum++) {
+ if (*ptr1 != 0x00)
+ break;
+ len -= sizeof(uint64_t);
+ }
+ /* Skip zero bytes at the end of the buffer */
+ ptr2 = (uint64_t *)(blk->ptrbuf + bufsize) - 1;
+ while (len > 0 && *ptr2 == 0) {
+ ptr2--;
+ len -= sizeof(uint64_t);
+ }
+ blk->mp.mp_list[di_height - 1] = ptrnum;
+ mp_gfs1_to_gfs2(sbp, di_height, gfs2_hgt, &blk->mp, &gfs2mp);
+ memcpy(&blk->mp, &gfs2mp, sizeof(struct metapath));
+ blk->height -= di_height - gfs2_hgt;
+ if (len)
+ fix_metatree(sbp, ip, blk, ptr1, len);
+
+ return 0;
+}
+
+static int fix_ind_jdata(struct gfs2_sbd *sbp, struct gfs2_inode *ip, uint32_t di_height,
+ uint32_t gfs2_hgt, uint64_t dinode_size, struct blocklist *blk,
+ struct blocklist *blocks)
+{
+ struct blocklist *newblk;
+ unsigned int len, bufsize;
+ uint64_t *ptr1, block;
+ int ptrnum, h;
+ struct metapath gfs2mp;
+ struct gfs2_buffer_head *bh;
+
+ bufsize = sbp->bsize - sizeof(struct gfs2_meta_header);
+ /*
+ * For each metadata block that holds jdata block pointers,
+ * get the blk pointers and copy them block by block
+ */
+ for (ptr1 = (uint64_t *) blk->ptrbuf, ptrnum = 0;
+ ptrnum < sbp->sd_inptrs; ptr1++, ptrnum++) {
+ if (!*ptr1)
+ continue;
+ block = be64_to_cpu(*ptr1);
+
+ newblk = malloc(sizeof(struct blocklist));
+ if (!newblk) {
+ log_crit("Error: Can't allocate memory for indirect block fix.\n");
+ return -1;
+ }
+ memset(newblk, 0, sizeof(*newblk));
+ newblk->ptrbuf = malloc(bufsize);
+ if (!newblk->ptrbuf) {
+ log_crit("Error: Can't allocate memory for file conversion.\n");
+ free(newblk);
+ return -1;
+ }
+ memset(newblk->ptrbuf, 0, bufsize);
+ newblk->block = block;
+ newblk->height = blk->height + 1;
+ /* Build the metapointer list from our predecessors */
+ for (h=0; h < blk->height; h++)
+ newblk->mp.mp_list[h] = blk->mp.mp_list[h];
+ newblk->mp.mp_list[h] = ptrnum;
+ bh = bread(sbp, block);
+ /* This is a data block. i.e newblk->height == ip->i_di.di_height */
+ /* read in the jdata block */
+ memcpy(newblk->ptrbuf, bh->b_data +
+ sizeof(struct gfs2_meta_header), bufsize);
+ memset(bh->b_data + sizeof(struct gfs2_meta_header), 0, bufsize);
+ bmodified(bh);
+ brelse(bh);
+ /* Free the block so we can reuse it. This allows us to
+ convert a "full" file system */
+ ip->i_di.di_blocks--;
+ gfs2_free_block(sbp, block);
+
+ len = bufsize;
+ jdata_mp_gfs1_to_gfs2(sbp, di_height, gfs2_hgt, &newblk->mp, &gfs2mp,
+ &len, dinode_size);
+ memcpy(&newblk->mp, &gfs2mp, sizeof(struct metapath));
+ newblk->height -= di_height - gfs2_hgt;
+ if (len)
+ fix_jdatatree(sbp, ip, newblk, newblk->ptrbuf, len);
+ free(newblk->ptrbuf);
+ free(newblk);
+ }
+ return 0;
+}
+
+static int adjust_indirect_blocks(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
+{
+ uint64_t dinode_size;
+ uint32_t gfs2_hgt, di_height;
+ osi_list_t *tmp=NULL, *x;
+ struct blocklist blocks, *blk;
+ int error = 0;
+
+ int isdir = S_ISDIR(ip->i_di.di_mode); /* is always jdata */
+ int isjdata = ((GFS2_DIF_JDATA & ip->i_di.di_flags) && !isdir);
+ int isreg = (!isjdata && !isdir);
+
+ /* regular files and dirs are same upto height=2
+ jdata files (not dirs) are same only when height=0 */
+ if (((isreg||isdir) && ip->i_di.di_height <= 1) ||
+ (isjdata && ip->i_di.di_height == 0))
+ return 0; /* nothing to do */
+
+ osi_list_init(&blocks.list);
+
+ error = get_inode_metablocks(sbp, ip, &blocks);
+ if (error)
+ goto out;
+
/* The gfs2 height may be different. We need to rebuild the
metadata tree to the gfs2 height. */
gfs2_hgt = calc_gfs2_tree_height(ip, ip->i_di.di_size);
@@ -816,82 +750,36 @@ static int adjust_jdata_inode(struct gfs2_sbd *sbp, struct gfs2_inode *ip)
/* Now run through the block list a second time. If the block
is a data block, rewrite the data to the gfs2 offset. */
osi_list_foreach_safe(tmp, &blocks.list, x) {
- unsigned int len;
blk = osi_list_entry(tmp, struct blocklist, list);
- /* If it's not a highest level indirect block */
+ /* If it's not metadata that holds data block pointers
+ (i.e. metadata pointing to other metadata) */
if (blk->height != di_height - 1) {
osi_list_del(tmp);
free(blk->ptrbuf);
free(blk);
continue;
}
- /*
- * For each metadata block that holds jdata block pointers,
- * get the blk pointers and copy them block by block
- */
- for (ptr1 = (uint64_t *) blk->ptrbuf, ptrnum = 0;
- ptrnum < sbp->sd_inptrs; ptr1++, ptrnum++) {
- if (!*ptr1)
- continue;
- block = be64_to_cpu(*ptr1);
-
- newblk = malloc(sizeof(struct blocklist));
- if (!newblk) {
- log_crit("Error: Can't allocate memory"
- " for indirect block fix.\n");
- error = -1;
- goto out;
- }
- memset(newblk, 0, sizeof(*newblk));
- newblk->ptrbuf = malloc(bufsize);
- if (!newblk->ptrbuf) {
- log_crit("Error: Can't allocate memory"
- " for file conversion.\n");
- free(newblk);
- goto out;
- }
- memset(newblk->ptrbuf, 0, bufsize);
- newblk->block = block;
- newblk->height = blk->height + 1;
- /* Build the metapointer list from our predecessors */
- for (h=0; h < blk->height; h++)
- newblk->mp.mp_list[h] = blk->mp.mp_list[h];
- newblk->mp.mp_list[h] = ptrnum;
- bh = bread(sbp, block);
- /* This is a data block. i.e newblk->height == ip->i_di.di_height */
- /* read in the jdata block */
- memcpy(newblk->ptrbuf, bh->b_data +
- sizeof(struct gfs2_meta_header), bufsize);
- memset(bh->b_data + sizeof(struct gfs2_meta_header), 0,
- bufsize);
- bmodified(bh);
- brelse(bh);
- /* Free the block so we can reuse it. This allows us to
- convert a "full" file system */
- ip->i_di.di_blocks--;
- gfs2_free_block(sbp, block);
+ if (isreg || isdir) /* more or less same way to deal with either */
+ error = fix_ind_reg_or_dir(sbp, ip, di_height,
+ gfs2_hgt, blk, &blocks);
+ else if (isjdata)
+ error = fix_ind_jdata(sbp, ip, di_height, gfs2_hgt,
+ dinode_size, blk, &blocks);
+ if (error)
+ goto out;
- len = bufsize;
- jdata_mp_gfs1_to_gfs2(sbp, di_height, gfs2_hgt, &newblk->mp, &gfs2mp,
- &len, dinode_size);
- memcpy(&newblk->mp, &gfs2mp, sizeof(struct metapath));
- newblk->height -= di_height - gfs2_hgt;
- if (len)
- fix_jdatatree(sbp, ip, newblk, newblk->ptrbuf, len);
- free(newblk->ptrbuf);
- free(newblk);
- }
osi_list_del(tmp);
free(blk->ptrbuf);
free(blk);
}
+
ip->i_di.di_size = dinode_size;
/* Set the new dinode height, which may or may not have changed. */
/* The caller will take it from the ip and write it to the buffer */
ip->i_di.di_height = gfs2_hgt;
- return 0;
+ return error;
out:
while (!osi_list_empty(&blocks.list)) {
@@ -1056,12 +944,8 @@ static int adjust_inode(struct gfs2_sbd *sbp, struct gfs2_buffer_head *bh)
inode->i_di.di_goal_data = 0; /* make sure the upper 32b are 0 */
inode->i_di.di_goal_data = gfs1_dinode_struct->di_goal_dblk;
inode->i_di.di_generation = 0;
- if (!(inode->i_di.di_mode & S_IFDIR) &&
- inode->i_di.di_flags & GFS2_DIF_JDATA)
- ret = adjust_jdata_inode(sbp, inode);
- else
- ret = adjust_indirect_blocks(sbp, inode);
- if (ret)
+
+ if (adjust_indirect_blocks(sbp, inode))
return -1;
/* Check for cdpns */
if (inode->i_di.di_mode & S_IFLNK) {
@@ -1346,9 +1230,11 @@ static int fix_one_directory_exhash(struct gfs2_sbd *sbp, struct gfs2_inode *dip
leaf_block = be64_to_cpu(buf);
error = 0;
}
+ leaf_chain:
/* leaf blocks may be repeated, so skip the duplicates: */
if (leaf_block == prev_leaf_block) /* same block? */
continue; /* already converted */
+
prev_leaf_block = leaf_block;
/* read the leaf buffer in */
error = gfs2_get_leaf(dip, leaf_block, &bh_leaf);
@@ -1362,6 +1248,11 @@ static int fix_one_directory_exhash(struct gfs2_sbd *sbp, struct gfs2_inode *dip
brelse(bh_leaf);
if (dentmod && error == -EISDIR) /* dentmod was marked DT_DIR, break out */
break;
+ if (leaf.lf_next) { /* leaf has a leaf chain, process leaves in chain */
+ leaf_block = leaf.lf_next;
+ error = 0;
+ goto leaf_chain;
+ }
} /* for leaf_num */
return 0;
}/* fix_one_directory_exhash */
@@ -1400,6 +1291,10 @@ static int fix_directory_info(struct gfs2_sbd *sbp, osi_list_t *dir_to_fix)
osi_list_t *tmp, *fix;
struct inode_block *dir_iblk;
uint64_t offset, dirblock;
+ uint32_t gfs1_inptrs = sbp->sd_inptrs;
+ /* Directory inodes have been converted to gfs2, use gfs2 inptrs */
+ sbp->sd_inptrs = (sbp->bsize - sizeof(struct gfs2_meta_header))
+ / sizeof(uint64_t);
dirs_fixed = 0;
dirents_fixed = 0;
@@ -1430,6 +1325,7 @@ static int fix_directory_info(struct gfs2_sbd *sbp, osi_list_t *dir_to_fix)
osi_list_del(tmp);
free(tmp);
}
+ sbp->sd_inptrs = gfs1_inptrs;
return 0;
}/* fix_directory_info */
13 years, 7 months
resource-agents: master - overall cleanup and make ready for release
by Fabio M. Di Nitto
Gitweb: http://git.fedorahosted.org/git/resource-agents.git?p=resource-agents.git...
Commit: 487517a0935e80dbf9a1c01592c63dd5220563ac
Parent: aeb92be8857566888b238eba2295e306daa46d74
Author: Fabio M. Di Nitto <fdinitto(a)redhat.com>
AuthorDate: Fri Nov 5 05:55:27 2010 -0400
Committer: Fabio M. Di Nitto <fdinitto(a)redhat.com>
CommitterDate: Fri Nov 5 05:55:27 2010 -0400
overall cleanup and make ready for release
Signed-off-by: Fabio M. Di Nitto <fdinitto(a)redhat.com>
---
.gitignore | 2 +
Makefile.am | 7 +--
configure.ac | 18 -----
doc/Makefile.am | 6 +--
doc/gfs2.txt | 45 -------------
doc/journaling.txt | 155 ---------------------------------------------
doc/min-gfs.txt | 159 ----------------------------------------------
doc/usage.txt | 177 ----------------------------------------------------
8 files changed, 5 insertions(+), 564 deletions(-)
diff --git a/.gitignore b/.gitignore
index eeb5815..476babe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,10 @@ autoconf
autoheader
autom4te.cache
automake
+autoscan.log
compile
configure
+configure.scan
config.guess
config.log
config.sub
diff --git a/Makefile.am b/Makefile.am
index a570954..0a878ef 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -3,7 +3,8 @@ EXTRA_DIST = autogen.sh
AUTOMAKE_OPTIONS = foreign
MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure \
- missing install-sh
+ missing install-sh autoscan.log \
+ configure.scan
noinst_HEADERS = make/copyright.cf
@@ -11,10 +12,6 @@ SUBDIRS = rgmanager doc
install-exec-local:
$(INSTALL) -d $(DESTDIR)/$(LOGDIR)
- $(INSTALL) -d $(DESTDIR)/$(CLUSTERVARRUN)
- $(INSTALL) -d $(DESTDIR)/$(CLUSTERVARLIB)
uninstall-local:
rmdir $(DESTDIR)/$(LOGDIR) || :;
- rmdir $(DESTDIR)/$(CLUSTERVARRUN) || :;
- rmdir $(DESTDIR)/$(CLUSTERVARLIB) || :;
diff --git a/configure.ac b/configure.ac
index 29a92a5..b863d5b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -48,32 +48,14 @@ AC_ARG_ENABLE([debug],
[ --enable-debug enable debug build. ],
[ default="no" ])
-AC_ARG_WITH([default-config-dir],
- [ --default-config-dir=DIR
- cluster config directory. ],
- [ DEFAULT_CONFIG_DIR="$withval" ],
- [ DEFAULT_CONFIG_DIR="$sysconfdir/cluster" ])
-
-AC_ARG_WITH([default-config-file],
- [ --default-config-file=FILE
- cluster config file. ],
- [ DEFAULT_CONFIG_FILE="$withval" ],
- [ DEFAULT_CONFIG_FILE="cluster.conf" ])
-
## random vars
LOGDIR=${localstatedir}/log/cluster
-CLUSTERVARRUN=${localstatedir}/run/cluster
-CLUSTERVARLIB=${localstatedir}/lib/cluster
CLUSTERDATA=${datadir}/cluster
## do subst
-AC_SUBST([DEFAULT_CONFIG_DIR])
-AC_SUBST([DEFAULT_CONFIG_FILE])
AC_SUBST([LOGDIR])
-AC_SUBST([CLUSTERVARRUN])
-AC_SUBST([CLUSTERVARLIB])
AC_SUBST([CLUSTERDATA])
AC_CONFIG_FILES([Makefile
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 2e6a2ec..13035fc 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,10 +1,6 @@
MAINTAINERCLEANFILES = Makefile.in
-dist_doc_DATA = gfs2.txt \
- journaling.txt \
- min-gfs.txt \
- usage.txt \
- COPYING.applications \
+dist_doc_DATA = COPYING.applications \
COPYING.libraries \
COPYRIGHT \
README.licence
diff --git a/doc/gfs2.txt b/doc/gfs2.txt
deleted file mode 100644
index 88f0143..0000000
--- a/doc/gfs2.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Global File System
-------------------
-
-http://sources.redhat.com/cluster/
-
-GFS is a cluster file system. It allows a cluster of computers to
-simultaneously use a block device that is shared between them (with FC,
-iSCSI, NBD, etc). GFS reads and writes to the block device like a local
-file system, but also uses a lock module to allow the computers coordinate
-their I/O so file system consistency is maintained. One of the nifty
-features of GFS is perfect consistency -- changes made to the file system
-on one machine show up immediately on all other machines in the cluster.
-
-GFS uses interchangable inter-node locking mechanisms. Different lock
-modules can plug into GFS and each file system selects the appropriate
-lock module at mount time. Lock modules include:
-
- lock_nolock -- does no real locking and allows gfs to be used as a
- local file system
-
- lock_dlm -- uses a distributed lock manager (dlm) for inter-node locking
- The dlm is found at linux/fs/dlm/
-
-In addition to interfacing with an external locking manager, a gfs lock
-module is responsible for interacting with external cluster management
-systems. Lock_dlm depends on user space cluster management systems found
-at the URL above.
-
-To use gfs as a local file system, no external clustering systems are
-needed, simply:
-
- $ gfs2_mkfs -p lock_nolock -j 1 /dev/block_device
- $ mount -t gfs2 /dev/block_device /dir
-
-GFS2 is not on-disk compatible with previous versions of GFS.
-
-The following man pages can be found at the URL above:
- gfs2_mkfs to make a filesystem
- gfs2_fsck to repair a filesystem
- gfs2_grow to expand a filesystem online
- gfs2_jadd to add journals to a filesystem online
- gfs2_tool to manipulate, examine and tune a filesystem
- gfs2_quota to examine and change quota values in a filesystem
- mount.gfs2 to find mount options
-
diff --git a/doc/journaling.txt b/doc/journaling.txt
deleted file mode 100644
index e89eefa..0000000
--- a/doc/journaling.txt
+++ /dev/null
@@ -1,155 +0,0 @@
-o Journaling & Replay
-
-The fundamental problem with a journaled cluster filesystem is
-handling journal replay with multiple journals. A single block of
-metadata can be modified sequentially by many different nodes in the
-cluster. As the block is modified by each node, it gets logged in the
-journal for each node. If care is not taken, it's possible to get
-into a situation where a journal replay can actually corrupt a
-filesystem. The error scenario is:
-
-1) Node A modifies a metadata block by putting a updated copy into its
- incore log.
-2) Node B wants to read and modify the block so it requests the lock
- and a blocking callback is sent to Node A.
-3) Node A flushes its incore log to disk, and then syncs out the
- metadata block to its inplace location.
-4) Node A then releases the lock.
-5) Node B reads in the block and puts a modified copy into its ondisk
- log and then the inplace block location.
-6) Node A crashes.
-
-At this point, Node A's journal needs to be replayed. Since there is
-a newer version of block inplace, if that block is replayed, the
-filesystem will be corrupted. There are a few different ways of
-avoiding this problem.
-
-1) Generation Numbers (GFS1)
-
- Each metadata block has header in it that contains a 64-bit
- generation number. As each block is logged into a journal, the
- generation number is incremented. This provides a strict ordering
- of the different versions of the block a they are logged in the FS'
- different journals. When journal replay happens, each block in the
- journal is not replayed if generation number in the journal is less
- than the generation number in place. This ensures that a newer
- version of a block is never replaced with an older version. So,
- this solution basically allows multiple copies of the same block in
- different journals, but it allows you to always know which is the
- correct one.
-
- Pros:
-
- A) This method allows the fastest callbacks. To release a lock,
- the incore log for the lock must be flushed and then the inplace
- data and metadata must be synced. That's it. The sync
- operations involved are: start the log body and wait for it to
- become stable on the disk, synchronously write the commit block,
- start the inplace metadata and wait for it to become stable on
- the disk.
-
- Cons:
-
- A) Maintaining the generation numbers is expensive. All newly
- allocated metadata block must be read off the disk in order to
- figure out what the previous value of the generation number was.
- When deallocating metadata, extra work and care must be taken to
- make sure dirty data isn't thrown away in such a way that the
- generation numbers stop doing their thing.
- B) You can't continue to modify the filesystem during journal
- replay. Basically, replay of a block is a read-modify-write
- operation: the block is read from disk, the generation number is
- compared, and (maybe) the new version is written out. Replay
- requires that the R-M-W operation is atomic with respect to
- other R-M-W operations that might be happening (say by a normal
- I/O process). Since journal replay doesn't (and can't) play by
- the normal metadata locking rules, you can't count on them to
- protect replay. Hence GFS1, quieces all writes on a filesystem
- before starting replay. This provides the mutual exclusion
- required, but it's slow and unnecessarily interrupts service on
- the whole cluster.
-
-2) Total Metadata Sync (OCFS2)
-
- This method is really simple in that it uses exactly the same
- infrastructure that a local journaled filesystem uses. Every time
- a node receives a callback, it stops all metadata modification,
- syncs out the whole incore journal, syncs out any dirty data, marks
- the journal as being clean (unmounted), and then releases the lock.
- Because journal is marked as clean and recovery won't look at any
- of the journaled blocks in it, a valid copy of any particular block
- only exists in one journal at a time and that journal always the
- journal who modified it last.
-
- Pros:
-
- A) Very simple to implement.
- B) You can reuse journaling code from other places (such as JBD).
- C) No quiece necessary for replay.
- D) No need for generation numbers sprinkled throughout the metadata.
-
- Cons:
-
- A) This method has the slowest possible callbacks. The sync
- operations are: stop all metadata operations, start and wait for
- the log body, write the log commit block, start and wait for all
- the FS' dirty metadata, write an unmount block. Writing the
- metadata for the whole filesystem can be particularly expensive
- because it can be scattered all over the disk and there can be a
- whole journal's worth of it.
-
-3) Revocation of a lock's buffers (GFS2)
-
- This method prevents a block from appearing in more than one
- journal by canceling out the metadata blocks in the journal that
- belong to the lock being released. Journaling works very similarly
- to a local filesystem or to #2 above.
-
- The biggest difference is you have to keep track of buffers in the
- active region of the ondisk journal, even after the inplace blocks
- have been written back. This is done in GFS2 by adding a second
- part to the Active Items List. The first part (in GFS2 called
- AIL1) contains a list of all the blocks which have been logged to
- the journal, but not written back to their inplace location. Once
- an item in AIL1 has been written back to its inplace location, it
- is moved to AIL2. Once the tail of the log moves past the block's
- transaction in the log, it can be removed from AIL2.
-
- When a callback occurs, the log is flushed to the disk and the
- metadata for the lock is synced to disk. At this point, any
- metadata blocks for the lock that are in the current active region
- of the log will be in the AIL2 list. We then build a transaction
- that contains revoke tags for each buffer in the AIL2 list that
- belongs to that lock.
-
- Pros:
-
- A) No quiece necessary for Replay
- B) No need for generation numbers sprinkled throughout the
- metadata.
- C) The sync operations are: stop all metadata operations, start and
- wait for the log body, write the log commit block, start and
- wait for all the FS' dirty metadata, start and wait for the log
- body of a transaction that revokes any of the lock's metadata
- buffers in the journal's active region, and write the commit
- block for that transaction.
-
- Cons:
-
- A) Recovery takes two passes, one to find all the revoke tags in
- the log and one to replay the metadata blocks using the revoke
- tags as a filter. This is necessary for a local filesystem and
- the total sync method, too. It's just that there will probably
- be more tags.
-
-Comparing #2 and #3, both do extra I/O during a lock callback to make
-sure that any metadata blocks in the log for that lock will be
-removed. I believe #2 will be slower because syncing out all the
-dirty metadata for entire filesystem requires lots of little,
-scattered I/O across the whole disk. The extra I/O done by #3 is a
-log write to the disk. So, not only should it be less I/O, but it
-should also be better suited to get good performance out of the disk
-subsystem.
-
-KWP 07/06/05
-
diff --git a/doc/min-gfs.txt b/doc/min-gfs.txt
deleted file mode 100644
index af1399c..0000000
--- a/doc/min-gfs.txt
+++ /dev/null
@@ -1,159 +0,0 @@
-
-Minimum GFS HowTo
------------------
-
-The following gfs configuration requires a minimum amount of hardware and
-no expensive storage system. It's the cheapest and quickest way to "play"
-with gfs.
-
-
- ---------- ----------
- | GNBD | | GNBD |
- | client | | client | <-- these nodes use gfs
- | node2 | | node3 |
- ---------- ----------
- | |
- ------------------ IP network
- |
- ----------
- | GNBD |
- | server | <-- this node doesn't use gfs
- | node1 |
- ----------
-
-- There are three machines to use with hostnames: node1, node2, node3
-
-- node1 has an extra disk /dev/sda1 to use for gfs
- (this could be hda1 or an lvm LV or an md device)
-
-- node1 will use gnbd to export this disk to node2 and node3
-
-- Node1 cannot use gfs, it only acts as a gnbd server.
- (Node1 will /not/ actually be part of the cluster since it is only
- running the gnbd server.)
-
-- Only node2 and node3 will be in the cluster and use gfs.
- (A two-node cluster is a special case for cman, noted in the config below.)
-
-- There's not much point to using clvm in this setup so it's left out.
-
-- Download the "cluster" source tree.
-
-- Build and install from the cluster source tree. (The kernel components
- are not required on node1 which will only need the gnbd_serv program.)
-
- cd cluster
- ./configure --kernel_src=/path/to/kernel
- make; make install
-
-- Create /etc/cluster/cluster.conf on node2 with the following contents:
-
-<?xml version="1.0"?>
-<cluster name="gamma" config_version="1">
-
-<cman two_node="1" expected_votes="1">
-</cman>
-
-<clusternodes>
-<clusternode name="node2">
- <fence>
- <method name="single">
- <device name="gnbd" ipaddr="node2"/>
- </method>
- </fence>
-</clusternode>
-
-<clusternode name="node3">
- <fence>
- <method name="single">
- <device name="gnbd" ipaddr="node3"/>
- </method>
- </fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
- <fencedevice name="gnbd" agent="fence_gnbd" servers="node1"/>
-</fencedevices>
-
-</cluster>
-
-
-- load kernel modules on nodes
-
-node2 and node3> modprobe gnbd
-node2 and node3> modprobe gfs
-node2 and node3> modprobe lock_dlm
-
-- run the following commands
-
-node1> gnbd_serv -n
-node1> gnbd_export -c -d /dev/sda1 -e global_disk
-
-node2 and node3> gnbd_import -n -i node1
-node2 and node3> ccsd
-node2 and node3> cman_tool join
-node2 and node3> fence_tool join
-
-node2> gfs_mkfs -p lock_dlm -t gamma:gfs1 -j 2 /dev/gnbd/global_disk
-
-node2 and node3> mount -t gfs /dev/gnbd/global_disk /mnt
-
-- the end, you now have a gfs file system mounted on node2 and node3
-
-
-Appendix A
-----------
-
-To use manual fencing instead of gnbd fencing, the cluster.conf file
-would look like this:
-
-<?xml version="1.0"?>
-<cluster name="gamma" config_version="1">
-
-<cman two_node="1" expected_votes="1">
-</cman>
-
-<clusternodes>
-<clusternode name="node2">
- <fence>
- <method name="single">
- <device name="manual" ipaddr="node2"/>
- </method>
- </fence>
-</clusternode>
-
-<clusternode name="node3">
- <fence>
- <method name="single">
- <device name="manual" ipaddr="node3"/>
- </method>
- </fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
- <fencedevice name="manual" agent="fence_manual"/>
-</fencedevices>
-
-</cluster>
-
-
-FAQ
----
-
-- Why can't node1 use gfs, too?
-
-You might be able to make it work, but we recommend that you not try.
-This software was not intended or designed to allow that kind of usage.
-
-- Isn't node1 a single point of failure? How do I avoid that?
-
-Yes it is. For the time being, there's no way to avoid that, apart from
-not using gnbd, of course. Eventually, there will be a way to avoid this
-using cluster mirroring.
-
-- More info from
- http://sources.redhat.com/cluster/gnbd/gnbd_usage.txt
- http://sources.redhat.com/cluster/doc/usage.txt
-
diff --git a/doc/usage.txt b/doc/usage.txt
deleted file mode 100644
index f9e2866..0000000
--- a/doc/usage.txt
+++ /dev/null
@@ -1,177 +0,0 @@
-How to install and run GFS.
-
-Refer to the cluster project page for the latest information.
-http://sources.redhat.com/cluster/
-
-
-Install
--------
-
-Install a Linux kernel with GFS2, DLM, configfs, IPV6 and SCTP,
- 2.6.23-rc1 or later
-
- If you want to use gfs1 (from cluster/gfs-kernel), then you need to
- export three additional symbols from gfs2 by adding the following lines
- to the end of linux/fs/gfs2/locking.c:
- EXPORT_SYMBOL_GPL(gfs2_unmount_lockproto);
- EXPORT_SYMBOL_GPL(gfs2_mount_lockproto);
- EXPORT_SYMBOL_GPL(gfs2_withdraw_lockproto);
-
-Install openais
- get the latest "whitetank" (stable) release from
- http://openais.org/
- or
- svn checkout http://svn.osdl.org/openais
- cd openais/branches/whitetank
- make; make install DESTDIR=/
-
-Install gfs/dlm/fencing/etc components
- get the latest cluster-2.xx.yy tarball from
- ftp://sources.redhat.com/pub/cluster/
- or
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/cluster login cvs
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/cluster checkout cluster
- the password is "cvs"
- cd cluster
- ./configure --kernel_src=/path/to/kernel
- make install
-
- NOTE: On 64-bit systems, you will usually need to add '--libdir=/usr/lib64'
- to the configure line.
-
-Install LVM2/CLVM (optional)
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2 login cvs
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2 checkout LVM2
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2
- the password is "cvs"
- cd LVM2
- ./configure --with-clvmd=cman --with-cluster=shared
- make; make install
-
- NOTE: On 64-bit systems, you will usually need to add '--libdir=/usr/lib64'
- to the configure line.
-
-Load kernel modules
--------------------
-
-modprobe gfs2
-modprobe gfs
-modprobe lock_dlm
-modprobe lock_nolock
-modprobe dlm
-
-
-Configuration
--------------
-
-Create /etc/cluster/cluster.conf and copy it to all nodes.
-
- The format and content of cluster.conf has changed little since the
- last generation of the software. See old example here:
- http://sources.redhat.com/cluster/doc/usage.txt
- The one change you will need to make is to add nodeids for all nodes
- in the cluster. These are now mandatory. eg:
-
- <clusternode name="node12.mycluster.mycompany.com" votes="1" nodeid="12">
-
- If you already have a cluster.conf file with no nodeids in it, then you can
- use the 'ccs_tool addnodeids' command to add them.
-
-
-Example cluster.conf
---------------------
-
-This is a basic cluster.conf file that requires manual fencing. The node
-names should resolve to the address on the network interface you want to
-use for openais/cman/dlm communication.
-
-<?xml version="1.0"?>
-<cluster name="alpha" config_version="1">
-
-<clusternodes>
-<clusternode name="node01" nodeid="1">
- <fence>
- </fence>
-</clusternode>
-
-<clusternode name="node02" nodeid="2">
- <fence>
- </fence>
-</clusternode>
-
-<clusternode name="node03" nodeid="3">
- <fence>
- </fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
-</fencedevices>
-
-</cluster>
-
-
-Startup procedure
------------------
-
-Run these commands on each cluster node:
-
-> mount -t configfs none /sys/kernel/config
-> ccsd
-> cman_tool join
-> groupd
-> fenced
-> fence_tool join
-> dlm_controld
-> gfs_controld
-> clvmd (optional)
-> mkfs -t gfs2 -p lock_dlm -t <clustername>:<fsname> -j <#journals> <blockdev>
-> mount -t gfs2 [-v] <blockdev> <mountpoint>
-
-Notes:
-- replace "gfs2" with "gfs" above to use gfs1 instead of gfs2
-- <clustername> in mkfs should match the one in cluster.conf.
-- <fsname> in mkfs is any name you pick, each fs must have a different name.
-- <#journals> in mkfs should be greater than or equal to the number of nodes
- that you want to mount this fs, each node uses a separate journal.
-- To avoid unnecessary fencing when starting the cluster, it's best for
- all nodes to join the cluster (complete cman_tool join) before any
- of them do fence_tool join.
-- The cman_tool "status" and "nodes" options show the status and members
- of the cluster.
-- The group_tool command shows the status of fencing, dlm and gfs groups
- that the local node is part of.
-- The "cman" init script can be used for starting everything up through
- gfs_controld in the list above.
-
-
-Shutdown procedure
-------------------
-
-Run these commands on each cluster node:
-
-> umount [-v] <mountpoint>
-> fence_tool leave
-> cman_tool leave
-
-
-Converting from GFS1 to GFS2
-----------------------------
-
-If you have GFS1 filesystems that you need to convert to GFS2, follow
-this procedure:
-
-1. Back up your entire filesystem first.
- e.g. cp /dev/your_vg/lvol0 /your_gfs_backup
-
-2. Run fsck to ensure filesystem integrity.
- e.g. gfs2_fsck /dev/your_vg/lvol0
-
-3. Make sure the filesystem is not mounted from any node.
- e.g. for i in `grep "<clusternode name" /etc/cluster/cluster.conf | cut -d '"' -f2` ; do ssh $i "mount | grep gfs" ; done
-
-4. Make sure you have the latest software versions.
-
-5. Run gfs2_convert <blockdev> from one of the nodes.
- e.g. gfs2_convert /dev/your_vg/lvol0
-
13 years, 7 months
fence-agents: master - overall cleanup and make ready for release
by Fabio M. Di Nitto
Gitweb: http://git.fedorahosted.org/git/fence-agents.git?p=fence-agents.git;a=com...
Commit: 79336e5ef88fc33f40306466e48fafca9532d452
Parent: c3491b707166d877037d0f65ce7117396398d6af
Author: Fabio M. Di Nitto <fdinitto(a)redhat.com>
AuthorDate: Fri Nov 5 05:43:37 2010 -0400
Committer: Fabio M. Di Nitto <fdinitto(a)redhat.com>
CommitterDate: Fri Nov 5 05:43:37 2010 -0400
overall cleanup and make ready for release
Signed-off-by: Fabio M. Di Nitto <fdinitto(a)redhat.com>
---
.gitignore | 2 +
Makefile.am | 5 +-
configure.ac | 32 +------
doc/Makefile.am | 6 +-
doc/gfs2.txt | 45 --------
doc/journaling.txt | 155 ----------------------------
doc/min-gfs.txt | 159 ----------------------------
doc/usage.txt | 177 --------------------------------
fence/agents/node_assassin/Makefile.am | 2 +-
make/fenceman.mk | 2 +-
10 files changed, 9 insertions(+), 576 deletions(-)
diff --git a/.gitignore b/.gitignore
index d5dadff..cd80ece 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,10 @@ autoconf
autoheader
autom4te.cache
automake
+autoscan.log
compile
configure
+configure.scan
config.guess
config.log
config.sub
diff --git a/Makefile.am b/Makefile.am
index 583ff0e..7fb4d60 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -6,7 +6,8 @@ MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure depcomp \
config.guess config.sub missing install-sh \
autoheader automake autoconf libtool libtoolize \
ltmain.sh compile make/clusterautoconfig.h.in \
- make/clusterautoconfig.h.in~
+ make/clusterautoconfig.h.in~ autoscan.log \
+ configure.scan
noinst_HEADERS = make/copyright.cf
@@ -17,12 +18,10 @@ SUBDIRS = fence doc
install-exec-local:
$(INSTALL) -d $(DESTDIR)/$(LOGDIR)
$(INSTALL) -d $(DESTDIR)/$(CLUSTERVARRUN)
- $(INSTALL) -d $(DESTDIR)/$(CLUSTERVARLIB)
uninstall-local:
rmdir $(DESTDIR)/$(LOGDIR) || :;
rmdir $(DESTDIR)/$(CLUSTERVARRUN) || :;
- rmdir $(DESTDIR)/$(CLUSTERVARLIB) || :;
maintainer-clean-local:
rm -rf m4
diff --git a/configure.ac b/configure.ac
index 3b70cfe..6f7c6df 100644
--- a/configure.ac
+++ b/configure.ac
@@ -52,6 +52,7 @@ AM_PROG_CC_C_O
AC_PROG_LN_S
AC_PROG_INSTALL
AC_PROG_MAKE_SET
+AC_PROG_AWK
## local helper functions
@@ -81,22 +82,14 @@ PKG_CHECK_MODULES([nss],[nss])
PKG_CHECK_MODULES([nspr],[nspr])
# Checks for header files.
-AC_CHECK_HEADERS([arpa/inet.h fcntl.h libintl.h limits.h malloc.h netdb.h netinet/in.h stddef.h stdint.h stdlib.h string.h sys/ioctl.h sys/socket.h sys/time.h syslog.h unistd.h])
+AC_CHECK_HEADERS([arpa/inet.h fcntl.h libintl.h limits.h stddef.h sys/socket.h sys/time.h syslog.h])
# Checks for typedefs, structures, and compiler characteristics.
-AC_C_INLINE
-AC_TYPE_INT32_T
-AC_TYPE_PID_T
AC_TYPE_SIZE_T
AC_TYPE_SSIZE_T
-AC_TYPE_UINT16_T
-AC_TYPE_UINT32_T
-AC_TYPE_UINT64_T
-AC_TYPE_UINT8_T
# Checks for library functions.
AC_FUNC_FORK
-AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK
AC_FUNC_MALLOC
AC_CHECK_FUNCS([alarm atexit bzero dup2 memmove memset select socket strcasecmp strchr strdup strerror strtol])
@@ -105,18 +98,6 @@ AC_ARG_ENABLE([debug],
[ --enable-debug enable debug build. ],
[ default="no" ])
-AC_ARG_WITH([syslogfacility],
- [ --syslogfacility=FACILITY
- cluster default syslog facility. ],
- [ SYSLOGFACILITY="$withval" ],
- [ SYSLOGFACILITY="LOG_LOCAL4" ])
-
-AC_ARG_WITH([sysloglevel],
- [ --sysloglevel=LEVEL
- cluster default syslog level. ],
- [ SYSLOGLEVEL="$withval" ],
- [ SYSLOGLEVEL="LOG_INFO" ])
-
AC_ARG_WITH([fenceagentslibdir],
[ --fenceagentslibdir=PATH
installation path for fence library. ],
@@ -145,7 +126,6 @@ AC_ARG_WITH([default-config-file],
LOGDIR=${localstatedir}/log/cluster
CLUSTERVARRUN=${localstatedir}/run/cluster
-CLUSTERVARLIB=${localstatedir}/lib/cluster
## do subst
@@ -165,17 +145,9 @@ AC_SUBST([CLUSTERVARRUN])
AC_DEFINE_UNQUOTED([CLUSTERVARRUN], "$(eval echo ${CLUSTERVARRUN})",
[Default cluster var/run directory])
-AC_SUBST([CLUSTERVARLIB])
-
AC_SUBST([FENCEAGENTSLIBDIR])
AC_SUBST([SNMPBIN])
-AC_DEFINE_UNQUOTED([SYSLOGFACILITY], $(eval echo ${SYSLOGFACILITY}),
- [Default syslog facility])
-
-AC_DEFINE_UNQUOTED([SYSLOGLEVEL], $(eval echo ${SYSLOGLEVEL}),
- [Default syslog level])
-
## *FLAGS handling
ENV_CFLAGS="$CFLAGS"
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 2e6a2ec..13035fc 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,10 +1,6 @@
MAINTAINERCLEANFILES = Makefile.in
-dist_doc_DATA = gfs2.txt \
- journaling.txt \
- min-gfs.txt \
- usage.txt \
- COPYING.applications \
+dist_doc_DATA = COPYING.applications \
COPYING.libraries \
COPYRIGHT \
README.licence
diff --git a/doc/gfs2.txt b/doc/gfs2.txt
deleted file mode 100644
index 88f0143..0000000
--- a/doc/gfs2.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Global File System
-------------------
-
-http://sources.redhat.com/cluster/
-
-GFS is a cluster file system. It allows a cluster of computers to
-simultaneously use a block device that is shared between them (with FC,
-iSCSI, NBD, etc). GFS reads and writes to the block device like a local
-file system, but also uses a lock module to allow the computers coordinate
-their I/O so file system consistency is maintained. One of the nifty
-features of GFS is perfect consistency -- changes made to the file system
-on one machine show up immediately on all other machines in the cluster.
-
-GFS uses interchangeable inter-node locking mechanisms. Different lock
-modules can plug into GFS and each file system selects the appropriate
-lock module at mount time. Lock modules include:
-
- lock_nolock -- does no real locking and allows gfs to be used as a
- local file system
-
- lock_dlm -- uses a distributed lock manager (dlm) for inter-node locking
- The dlm is found at linux/fs/dlm/
-
-In addition to interfacing with an external locking manager, a gfs lock
-module is responsible for interacting with external cluster management
-systems. Lock_dlm depends on user space cluster management systems found
-at the URL above.
-
-To use gfs as a local file system, no external clustering systems are
-needed, simply:
-
- $ gfs2_mkfs -p lock_nolock -j 1 /dev/block_device
- $ mount -t gfs2 /dev/block_device /dir
-
-GFS2 is not on-disk compatible with previous versions of GFS.
-
-The following man pages can be found at the URL above:
- gfs2_mkfs to make a filesystem
- gfs2_fsck to repair a filesystem
- gfs2_grow to expand a filesystem online
- gfs2_jadd to add journals to a filesystem online
- gfs2_tool to manipulate, examine and tune a filesystem
- gfs2_quota to examine and change quota values in a filesystem
- mount.gfs2 to find mount options
-
diff --git a/doc/journaling.txt b/doc/journaling.txt
deleted file mode 100644
index e89eefa..0000000
--- a/doc/journaling.txt
+++ /dev/null
@@ -1,155 +0,0 @@
-o Journaling & Replay
-
-The fundamental problem with a journaled cluster filesystem is
-handling journal replay with multiple journals. A single block of
-metadata can be modified sequentially by many different nodes in the
-cluster. As the block is modified by each node, it gets logged in the
-journal for each node. If care is not taken, it's possible to get
-into a situation where a journal replay can actually corrupt a
-filesystem. The error scenario is:
-
-1) Node A modifies a metadata block by putting an updated copy into its
- incore log.
-2) Node B wants to read and modify the block so it requests the lock
- and a blocking callback is sent to Node A.
-3) Node A flushes its incore log to disk, and then syncs out the
- metadata block to its inplace location.
-4) Node A then releases the lock.
-5) Node B reads in the block and puts a modified copy into its ondisk
- log and then the inplace block location.
-6) Node A crashes.
-
-At this point, Node A's journal needs to be replayed. Since there is
-a newer version of block inplace, if that block is replayed, the
-filesystem will be corrupted. There are a few different ways of
-avoiding this problem.
-
-1) Generation Numbers (GFS1)
-
 - Each metadata block has a header in it that contains a 64-bit
- generation number. As each block is logged into a journal, the
- generation number is incremented. This provides a strict ordering
 - of the different versions of the block as they are logged in the FS'
- different journals. When journal replay happens, each block in the
 - journal is not replayed if the generation number in the journal is less
- than the generation number in place. This ensures that a newer
- version of a block is never replaced with an older version. So,
- this solution basically allows multiple copies of the same block in
- different journals, but it allows you to always know which is the
- correct one.
-
- Pros:
-
- A) This method allows the fastest callbacks. To release a lock,
- the incore log for the lock must be flushed and then the inplace
- data and metadata must be synced. That's it. The sync
- operations involved are: start the log body and wait for it to
- become stable on the disk, synchronously write the commit block,
- start the inplace metadata and wait for it to become stable on
- the disk.
-
- Cons:
-
- A) Maintaining the generation numbers is expensive. All newly
- allocated metadata block must be read off the disk in order to
- figure out what the previous value of the generation number was.
- When deallocating metadata, extra work and care must be taken to
- make sure dirty data isn't thrown away in such a way that the
- generation numbers stop doing their thing.
- B) You can't continue to modify the filesystem during journal
- replay. Basically, replay of a block is a read-modify-write
- operation: the block is read from disk, the generation number is
- compared, and (maybe) the new version is written out. Replay
- requires that the R-M-W operation is atomic with respect to
- other R-M-W operations that might be happening (say by a normal
- I/O process). Since journal replay doesn't (and can't) play by
- the normal metadata locking rules, you can't count on them to
 - protect replay. Hence GFS1 quiesces all writes on a filesystem
- before starting replay. This provides the mutual exclusion
- required, but it's slow and unnecessarily interrupts service on
- the whole cluster.
-
-2) Total Metadata Sync (OCFS2)
-
- This method is really simple in that it uses exactly the same
- infrastructure that a local journaled filesystem uses. Every time
- a node receives a callback, it stops all metadata modification,
- syncs out the whole incore journal, syncs out any dirty data, marks
- the journal as being clean (unmounted), and then releases the lock.
 - Because the journal is marked as clean and recovery won't look at any
 - of the journaled blocks in it, a valid copy of any particular block
 - only exists in one journal at a time, and that journal is always the
 - journal of the node that modified it last.
-
- Pros:
-
- A) Very simple to implement.
- B) You can reuse journaling code from other places (such as JBD).
 - C) No quiesce necessary for replay.
- D) No need for generation numbers sprinkled throughout the metadata.
-
- Cons:
-
- A) This method has the slowest possible callbacks. The sync
- operations are: stop all metadata operations, start and wait for
- the log body, write the log commit block, start and wait for all
- the FS' dirty metadata, write an unmount block. Writing the
- metadata for the whole filesystem can be particularly expensive
- because it can be scattered all over the disk and there can be a
- whole journal's worth of it.
-
-3) Revocation of a lock's buffers (GFS2)
-
- This method prevents a block from appearing in more than one
- journal by canceling out the metadata blocks in the journal that
- belong to the lock being released. Journaling works very similarly
- to a local filesystem or to #2 above.
-
- The biggest difference is you have to keep track of buffers in the
- active region of the ondisk journal, even after the inplace blocks
- have been written back. This is done in GFS2 by adding a second
- part to the Active Items List. The first part (in GFS2 called
- AIL1) contains a list of all the blocks which have been logged to
- the journal, but not written back to their inplace location. Once
- an item in AIL1 has been written back to its inplace location, it
- is moved to AIL2. Once the tail of the log moves past the block's
- transaction in the log, it can be removed from AIL2.
-
- When a callback occurs, the log is flushed to the disk and the
- metadata for the lock is synced to disk. At this point, any
- metadata blocks for the lock that are in the current active region
- of the log will be in the AIL2 list. We then build a transaction
- that contains revoke tags for each buffer in the AIL2 list that
- belongs to that lock.
-
- Pros:
-
 - A) No quiesce necessary for Replay
- B) No need for generation numbers sprinkled throughout the
- metadata.
- C) The sync operations are: stop all metadata operations, start and
- wait for the log body, write the log commit block, start and
- wait for all the FS' dirty metadata, start and wait for the log
- body of a transaction that revokes any of the lock's metadata
- buffers in the journal's active region, and write the commit
- block for that transaction.
-
- Cons:
-
- A) Recovery takes two passes, one to find all the revoke tags in
- the log and one to replay the metadata blocks using the revoke
- tags as a filter. This is necessary for a local filesystem and
- the total sync method, too. It's just that there will probably
- be more tags.
-
-Comparing #2 and #3, both do extra I/O during a lock callback to make
-sure that any metadata blocks in the log for that lock will be
-removed. I believe #2 will be slower because syncing out all the
-dirty metadata for entire filesystem requires lots of little,
-scattered I/O across the whole disk. The extra I/O done by #3 is a
-log write to the disk. So, not only should it be less I/O, but it
-should also be better suited to get good performance out of the disk
-subsystem.
-
-KWP 07/06/05
-
diff --git a/doc/min-gfs.txt b/doc/min-gfs.txt
deleted file mode 100644
index af1399c..0000000
--- a/doc/min-gfs.txt
+++ /dev/null
@@ -1,159 +0,0 @@
-
-Minimum GFS HowTo
------------------
-
-The following gfs configuration requires a minimum amount of hardware and
-no expensive storage system. It's the cheapest and quickest way to "play"
-with gfs.
-
-
- ---------- ----------
- | GNBD | | GNBD |
- | client | | client | <-- these nodes use gfs
- | node2 | | node3 |
- ---------- ----------
- | |
- ------------------ IP network
- |
- ----------
- | GNBD |
- | server | <-- this node doesn't use gfs
- | node1 |
- ----------
-
-- There are three machines to use with hostnames: node1, node2, node3
-
-- node1 has an extra disk /dev/sda1 to use for gfs
- (this could be hda1 or an lvm LV or an md device)
-
-- node1 will use gnbd to export this disk to node2 and node3
-
-- Node1 cannot use gfs, it only acts as a gnbd server.
- (Node1 will /not/ actually be part of the cluster since it is only
- running the gnbd server.)
-
-- Only node2 and node3 will be in the cluster and use gfs.
- (A two-node cluster is a special case for cman, noted in the config below.)
-
-- There's not much point to using clvm in this setup so it's left out.
-
-- Download the "cluster" source tree.
-
-- Build and install from the cluster source tree. (The kernel components
- are not required on node1 which will only need the gnbd_serv program.)
-
- cd cluster
- ./configure --kernel_src=/path/to/kernel
- make; make install
-
-- Create /etc/cluster/cluster.conf on node2 with the following contents:
-
-<?xml version="1.0"?>
-<cluster name="gamma" config_version="1">
-
-<cman two_node="1" expected_votes="1">
-</cman>
-
-<clusternodes>
-<clusternode name="node2">
- <fence>
- <method name="single">
- <device name="gnbd" ipaddr="node2"/>
- </method>
- </fence>
-</clusternode>
-
-<clusternode name="node3">
- <fence>
- <method name="single">
- <device name="gnbd" ipaddr="node3"/>
- </method>
- </fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
- <fencedevice name="gnbd" agent="fence_gnbd" servers="node1"/>
-</fencedevices>
-
-</cluster>
-
-
-- load kernel modules on nodes
-
-node2 and node3> modprobe gnbd
-node2 and node3> modprobe gfs
-node2 and node3> modprobe lock_dlm
-
-- run the following commands
-
-node1> gnbd_serv -n
-node1> gnbd_export -c -d /dev/sda1 -e global_disk
-
-node2 and node3> gnbd_import -n -i node1
-node2 and node3> ccsd
-node2 and node3> cman_tool join
-node2 and node3> fence_tool join
-
-node2> gfs_mkfs -p lock_dlm -t gamma:gfs1 -j 2 /dev/gnbd/global_disk
-
-node2 and node3> mount -t gfs /dev/gnbd/global_disk /mnt
-
-- the end, you now have a gfs file system mounted on node2 and node3
-
-
-Appendix A
-----------
-
-To use manual fencing instead of gnbd fencing, the cluster.conf file
-would look like this:
-
-<?xml version="1.0"?>
-<cluster name="gamma" config_version="1">
-
-<cman two_node="1" expected_votes="1">
-</cman>
-
-<clusternodes>
-<clusternode name="node2">
- <fence>
- <method name="single">
- <device name="manual" ipaddr="node2"/>
- </method>
- </fence>
-</clusternode>
-
-<clusternode name="node3">
- <fence>
- <method name="single">
- <device name="manual" ipaddr="node3"/>
- </method>
- </fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
- <fencedevice name="manual" agent="fence_manual"/>
-</fencedevices>
-
-</cluster>
-
-
-FAQ
----
-
-- Why can't node1 use gfs, too?
-
-You might be able to make it work, but we recommend that you not try.
-This software was not intended or designed to allow that kind of usage.
-
-- Isn't node1 a single point of failure? How do I avoid that?
-
-Yes it is. For the time being, there's no way to avoid that, apart from
-not using gnbd, of course. Eventually, there will be a way to avoid this
-using cluster mirroring.
-
-- More info from
- http://sources.redhat.com/cluster/gnbd/gnbd_usage.txt
- http://sources.redhat.com/cluster/doc/usage.txt
-
diff --git a/doc/usage.txt b/doc/usage.txt
deleted file mode 100644
index f9e2866..0000000
--- a/doc/usage.txt
+++ /dev/null
@@ -1,177 +0,0 @@
-How to install and run GFS.
-
-Refer to the cluster project page for the latest information.
-http://sources.redhat.com/cluster/
-
-
-Install
--------
-
-Install a Linux kernel with GFS2, DLM, configfs, IPV6 and SCTP,
- 2.6.23-rc1 or later
-
- If you want to use gfs1 (from cluster/gfs-kernel), then you need to
- export three additional symbols from gfs2 by adding the following lines
- to the end of linux/fs/gfs2/locking.c:
- EXPORT_SYMBOL_GPL(gfs2_unmount_lockproto);
- EXPORT_SYMBOL_GPL(gfs2_mount_lockproto);
- EXPORT_SYMBOL_GPL(gfs2_withdraw_lockproto);
-
-Install openais
- get the latest "whitetank" (stable) release from
- http://openais.org/
- or
- svn checkout http://svn.osdl.org/openais
- cd openais/branches/whitetank
- make; make install DESTDIR=/
-
-Install gfs/dlm/fencing/etc components
- get the latest cluster-2.xx.yy tarball from
- ftp://sources.redhat.com/pub/cluster/
- or
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/cluster login cvs
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/cluster checkout cluster
- the password is "cvs"
- cd cluster
- ./configure --kernel_src=/path/to/kernel
- make install
-
- NOTE: On 64-bit systems, you will usually need to add '--libdir=/usr/lib64'
- to the configure line.
-
-Install LVM2/CLVM (optional)
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2 login cvs
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2 checkout LVM2
- cvs -d :pserver:cvs@sources.redhat.com:/cvs/lvm2
- the password is "cvs"
- cd LVM2
- ./configure --with-clvmd=cman --with-cluster=shared
- make; make install
-
- NOTE: On 64-bit systems, you will usually need to add '--libdir=/usr/lib64'
- to the configure line.
-
-Load kernel modules
--------------------
-
-modprobe gfs2
-modprobe gfs
-modprobe lock_dlm
-modprobe lock_nolock
-modprobe dlm
-
-
-Configuration
--------------
-
-Create /etc/cluster/cluster.conf and copy it to all nodes.
-
- The format and content of cluster.conf has changed little since the
- last generation of the software. See old example here:
- http://sources.redhat.com/cluster/doc/usage.txt
- The one change you will need to make is to add nodeids for all nodes
- in the cluster. These are now mandatory. eg:
-
- <clusternode name="node12.mycluster.mycompany.com" votes="1" nodeid="12">
-
- If you already have a cluster.conf file with no nodeids in it, then you can
- use the 'ccs_tool addnodeids' command to add them.
-
-
-Example cluster.conf
---------------------
-
-This is a basic cluster.conf file that requires manual fencing. The node
-names should resolve to the address on the network interface you want to
-use for openais/cman/dlm communication.
-
-<?xml version="1.0"?>
-<cluster name="alpha" config_version="1">
-
-<clusternodes>
-<clusternode name="node01" nodeid="1">
- <fence>
- </fence>
-</clusternode>
-
-<clusternode name="node02" nodeid="2">
- <fence>
- </fence>
-</clusternode>
-
-<clusternode name="node03" nodeid="3">
- <fence>
- </fence>
-</clusternode>
-</clusternodes>
-
-<fencedevices>
-</fencedevices>
-
-</cluster>
-
-
-Startup procedure
------------------
-
-Run these commands on each cluster node:
-
-> mount -t configfs none /sys/kernel/config
-> ccsd
-> cman_tool join
-> groupd
-> fenced
-> fence_tool join
-> dlm_controld
-> gfs_controld
-> clvmd (optional)
-> mkfs -t gfs2 -p lock_dlm -t <clustername>:<fsname> -j <#journals> <blockdev>
-> mount -t gfs2 [-v] <blockdev> <mountpoint>
-
-Notes:
-- replace "gfs2" with "gfs" above to use gfs1 instead of gfs2
-- <clustername> in mkfs should match the one in cluster.conf.
-- <fsname> in mkfs is any name you pick, each fs must have a different name.
-- <#journals> in mkfs should be greater than or equal to the number of nodes
- that you want to mount this fs, each node uses a separate journal.
-- To avoid unnecessary fencing when starting the cluster, it's best for
- all nodes to join the cluster (complete cman_tool join) before any
- of them do fence_tool join.
-- The cman_tool "status" and "nodes" options show the status and members
- of the cluster.
-- The group_tool command shows the status of fencing, dlm and gfs groups
- that the local node is part of.
-- The "cman" init script can be used for starting everything up through
- gfs_controld in the list above.
-
-
-Shutdown procedure
-------------------
-
-Run these commands on each cluster node:
-
-> umount [-v] <mountpoint>
-> fence_tool leave
-> cman_tool leave
-
-
-Converting from GFS1 to GFS2
-----------------------------
-
-If you have GFS1 filesystems that you need to convert to GFS2, follow
-this procedure:
-
-1. Back up your entire filesystem first.
- e.g. cp /dev/your_vg/lvol0 /your_gfs_backup
-
-2. Run fsck to ensure filesystem integrity.
- e.g. gfs2_fsck /dev/your_vg/lvol0
-
-3. Make sure the filesystem is not mounted from any node.
- e.g. for i in `grep "<clusternode name" /etc/cluster/cluster.conf | cut -d '"' -f2` ; do ssh $i "mount | grep gfs" ; done
-
-4. Make sure you have the latest software versions.
-
-5. Run gfs2_convert <blockdev> from one of the nodes.
- e.g. gfs2_convert /dev/your_vg/lvol0
-
diff --git a/fence/agents/node_assassin/Makefile.am b/fence/agents/node_assassin/Makefile.am
index af7906b..9208523 100644
--- a/fence/agents/node_assassin/Makefile.am
+++ b/fence/agents/node_assassin/Makefile.am
@@ -37,7 +37,7 @@ $(TARGET).conf: $(TARGET).conf.in
cat $^ | sed \
-e 's#@''LOGDIR@#${LOGDIR}#g' \
-e 's#@''CONFDIR@#${DEFAULT_CONFIG_DIR}#g' \
- -e 's#@CONFFILE@#${DEFAULT_CONFIG_FILE}#g' \
+ -e 's#@''CONFFILE@#${DEFAULT_CONFIG_FILE}#g' \
> $@
$(TARGET).lib: $(TARGET).lib.in
diff --git a/make/fenceman.mk b/make/fenceman.mk
index 5da0a3a..e6df9f0 100644
--- a/make/fenceman.mk
+++ b/make/fenceman.mk
@@ -1,6 +1,6 @@
%.8: $(TARGET) $(top_srcdir)/fence/agents/lib/fence2man.xsl
set -e && \
- PYTHONPATH=$(top_srcdir)/fence/agents/lib:$(top_builddir)/fence/agents/lib \
+ PYTHONPATH=$(abs_srcdir)/../lib:$(abs_builddir)/../lib \
python $^ -o metadata > .$@.tmp && \
xsltproc $(top_srcdir)/fence/agents/lib/fence2man.xsl .$@.tmp > $@
13 years, 7 months
cluster: STABLE31 - build: drop fence-agents specific target
by Fabio M. Di Nitto
Gitweb: http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=d3fc38cdc8b14006c44ed9268a4c9e266218ef07
Commit: d3fc38cdc8b14006c44ed9268a4c9e266218ef07
Parent: 9fe9d3725e864b51af24b702992a9a8d92385a3a
Author: Fabio M. Di Nitto <fdinitto(a)redhat.com>
AuthorDate: Fri Nov 5 05:16:04 2010 -0400
Committer: Fabio M. Di Nitto <fdinitto(a)redhat.com>
CommitterDate: Fri Nov 5 05:16:04 2010 -0400
build: drop fence-agents specific target
Signed-off-by: Fabio M. Di Nitto <fdinitto(a)redhat.com>
---
make/cobj.mk | 4 ----
1 files changed, 0 insertions(+), 4 deletions(-)
diff --git a/make/cobj.mk b/make/cobj.mk
index 03f1826..c018293 100644
--- a/make/cobj.mk
+++ b/make/cobj.mk
@@ -8,7 +8,3 @@
# used by rgmanager/src/daemons
%-noccs.o: $(S)/%.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(NOCCS_CFLAGS) -c -o $@ $<
-
-# used by fence/agents/xvm
-%-standalone.o: $(S)/%.c
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(STANDALONE_CFLAGS) -c -o $@ $<
13 years, 7 months
cluster: STABLE31 - dlm_controld.pcmk: drop last remaining bits
by Fabio M. Di Nitto
Gitweb: http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=9fe9d3725e864b51af24b702992a9a8d92385a3a
Commit: 9fe9d3725e864b51af24b702992a9a8d92385a3a
Parent: 9b58afaa4700a7d03b941343fe74a5b1db113ac6
Author: Fabio M. Di Nitto <fdinitto(a)redhat.com>
AuthorDate: Fri Nov 5 05:14:19 2010 -0400
Committer: Fabio M. Di Nitto <fdinitto(a)redhat.com>
CommitterDate: Fri Nov 5 05:14:19 2010 -0400
dlm_controld.pcmk: drop last remaining bits
Signed-off-by: Fabio M. Di Nitto <fdinitto(a)redhat.com>
---
group/man/Makefile | 1 -
group/man/dlm_controld.pcmk.8 | 1 -
make/cobj.mk | 4 ----
3 files changed, 0 insertions(+), 6 deletions(-)
diff --git a/group/man/Makefile b/group/man/Makefile
index 9114676..68b0d90 100644
--- a/group/man/Makefile
+++ b/group/man/Makefile
@@ -1,6 +1,5 @@
MANTARGET= \
dlm_controld.8 \
- dlm_controld.pcmk.8 \
group_tool.8 \
groupd.8
diff --git a/group/man/dlm_controld.pcmk.8 b/group/man/dlm_controld.pcmk.8
deleted file mode 100644
index d69f41b..0000000
--- a/group/man/dlm_controld.pcmk.8
+++ /dev/null
@@ -1 +0,0 @@
-.so man8/dlm_controld.8
diff --git a/make/cobj.mk b/make/cobj.mk
index 4b12960..03f1826 100644
--- a/make/cobj.mk
+++ b/make/cobj.mk
@@ -5,10 +5,6 @@
%_lt.o: $(S)/%.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $<
-# used by group/dlm_controld
-%-pcmk.o: $(S)/%.c
- $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(PCMK_CFLAGS) -c -o $@ $<
-
# used by rgmanager/src/daemons
%-noccs.o: $(S)/%.c
$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(NOCCS_CFLAGS) -c -o $@ $<
13 years, 7 months
dlm: master - build: drop cman/pacemaker configure options
by Fabio M. Di Nitto
Gitweb: http://git.fedorahosted.org/git/dlm.git?p=dlm.git;a=commitdiff;h=940d8896eb1ef1056ea752ac45cb332314ca68ba
Commit: 940d8896eb1ef1056ea752ac45cb332314ca68ba
Parent: fc2ec0d6841352ccbd9fddddeaa17de44a65261b
Author: Fabio M. Di Nitto <fdinitto(a)redhat.com>
AuthorDate: Wed Nov 3 22:33:19 2010 +0100
Committer: Fabio M. Di Nitto <fdinitto(a)redhat.com>
CommitterDate: Wed Nov 3 22:33:19 2010 +0100
build: drop cman/pacemaker configure options
Signed-off-by: Fabio M. Di Nitto <fdinitto(a)redhat.com>
---
configure.ac | 38 ++++++---------------------------
group/dlm_controld/Makefile.am | 45 +++++++++++++++------------------------
2 files changed, 24 insertions(+), 59 deletions(-)
diff --git a/configure.ac b/configure.ac
index 488f3eb..f1c1ec2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -52,6 +52,8 @@ AM_PROG_CC_C_O
AC_PROG_LN_S
AC_PROG_INSTALL
AC_PROG_MAKE_SET
+AC_PROG_CXX
+AC_PROG_RANLIB
## local helper functions
@@ -103,18 +105,6 @@ AC_ARG_WITH([sysloglevel],
[ SYSLOGLEVEL="$withval" ],
[ SYSLOGLEVEL="LOG_INFO" ])
-AC_ARG_ENABLE([pacemaker],
- [ --enable-pacemaker enable dlm_controld pacemaker build. ],
- [ default="no" ])
-
-AC_ARG_ENABLE([cman],
- [ --enable-cman enable dlm_controld cman build. ],,
- [ enable_cman="yes" ])
-
-if test "x${enable_pacemaker}" = xno && test "x${enable_cman}" = xno; then
- AC_MSG_ERROR([At least one dlm_controld build method (cman or pacemaker) has to be selected])
-fi
-
AC_ARG_WITH([kernel],
[ --with-kernel=path path to kernel source. ],
[ KERNEL_DIR="$withval" ],
@@ -126,22 +116,11 @@ PKG_CHECK_MODULES([corosync],[corosync])
PKG_CHECK_MODULES([cpg],[libcpg])
PKG_CHECK_MODULES([sackpt],[libSaCkpt])
PKG_CHECK_MODULES([logt],[liblogthread])
-
-if test "x${enable_pacemaker}" = xyes; then
- PKG_CHECK_MODULES([totempg],[libtotem_pg])
- PKG_CHECK_MODULES([xml],[libxml-2.0])
- PKG_CHECK_MODULES([glib],[glib-2.0])
- check_lib_no_libs cib cib_new
- check_lib_no_libs crmcluster crm_set_status_callback
- check_lib_no_libs crmcommon init_server_ipc_comms
-fi
-
-if test "x${enable_cman}" = xyes; then
- PKG_CHECK_MODULES([ccs],[libccs])
- PKG_CHECK_MODULES([cfg],[libcfg])
- PKG_CHECK_MODULES([quorum],[libquorum])
- PKG_CHECK_MODULES([fenced],[libfenced])
-fi
+PKG_CHECK_MODULES([ccs],[libccs])
+PKG_CHECK_MODULES([cfg],[libcfg])
+PKG_CHECK_MODULES([confdb],[libconfdb])
+PKG_CHECK_MODULES([quorum],[libquorum])
+PKG_CHECK_MODULES([fenced],[libfenced])
# external libs (no pkgconfig)
check_lib_no_libs pthread pthread_mutex_lock
@@ -213,9 +192,6 @@ AC_DEFINE_UNQUOTED([SYSLOGFACILITY], $(eval echo ${SYSLOGFACILITY}),
AC_DEFINE_UNQUOTED([SYSLOGLEVEL], $(eval echo ${SYSLOGLEVEL}),
[Default syslog level])
-AM_CONDITIONAL(ENABLE_PACEMAKER, test "x${enable_pacemaker}" = xyes)
-AM_CONDITIONAL(ENABLE_CMAN, test "x${enable_cman}" = xyes)
-
## *FLAGS handling
ENV_CFLAGS="$CFLAGS"
diff --git a/group/dlm_controld/Makefile.am b/group/dlm_controld/Makefile.am
index 8eb6186..a10e236 100644
--- a/group/dlm_controld/Makefile.am
+++ b/group/dlm_controld/Makefile.am
@@ -1,46 +1,35 @@
MAINTAINERCLEANFILES = Makefile.in
-sbin_PROGRAMS =
-
-if ENABLE_CMAN
-sbin_PROGRAMS += dlm_controld
-endif
+sbin_PROGRAMS = dlm_controld
noinst_HEADERS = config.h dlm_controld.h dlm_daemon.h
-shared_SOURCES = action.c cpg.c crc.c deadlock.c main.c \
- netlink.c plock.c
+dlm_controld_SOURCES = action.c cpg.c crc.c deadlock.c main.c \
+ netlink.c plock.c config.c member_cman.c \
+ logging.c
-shared_CPPFLAGS = -I$(top_srcdir)/dlm/libdlm \
+dlm_controld_CPPFLAGS = -I$(top_srcdir)/dlm/libdlm \
-I$(top_srcdir)/dlm/libdlmcontrol \
-I$(top_srcdir)/group/include
-shared_CFLAGS = $(logt_CFLAGS) $(cpg_CFLAGS) $(sackpt_CFLAGS)
-
-shared_LIBS = $(logt_LIBS) $(cpg_LIBS) $(sackpt_LIBS) -lpthread
-
-shared_LDADD = $(top_builddir)/dlm/libdlm/libdlm.la \
- $(top_builddir)/dlm/libdlmcontrol/libdlmcontrol.la
-
-if ENABLE_CMAN
-
-dlm_controld_SOURCES = $(shared_SOURCES) \
- config.c member_cman.c logging.c
-
-dlm_controld_CPPFLAGS = $(shared_CPPFLAGS)
-
-dlm_controld_CFLAGS = $(shared_CFLAGS) \
+dlm_controld_CFLAGS = $(logt_CFLAGS) \
+ $(cpg_CFLAGS) \
+ $(sackpt_CFLAGS) \
+ $(confdb_CFLAGS)
$(ccs_CFLAGS) \
$(fenced_CFLAGS) \
$(cfg_CFLAGS) \
$(quorum_CFLAGS)
-dlm_controld_LDFLAGS = $(shared_LIBS) \
+dlm_controld_LDFLAGS = $(logt_LIBS) \
+ $(cpg_LIBS) \
+ $(sackpt_LIBS) \
+ $(confdb_LIBS) \
$(ccs_LIBS) \
$(fenced_LIBS) \
$(cfg_LIBS) \
- $(quorum_LIBS)
+ $(quorum_LIBS) \
+ -lpthread
-dlm_controld_LDADD = $(shared_LDADD)
-
-endif
+dlm_controld_LDADD = $(top_builddir)/dlm/libdlm/libdlm.la \
+ $(top_builddir)/dlm/libdlmcontrol/libdlmcontrol.la
13 years, 7 months
cluster: STABLE31 - build: clean dlm_controld Makefile
by Fabio M. Di Nitto
Gitweb: http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=9b58afaa4700a7d03b941343fe74a5b1db113ac6
Commit: 9b58afaa4700a7d03b941343fe74a5b1db113ac6
Parent: 6502e1ed197687598872e6ac3af30e4a921ff21e
Author: Fabio M. Di Nitto <fdinitto(a)redhat.com>
AuthorDate: Tue Nov 2 21:21:27 2010 +0100
Committer: Fabio M. Di Nitto <fdinitto(a)redhat.com>
CommitterDate: Tue Nov 2 21:21:27 2010 +0100
build: clean dlm_controld Makefile
Signed-off-by: Fabio M. Di Nitto <fdinitto(a)redhat.com>
---
group/dlm_controld/Makefile | 38 +++++++++++++++++---------------------
1 files changed, 17 insertions(+), 21 deletions(-)
diff --git a/group/dlm_controld/Makefile b/group/dlm_controld/Makefile
index ee061cb..bb5c82e 100644
--- a/group/dlm_controld/Makefile
+++ b/group/dlm_controld/Makefile
@@ -1,7 +1,7 @@
include ../../make/defines.mk
TARGET = dlm_controld
-SBINDIRT = dlm_controld
+SBINDIRT = ${TARGET}
all: depends ${TARGET}
@@ -10,19 +10,17 @@ include $(OBJDIR)/make/clean.mk
include $(OBJDIR)/make/install.mk
include $(OBJDIR)/make/uninstall.mk
-SHAREDOBJS= action.o \
- cpg.o \
- crc.o \
- deadlock.o \
- main.o \
- netlink.o \
- plock.o \
- group.o
-
-OBJS= $(SHAREDOBJS) \
- config.o \
- member_cman.o \
- logging.o
+OBJS= action.o \
+ cpg.o \
+ crc.o \
+ deadlock.o \
+ main.o \
+ netlink.o \
+ plock.o \
+ group.o \
+ config.o \
+ member_cman.o \
+ logging.o
CFLAGS += -I${ccsincdir} -I${cmanincdir} -I${logtincdir}
CFLAGS += -I${dlmincdir} -I${dlmcontrolincdir}
@@ -32,21 +30,19 @@ CFLAGS += -I${KERNEL_SRC}/include/
CFLAGS += -I$(S)/../lib/ -I$(S)/../include/
CFLAGS += -I${incdir}
-LDFLAGS += -lpthread
-LDFLAGS += -L${dlmlibdir} -ldlm
LDFLAGS += -L${logtlibdir} -llogthread
+LDFLAGS += -L${ccslibdir} -L${cmanlibdir} -lccs -lcman
+LDFLAGS += -L${dlmlibdir} -L${fencedlibdir} -ldlm -lfenced
LDFLAGS += -L${openaislibdir} -lSaCkpt
LDFLAGS += -L${corosynclibdir} -lcpg -lconfdb
+LDFLAGS += -lpthread
LDFLAGS += -L../lib -lgroup
LDFLAGS += -L${libdir}
LDDEPS += ../lib/libgroup.a
-CMAN_LDFLAGS += -L${ccslibdir} -L${cmanlibdir} -lccs -lcman
-CMAN_LDFLAGS += -L${fencedlibdir} -lfenced
-
-dlm_controld: ${OBJS} ${LDDEPS}
- $(CC) -o $@ $^ $(LDFLAGS) $(CMAN_LDFLAGS)
+${TARGET}: ${OBJS} ${LDDEPS}
+ $(CC) -o $@ $^ $(LDFLAGS)
depends:
$(MAKE) -C ../lib all
13 years, 8 months
cluster: RHEL56 - fence_scsi_test: Add man page to Makefile.
by rohara
Gitweb: http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=af5036d74431ea4446ed7941a58ffafd09dc1050
Commit: af5036d74431ea4446ed7941a58ffafd09dc1050
Parent: 38ca868f6ee9c7ebb4059b7a4983734935c80ca4
Author: Ryan O'Hara <rohara(a)redhat.com>
AuthorDate: Tue Nov 2 13:51:54 2010 -0500
Committer: Ryan O'Hara <rohara(a)redhat.com>
CommitterDate: Tue Nov 2 13:51:54 2010 -0500
fence_scsi_test: Add man page to Makefile.
Resolves: rhbz#603838
Signed-off-by: Ryan O'Hara <rohara(a)redhat.com>
---
fence/man/Makefile | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/fence/man/Makefile b/fence/man/Makefile
index fdeb1df..080410e 100644
--- a/fence/man/Makefile
+++ b/fence/man/Makefile
@@ -30,6 +30,7 @@ TARGET8= \
fence_rib.8 \
fence_sanbox2.8 \
fence_scsi.8 \
+ fence_scsi_test.8 \
fence_tool.8 \
fence_vixel.8 \
fence_wti.8 \
13 years, 8 months
cluster: STABLE31 - build: remove --enable_pacemaker config option
by Fabio M. Di Nitto
Gitweb: http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=6502e1ed197687598872e6ac3af30e4a921ff21e
Commit: 6502e1ed197687598872e6ac3af30e4a921ff21e
Parent: 0616c9622e5f67fd73cd7917b69f2e2f775896cc
Author: Fabio M. Di Nitto <fdinitto(a)redhat.com>
AuthorDate: Tue Nov 2 20:49:53 2010 +0100
Committer: Fabio M. Di Nitto <fdinitto(a)redhat.com>
CommitterDate: Tue Nov 2 20:49:53 2010 +0100
build: remove --enable_pacemaker config option
Signed-off-by: Fabio M. Di Nitto <fdinitto(a)redhat.com>
---
configure | 7 -------
make/defines.mk.input | 1 -
2 files changed, 0 insertions(+), 8 deletions(-)
diff --git a/configure b/configure
index 2e9f8bc..7e1aca7 100755
--- a/configure
+++ b/configure
@@ -73,7 +73,6 @@ my %options = (
confdir => \$confdir,
conffile => \$conffile,
enable_contrib => \$enable_contrib,
- enable_pacemaker => \$enable_pacemaker,
somajor => \$somajor,
sominor => \$sominor,
release_version => \$release_version,
@@ -145,7 +144,6 @@ my $err = &GetOptions (\%options,
'sominor=s',
'release_version=s',
'enable_contrib',
- 'enable_pacemaker',
'without_common',
'without_config',
'without_cman',
@@ -221,7 +219,6 @@ if ($help || !$err) {
print "--ldapincdir=\tthe base directory for ldap include files. (Default: {incdir})\n";
print "--ldaplibdir=\tthe base directory for ldap libraries. (Default: {libdir})\n";
print "--enable_contrib\tEnable build of community contributed code/tools. (Default: no)\n";
- print "--enable_pacemaker\tEnable building of Pacemaker-specific pieces\n";
print "--without_common\tDisable common building (Default: enabled)\n";
print "--without_config\tDisable config building (Default: enabled)\n";
print "--without_cman\tDisable cman building (Default: enabled)\n";
@@ -507,9 +504,6 @@ if (!$conffile) {
if (!$enable_contrib) {
$enable_contrib="";
}
-if (!$enable_pacemaker) {
- $enable_pacemaker="";
-}
if (!$without_common) {
$without_common="";
}
@@ -604,7 +598,6 @@ while (<IFILE>) {
$_ =~ s/\@CONFDIR\@/$confdir/;
$_ =~ s/\@CONFFILE\@/$conffile/;
$_ =~ s/\@ENABLE_CONTRIB\@/$enable_contrib/;
- $_ =~ s/\@ENABLE_PACEMAKER\@/$enable_pacemaker/;
$_ =~ s/\@DISABLE_COMMON\@/$without_common/;
$_ =~ s/\@DISABLE_CONFIG\@/$without_config/;
$_ =~ s/\@DISABLE_CMAN\@/$without_cman/;
diff --git a/make/defines.mk.input b/make/defines.mk.input
index 5cb006a..3751cf5 100644
--- a/make/defines.mk.input
+++ b/make/defines.mk.input
@@ -58,7 +58,6 @@ ldaplibdir ?= @LDAPLIBDIR@
zlibincdir ?= @ZLIBINCDIR@
zliblibdir ?= @ZLIBLIBDIR@
contrib_code ?= @ENABLE_CONTRIB@
-enable_pacemaker ?= @ENABLE_PACEMAKER@
without_common ?= @DISABLE_COMMON@
without_config ?= @DISABLE_CONFIG@
without_cman ?= @DISABLE_CMAN@
13 years, 8 months
dlm: master - dlm_controld: remove pacemaker variant
by David Teigland
Gitweb: http://git.fedorahosted.org/git/dlm.git?p=dlm.git;a=commitdiff;h=fc2ec0d6841352ccbd9fddddeaa17de44a65261b
Commit: fc2ec0d6841352ccbd9fddddeaa17de44a65261b
Parent: 2a7c15034a0101d9bb8948f1b40fc70728a79419
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Tue Nov 2 13:49:14 2010 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Tue Nov 2 13:49:14 2010 -0500
dlm_controld: remove pacemaker variant
the standard version is now compatible with pacemaker
Signed-off-by: David Teigland <teigland(a)redhat.com>
---
group/dlm_controld/Makefile.am | 29 ----
group/dlm_controld/pacemaker.c | 361 ----------------------------------------
2 files changed, 0 insertions(+), 390 deletions(-)
diff --git a/group/dlm_controld/Makefile.am b/group/dlm_controld/Makefile.am
index c14ab89..8eb6186 100644
--- a/group/dlm_controld/Makefile.am
+++ b/group/dlm_controld/Makefile.am
@@ -6,10 +6,6 @@ if ENABLE_CMAN
sbin_PROGRAMS += dlm_controld
endif
-if ENABLE_PACEMAKER
-sbin_PROGRAMS += dlm_controld.pcmk
-endif
-
noinst_HEADERS = config.h dlm_controld.h dlm_daemon.h
shared_SOURCES = action.c cpg.c crc.c deadlock.c main.c \
@@ -48,28 +44,3 @@ dlm_controld_LDFLAGS = $(shared_LIBS) \
dlm_controld_LDADD = $(shared_LDADD)
endif
-
-if ENABLE_PACEMAKER
-
-dlm_controld_pcmk_SOURCES = $(shared_SOURCES) \
- pacemaker.c
-
-dlm_controld_pcmk_CPPFLAGS= $(shared_CPPFLAGS) \
- -I$(prefix)/include/heartbeat \
- -I$(prefix)/include/pacemaker \
- -DENABLE_PACEMAKER=1
-
-dlm_controld_pcmk_CFLAGS = $(shared_CFLAGS) \
- $(glib_CFLAGS) \
- $(xml_CFLAGS) \
- $(totempg_CFLAGS)
-
-dlm_controld_pcmk_LDFLAGS = $(shared_LIBS) \
- $(glib_LIBS) \
- $(xml_LIBS) \
- $(totempg_LIBS) \
- -lcib -lcrmcommon -lcrmcluster
-
-dlm_controld_pcmk_LDADD = $(shared_LDADD)
-
-endif
diff --git a/group/dlm_controld/pacemaker.c b/group/dlm_controld/pacemaker.c
deleted file mode 100644
index 3150a1f..0000000
--- a/group/dlm_controld/pacemaker.c
+++ /dev/null
@@ -1,361 +0,0 @@
-#include "dlm_daemon.h"
-
-#include <syslog.h>
-
-#include "config.h"
-
-#include <glib.h>
-#include <bzlib.h>
-#include <heartbeat/ha_msg.h>
-
-#include <pacemaker/crm_config.h>
-
-#include <pacemaker/crm/crm.h>
-#include <pacemaker/crm/ais.h>
-#include <pacemaker/crm/attrd.h>
-/* heartbeat support is irrelevant here */
-#undef SUPPORT_HEARTBEAT
-#define SUPPORT_HEARTBEAT 0
-#include <pacemaker/crm/common/cluster.h>
-#include <pacemaker/crm/common/stack.h>
-#include <pacemaker/crm/common/ipc.h>
-#include <pacemaker/crm/msg_xml.h>
-#include <pacemaker/crm/cib.h>
-
-#define COMMS_DIR "/sys/kernel/config/dlm/cluster/comms"
-
-int setup_ccs(void)
-{
- /* To avoid creating an additional place for the dlm to be configured,
- * only allow configuration from the command-line until CoroSync is stable
- * enough to be used with Pacemaker
- */
- return 0;
-}
-
-void close_ccs(void) { return; }
-int get_weight(int nodeid, char *lockspace) { return 1; }
-
-/* TODO: Make this configurable
- * Can't use logging.c as-is as whitetank exposes a different logging API
- */
-void init_logging(void) {
- openlog("cluster-dlm", LOG_PERROR|LOG_PID|LOG_CONS|LOG_NDELAY, LOG_DAEMON);
- /* cl_log_enable_stderr(TRUE); */
-}
-
-void setup_logging(void) { return; }
-void close_logging(void) {
- closelog();
-}
-
-int setup_cluster_cfg(void) { return 0; }
-void process_cluster_cfg(int ci) {}
-void close_cluster_cfg(void) {}
-
-extern int ais_fd_async;
-
-int local_node_id = 0;
-char *local_node_uname = NULL;
-void dlm_process_node(gpointer key, gpointer value, gpointer user_data);
-
-int setup_cluster(void)
-{
- ais_fd_async = -1;
- crm_log_init("cluster-dlm", LOG_INFO, FALSE, TRUE, 0, NULL);
-
- if(init_ais_connection(NULL, NULL, NULL, &local_node_uname, &our_nodeid) == FALSE) {
- log_error("Connection to our AIS plugin (%d) failed", CRM_SERVICE);
- return -1;
- }
-
- /* Sign up for membership updates */
- send_ais_text(crm_class_notify, "true", TRUE, NULL, crm_msg_ais);
-
- /* Requesting the current list of known nodes */
- send_ais_text(crm_class_members, __FUNCTION__, TRUE, NULL, crm_msg_ais);
-
- return ais_fd_async;
-}
-
-void update_cluster(void)
-{
- static uint64_t last_membership = 0;
- cluster_quorate = crm_have_quorum;
- if(last_membership < crm_peer_seq) {
- log_debug("Processing membership %llu", crm_peer_seq);
- g_hash_table_foreach(crm_peer_cache, dlm_process_node, &last_membership);
- last_membership = crm_peer_seq;
- }
-}
-
-void process_cluster(int ci)
-{
- ais_dispatch(ais_fd_async, NULL);
- update_cluster();
-}
-
-void close_cluster(void) {
- terminate_ais_connection();
-}
-
-#include <arpa/inet.h>
-#include <corosync/totem/totemip.h>
-
-void dlm_process_node(gpointer key, gpointer value, gpointer user_data)
-{
- int rc = 0;
- struct stat tmp;
- char path[PATH_MAX];
- crm_node_t *node = value;
- uint64_t *last = user_data;
- const char *action = "Skipped";
-
- gboolean do_add = FALSE;
- gboolean do_remove = FALSE;
- gboolean is_active = FALSE;
-
- memset(path, 0, PATH_MAX);
- snprintf(path, PATH_MAX, "%s/%d", COMMS_DIR, node->id);
-
- rc = stat(path, &tmp);
- is_active = crm_is_member_active(node);
-
- if(rc == 0 && is_active) {
- /* nothing to do?
- * maybe the node left and came back...
- */
- } else if(rc == 0) {
- do_remove = TRUE;
-
- } else if(is_active && node->addr) {
- do_add = TRUE;
- }
-
- if(do_remove) {
- action = "Removed";
- del_configfs_node(node->id);
- }
-
- if(do_add) {
- char *addr_copy = strdup(node->addr);
- char *addr_top = addr_copy;
- char *addr = NULL;
-
- if(do_remove) {
- action = "Re-added";
- } else {
- action = "Added";
- }
-
- if(local_node_id == 0) {
- crm_node_t *local_node = g_hash_table_lookup(
- crm_peer_cache, local_node_uname);
- local_node_id = local_node->id;
- }
-
- do {
- char ipaddr[1024];
- int addr_family = AF_INET;
- int cna_len = 0, rc = 0;
- struct sockaddr_storage cna_addr;
- struct totem_ip_address totem_addr;
-
- addr = strsep(&addr_copy, " ");
- if(addr == NULL) {
- break;
- }
-
- /* do_cmd_get_node_addrs */
- if(strstr(addr, "ip(") == NULL) {
- continue;
-
- } else if(strchr(addr, ':')) {
- rc = sscanf(addr, "ip(%[0-9A-Fa-f:])", ipaddr);
- if(rc != 1) {
- log_error("Could not extract IPv6 address from '%s'", addr);
- continue;
- }
- addr_family = AF_INET6;
-
- } else {
- rc = sscanf(addr, "ip(%[0-9.]) ", ipaddr);
- if(rc != 1) {
- log_error("Could not extract IPv4 address from '%s'", addr);
- continue;
- }
- }
-
- rc = inet_pton(addr_family, ipaddr, &totem_addr);
- if(rc != 1) {
- log_error("Could not parse '%s' as in IPv%c address", ipaddr, (addr_family==AF_INET)?'4':'6');
- continue;
- }
-
- rc = totemip_parse(&totem_addr, ipaddr, addr_family);
- if(rc != 0) {
- log_error("Could not convert '%s' into a totem address", ipaddr);
- continue;
- }
-
- rc = totemip_totemip_to_sockaddr_convert(&totem_addr, 0, &cna_addr, &cna_len);
- if(rc != 0) {
- log_error("Could not convert totem address for '%s' into sockaddr", ipaddr);
- continue;
- }
-
- log_debug("Adding address %s to configfs for node %u/%s ", addr, node->id, node->uname);
- add_configfs_node(node->id, ((char*)&cna_addr), cna_len, (node->id == local_node_id));
-
- } while(addr != NULL);
- free(addr_top);
- }
-
- log_debug("%s %sctive node %u '%s': born-on=%llu, last-seen=%llu, this-event=%llu, last-event=%llu",
- action, crm_is_member_active(value)?"a":"ina",
- node->id, node->uname, node->born, node->last_seen,
- crm_peer_seq, (unsigned long long)*last);
-}
-
-int is_cluster_member(uint32_t nodeid)
-{
- crm_node_t *node = crm_get_peer(nodeid, NULL);
- return crm_is_member_active(node);
-}
-
-char *nodeid2name(int nodeid) {
- crm_node_t *node = crm_get_peer(nodeid, NULL);
- if(node->uname == NULL) {
- return NULL;
- }
- return strdup(node->uname);
-}
-
-static int pcmk_cluster_fd = 0;
-
-static void attrd_deadfn(int ci)
-{
- log_error("%s: Lost connection to the cluster", __FUNCTION__);
- pcmk_cluster_fd = 0;
- return;
-}
-
-void kick_node_from_cluster(int nodeid)
-{
- int fd = pcmk_cluster_fd;
- int rc = crm_terminate_member_no_mainloop(nodeid, NULL, &fd);
-
- if(fd > 0 && fd != pcmk_cluster_fd) {
- pcmk_cluster_fd = fd;
- client_add(pcmk_cluster_fd, NULL, attrd_deadfn);
- }
-
- switch(rc) {
- case 0:
- log_debug("Requested that node %d be kicked from the cluster", nodeid);
- break;
- case -1:
- log_error("Don't know how to kick node %d from the cluster", nodeid);
- break;
- case 1:
- log_error("Could not kick node %d from the cluster", nodeid);
- break;
- default:
- log_error("Unknown result when kicking node %d from the cluster", nodeid);
- break;
- }
- return;
-}
-
-cib_t *cib = NULL;
-
-static void cib_deadfn(int ci)
-{
- log_error("Lost connection to the cib");
- cib = NULL; /* TODO: memory leak in unlikely error path */
- return;
-}
-
-static cib_t *cib_connect(void)
-{
- int rc = 0;
- int cib_fd = 0;
- if(cib) {
- return cib;
- }
-
- cib = cib_new();
- rc = cib->cmds->signon_raw(cib, crm_system_name, cib_command, &cib_fd, NULL);
- if(rc != cib_ok) {
- log_error("Signon to cib failed: %s", cib_error2string(rc));
- cib = NULL; /* TODO: memory leak in unlikely error path */
-
- } else {
- client_add(cib_fd, NULL, cib_deadfn);
- }
- return cib;
-}
-
-
-int fence_in_progress(int *in_progress)
-{
- int rc = 0;
- xmlNode *xpath_data;
-
- cib_connect();
- if(cib == NULL) {
- return -1;
- }
-
- /* TODO: Not definitive - but a good approximation */
- rc = cib->cmds->query(cib, "//nvpar[@name='terminate']", &xpath_data,
- cib_xpath|cib_scope_local|cib_sync_call);
-
- if(xpath_data == NULL) {
- *in_progress = 0;
- return 0;
- }
-
- log_debug("Fencing in progress: %s", xpath_data?"true":"false");
- free_xml(xpath_data);
- *in_progress = 1;
- return 1;
-}
-
-#define XPATH_MAX 1024
-
-int fence_node_time(int nodeid, uint64_t *last_fenced_time)
-{
- int rc = 0;
- xmlNode *xpath_data;
- char xpath_query[XPATH_MAX];
- crm_node_t *node = crm_get_peer(nodeid, NULL);
-
- if(last_fenced_time) {
- *last_fenced_time = 0;
- }
-
- if(node == NULL || node->uname == NULL) {
- log_error("Nothing known about node %d", nodeid);
- return 0;
- }
-
- cib_connect();
- if(cib == NULL) {
- return -1;
- }
-
- snprintf(xpath_query, XPATH_MAX, "//lrm[@id='%s']", node->uname);
- rc = cib->cmds->query(
- cib, xpath_query, &xpath_data, cib_xpath|cib_scope_local|cib_sync_call);
-
- if(xpath_data == NULL) {
- /* the node has been shot - return 'now' */
- log_debug("Node %d/%s was last shot 'now'", nodeid, node->uname);
- *last_fenced_time = time(NULL);
- }
-
- free_xml(xpath_data);
- log_debug("It does not appear node %d/%s has been shot", nodeid, node->uname);
- return 0;
-}
13 years, 8 months