glusterfs: allow testing with a single glfs instance
The current multi-threaded test creates one glfs instance per job. However,
there is a requirement to run all jobs against one single instance, so that
all jobs share the same set of underlying worker threads.

Add a new option "single-instance" to control whether to create one
global instance or one instance per job. For testing with multiple
gluster volumes, the host name and volume name are used to detect jobs
that target the same volume, so each specific volume gets exactly one
instance (an example job file follows the change summary below).

Signed-off-by: Zhang Huan <[email protected]>
Zhang Huan committed Jun 20, 2018
1 parent 5de1d4b commit 53c508d
Showing 2 changed files with 151 additions and 29 deletions.
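
As a usage illustration (not part of this commit), here is a minimal job file sketch that would exercise the new option. The volume name, brick host, job names, and sizes are placeholders; the existing gfapi "volume" and "brick" options are assumed.

; hypothetical job file: both jobs should share one glfs instance
[global]
ioengine=gfapi
volume=testvol
brick=gluster-host
single-instance=1

[seq-writer]
rw=write
bs=1m
size=1g

[rand-reader]
rw=randread
bs=4k
size=1g

With single-instance=1, both jobs resolve to the same glfs_t, keyed by (brick, volume); with the default of 0, each job still creates its own instance, preserving the old behavior.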
1 change: 1 addition & 0 deletions engines/gfapi.h
@@ -5,6 +5,7 @@ struct gf_options {
void *pad;
char *gf_vol;
char *gf_brick;
int gf_single_instance;
};

struct gf_data {
179 changes: 150 additions & 29 deletions engines/glusterfs.c
@@ -27,17 +27,160 @@ struct fio_option gfapi_options[] = {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_GFAPI,
},
{
.name = "single-instance",
.lname = "Single glusterfs instance",
.type = FIO_OPT_BOOL,
.help = "Only one glusterfs instance",
.off1 = offsetof(struct gf_options, gf_single_instance),
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_GFAPI,
},
{
.name = NULL,
},
};

int fio_gf_setup(struct thread_data *td)
struct glfs_info {
struct flist_head list;
char *volume;
char *brick;
glfs_t *fs;
int refcount;
};

static pthread_mutex_t glfs_lock = PTHREAD_MUTEX_INITIALIZER;
static FLIST_HEAD(glfs_list_head);

static glfs_t *fio_gf_new_fs(char *volume, char *brick)
{
int r = 0;
glfs_t *fs;
struct stat sb = { 0, };

fs = glfs_new(volume);
if (!fs) {
log_err("glfs_new failed.\n");
goto out;
}
glfs_set_logging(fs, "/tmp/fio_gfapi.log", 7);
/* default to tcp */
r = glfs_set_volfile_server(fs, "tcp", brick, 0);
if (r) {
log_err("glfs_set_volfile_server failed.\n");
goto out;
}
r = glfs_init(fs);
if (r) {
log_err("glfs_init failed. Is glusterd running on brick?\n");
goto out;
}
sleep(2);
r = glfs_lstat(fs, ".", &sb);
if (r) {
log_err("glfs_lstat failed.\n");
goto out;
}

out:
if (r) {
glfs_fini(fs);
fs = NULL;
}
return fs;
}

static glfs_t *fio_gf_get_glfs(struct gf_options *opt,
char *volume, char *brick)
{
struct glfs_info *glfs = NULL;
struct glfs_info *tmp;
struct flist_head *entry;

if (!opt->gf_single_instance)
return fio_gf_new_fs(volume, brick);

pthread_mutex_lock (&glfs_lock);

flist_for_each(entry, &glfs_list_head) {
tmp = flist_entry(entry, struct glfs_info, list);
if (!strcmp(volume, tmp->volume) &&
!strcmp(brick, tmp->brick)) {
glfs = tmp;
break;
}
}

if (glfs) {
glfs->refcount++;
} else {
glfs = malloc(sizeof(*glfs));
if (!glfs)
goto out;
INIT_FLIST_HEAD(&glfs->list);
glfs->refcount = 0;
glfs->volume = strdup(volume);
glfs->brick = strdup(brick);
glfs->fs = fio_gf_new_fs(volume, brick);
if (!glfs->fs) {
free(glfs);
glfs = NULL;
goto out;
}

flist_add_tail(&glfs->list, &glfs_list_head);
glfs->refcount = 1;
}

out:
pthread_mutex_unlock (&glfs_lock);

if (glfs)
return glfs->fs;
return NULL;
}

static void fio_gf_put_glfs(struct gf_options *opt, glfs_t *fs)
{
struct glfs_info *glfs = NULL;
struct glfs_info *tmp;
struct flist_head *entry;

if (!opt->gf_single_instance) {
glfs_fini(fs);
return;
}

pthread_mutex_lock (&glfs_lock);

flist_for_each(entry, &glfs_list_head) {
tmp = flist_entry(entry, struct glfs_info, list);
if (tmp->fs == fs) {
glfs = tmp;
break;
}
}

if (!glfs) {
log_err("glfs not found to fini.\n");
} else {
glfs->refcount--;

if (glfs->refcount == 0) {
glfs_fini(glfs->fs);
free(glfs->volume);
free(glfs->brick);
flist_del(&glfs->list);
}
}

pthread_mutex_unlock (&glfs_lock);
}

int fio_gf_setup(struct thread_data *td)
{
struct gf_data *g = NULL;
struct gf_options *opt = td->eo;
struct stat sb = { 0, };

dprint(FD_IO, "fio setup\n");

@@ -49,42 +192,20 @@ int fio_gf_setup(struct thread_data *td)
log_err("malloc failed.\n");
return -ENOMEM;
}
g->fs = NULL;
g->fd = NULL;
g->aio_events = NULL;

g->fs = glfs_new(opt->gf_vol);
if (!g->fs) {
log_err("glfs_new failed.\n");
goto cleanup;
}
glfs_set_logging(g->fs, "/tmp/fio_gfapi.log", 7);
/* default to tcp */
r = glfs_set_volfile_server(g->fs, "tcp", opt->gf_brick, 0);
if (r) {
log_err("glfs_set_volfile_server failed.\n");
g->fs = fio_gf_get_glfs(opt, opt->gf_vol, opt->gf_brick);
if (!g->fs)
goto cleanup;
}
r = glfs_init(g->fs);
if (r) {
log_err("glfs_init failed. Is glusterd running on brick?\n");
goto cleanup;
}
sleep(2);
r = glfs_lstat(g->fs, ".", &sb);
if (r) {
log_err("glfs_lstat failed.\n");
goto cleanup;
}

dprint(FD_FILE, "fio setup %p\n", g->fs);
td->io_ops_data = g;
return 0;
cleanup:
if (g->fs)
glfs_fini(g->fs);
free(g);
td->io_ops_data = NULL;
return r;
return -EIO;
}

void fio_gf_cleanup(struct thread_data *td)
@@ -97,7 +218,7 @@ void fio_gf_cleanup(struct thread_data *td)
if (g->fd)
glfs_close(g->fd);
if (g->fs)
glfs_fini(g->fs);
fio_gf_put_glfs(td->eo, g->fs);
free(g);
td->io_ops_data = NULL;
}