author | Boaz Harrosh <bharrosh@panasas.com> | 2011-08-05 15:06:04 -0700
---|---|---
committer | Boaz Harrosh <bharrosh@panasas.com> | 2011-08-06 19:35:32 -0700
commit | 9e9db45649eb5d3ee5622fdad741914ecf1016a0 (patch) |
tree | 19ab9e1431e3d6535cef3f2cba6fcff12bb6ba6c /fs/exofs/ios.c |
parent | 85e44df4748670a1a7d8441b2d75843cdebc478a (diff) |
exofs: ios: Move to a per inode components & device-table
The exofs raid engine was saving memory by keeping a single layout-info, a single
pid, and a single device-table, global to the filesystem, and passing credential
and object_id info at the io_state level, private to each inode. It also devised
a contraption of rotating the device-table view for each inode->ino to spread out
the device usage.
This is not compatible with the pnfs-objects standard, which demands that
each inode can have its own layout-info and device-table, and each object
component its own pid, oid, and creds.
So: make the exofs raid engine usable for generic pnfs-objects by:
* Define an exofs_comp structure that holds obj_id and credential info.
* Break up the exofs_layout struct into an exofs_components structure that holds
  a possible array of exofs_comp, the array of devices, and the sizes of both
  arrays (see the sketch after this list).
* Add a "comps" parameter to get_io_state() that specifies the ids creds
and device array to use for each IO.
This enables to keep the layout global, but the device-table view, creds
and IDs at the inode level. It only adds two 64bit to each inode, since
some of these members already existed in another form.
* The ios raid engine now accesses layout-info and comps-info through the passed
  pointers. Everything is pre-prepared by the caller for generic access to
  these structures and arrays.
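Roughly, the new structures described above look like the sketch below. This is
only a sketch reconstructed from the accessors in the diff that follows; the
exact field order, types, and the credential buffer size are assumptions, and
the authoritative definitions presumably live in fs/exofs/exofs.h:

    /* sketch only -- reconstructed from the accessors in the diff below */
    struct exofs_comp {
            struct osd_obj_id obj;               /* partition-id + object-id */
            u8                cred[OSD_CAP_LEN]; /* credential for this object
                                                  * (buffer size assumed) */
    };

    struct exofs_components {
            unsigned          numdevs;     /* size of the ods[] device array */
            unsigned          single_comp; /* mask: one shared comp vs. one comp
                                            * per device, see _ios_obj() below */
            struct exofs_comp *comps;      /* obj-ids + creds, possibly an array */
            struct osd_dev    **ods;       /* this inode's view of the device table */
    };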
At the exofs level:
* The super block holds an exofs_components struct that holds the device
  array previously kept in the layout. The devices there are in device-table
  order. The device array is twice as big and repeats the device table
  twice, so each inode's device array can point to a random device and
  still get a round-robin view of the table, keeping compatibility with
  previous exofs versions (see the sketch after this list).
* Each inode has an exofs_components struct that is initialized at
  load time, with its own view of the device table, IDs, and creds.
  When doing IO this gets passed to the io_state together with the
  layout.
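To illustrate the round-robin device view described above: because the super
block repeats the device table twice, an inode's view can start anywhere in the
first copy and simply run into the second copy, with no wrap-around arithmetic
per access. A minimal, stand-alone sketch of the idea (the ino % numdevs
starting rule and all names here are illustrative assumptions, not code from
this patch):

    #include <stdio.h>

    #define NUMDEVS 4

    int main(void)
    {
            /* the super block keeps the device table twice, back to back */
            int sb_devs[2 * NUMDEVS] = { 0, 1, 2, 3, 0, 1, 2, 3 };
            unsigned ino, i;

            for (ino = 0; ino < 3; ino++) {
                    /* each inode's device array points somewhere into the
                     * first copy; reading NUMDEVS entries from there gives
                     * a rotated (round-robin) view of the table */
                    int *inode_view = &sb_devs[ino % NUMDEVS];

                    printf("ino %u sees devices:", ino);
                    for (i = 0; i < NUMDEVS; i++)
                            printf(" %d", inode_view[i]);
                    printf("\n");
            }
            return 0;
    }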
While performing this change, bugs were found where credentials with the
wrong IDs were used to access the different SB objects (super.c), as well
as some dead code. This was never noticed because the target we use does not
check the credentials.
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
Diffstat (limited to 'fs/exofs/ios.c')
-rw-r--r-- | fs/exofs/ios.c | 102 |
1 file changed, 62 insertions, 40 deletions
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 21d6130..f9d5c5a 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -31,24 +31,41 @@
 #define EXOFS_DBGMSG2(M...) do {} while (0)
 /* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
 
-int exofs_get_rw_state(struct exofs_layout *layout, bool is_reading,
-                       u64 offset, u64 length, struct exofs_io_state **pios)
+static u8 *_ios_cred(struct exofs_io_state *ios, unsigned index)
+{
+        return ios->comps->comps[index & ios->comps->single_comp].cred;
+}
+
+static struct osd_obj_id *_ios_obj(struct exofs_io_state *ios, unsigned index)
+{
+        return &ios->comps->comps[index & ios->comps->single_comp].obj;
+}
+
+static struct osd_dev *_ios_od(struct exofs_io_state *ios, unsigned index)
+{
+        return ios->comps->ods[index];
+}
+
+int exofs_get_rw_state(struct exofs_layout *layout,
+                       struct exofs_components *comps,
+                       bool is_reading, u64 offset, u64 length,
+                       struct exofs_io_state **pios)
 {
         struct exofs_io_state *ios;
 
         /*TODO: Maybe use kmem_cach per sbi of size
          * exofs_io_state_size(layout->s_numdevs)
          */
-        ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
+        ios = kzalloc(exofs_io_state_size(comps->numdevs), GFP_KERNEL);
         if (unlikely(!ios)) {
                 EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
-                             exofs_io_state_size(layout->s_numdevs));
+                             exofs_io_state_size(comps->numdevs));
                 *pios = NULL;
                 return -ENOMEM;
         }
 
         ios->layout = layout;
-        ios->obj.partition = layout->s_pid;
+        ios->comps = comps;
         ios->offset = offset;
         ios->length = length;
         ios->reading = is_reading;
@@ -58,9 +75,10 @@ int exofs_get_rw_state(struct exofs_layout *layout, bool is_reading,
 }
 
 int exofs_get_io_state(struct exofs_layout *layout,
+                       struct exofs_components *comps,
                        struct exofs_io_state **ios)
 {
-        return exofs_get_rw_state(layout, true, 0, 0, ios);
+        return exofs_get_rw_state(layout, comps, true, 0, 0, ios);
 }
 
 void exofs_put_io_state(struct exofs_io_state *ios)
@@ -119,7 +137,7 @@ static int exofs_io_execute(struct exofs_io_state *ios)
                 if (unlikely(!or))
                         continue;
 
-                ret = osd_finalize_request(or, 0, ios->cred, NULL);
+                ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
                 if (unlikely(ret)) {
                         EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
                                      ret);
@@ -300,7 +318,7 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
 {
         unsigned pg = *cur_pg;
         struct request_queue *q =
-                        osd_request_queue(exofs_ios_od(ios, per_dev->dev));
+                        osd_request_queue(_ios_od(ios, per_dev->dev));
 
         per_dev->length += cur_len;
 
@@ -440,10 +458,10 @@ int exofs_sbi_create(struct exofs_io_state *ios)
 {
         int i, ret;
 
-        for (i = 0; i < ios->layout->s_numdevs; i++) {
+        for (i = 0; i < ios->comps->numdevs; i++) {
                 struct osd_request *or;
 
-                or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
+                or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
                 if (unlikely(!or)) {
                         EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                         ret = -ENOMEM;
@@ -452,7 +470,7 @@ int exofs_sbi_create(struct exofs_io_state *ios)
                 ios->per_dev[i].or = or;
                 ios->numdevs++;
 
-                osd_req_create_object(or, &ios->obj);
+                osd_req_create_object(or, _ios_obj(ios, i));
         }
         ret = exofs_io_execute(ios);
 
@@ -464,10 +482,10 @@ int exofs_sbi_remove(struct exofs_io_state *ios)
 {
         int i, ret;
 
-        for (i = 0; i < ios->layout->s_numdevs; i++) {
+        for (i = 0; i < ios->comps->numdevs; i++) {
                 struct osd_request *or;
 
-                or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
+                or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
                 if (unlikely(!or)) {
                         EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                         ret = -ENOMEM;
@@ -476,7 +494,7 @@ int exofs_sbi_remove(struct exofs_io_state *ios)
                 ios->per_dev[i].or = or;
                 ios->numdevs++;
 
-                osd_req_remove_object(or, &ios->obj);
+                osd_req_remove_object(or, _ios_obj(ios, i));
         }
         ret = exofs_io_execute(ios);
 
@@ -498,7 +516,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
                 struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
                 struct osd_request *or;
 
-                or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
+                or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
                 if (unlikely(!or)) {
                         EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                         ret = -ENOMEM;
@@ -533,25 +551,29 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
                                 bio->bi_rw |= REQ_WRITE;
                         }
 
-                        osd_req_write(or, &ios->obj, per_dev->offset, bio,
-                                      per_dev->length);
+                        osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
+                                      bio, per_dev->length);
                         EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
                                      "length=0x%llx dev=%d\n",
-                                     _LLU(ios->obj.id), _LLU(per_dev->offset),
+                                     _LLU(_ios_obj(ios, dev)->id),
+                                     _LLU(per_dev->offset),
                                      _LLU(per_dev->length), dev);
                 } else if (ios->kern_buff) {
-                        ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
-                                           ios->kern_buff, ios->length);
+                        ret = osd_req_write_kern(or, _ios_obj(ios, dev),
+                                                 per_dev->offset,
+                                                 ios->kern_buff, ios->length);
                         if (unlikely(ret))
                                 goto out;
                         EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
                                       "length=0x%llx dev=%d\n",
-                                      _LLU(ios->obj.id), _LLU(per_dev->offset),
+                                      _LLU(_ios_obj(ios, dev)->id),
+                                      _LLU(per_dev->offset),
                                       _LLU(ios->length), dev);
                 } else {
-                        osd_req_set_attributes(or, &ios->obj);
+                        osd_req_set_attributes(or, _ios_obj(ios, dev));
                         EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
-                                      _LLU(ios->obj.id), ios->out_attr_len, dev);
+                                      _LLU(_ios_obj(ios, dev)->id),
+                                      ios->out_attr_len, dev);
                 }
 
                 if (ios->out_attr)
@@ -590,13 +612,14 @@ static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
 {
         struct osd_request *or;
         struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
-        unsigned first_dev = (unsigned)ios->obj.id;
+        struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
+        unsigned first_dev = (unsigned)obj->id;
 
         if (ios->pages && !per_dev->length)
                 return 0; /* Just an empty slot */
 
         first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
-        or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
+        or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
         if (unlikely(!or)) {
                 EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                 return -ENOMEM;
@@ -604,25 +627,26 @@ static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
         per_dev->or = or;
 
         if (ios->pages) {
-                osd_req_read(or, &ios->obj, per_dev->offset,
+                osd_req_read(or, obj, per_dev->offset,
                              per_dev->bio, per_dev->length);
                 EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
-                             " dev=%d\n", _LLU(ios->obj.id),
+                             " dev=%d\n", _LLU(obj->id),
                              _LLU(per_dev->offset), _LLU(per_dev->length),
                              first_dev);
         } else if (ios->kern_buff) {
-                int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
+                int ret = osd_req_read_kern(or, obj, per_dev->offset,
                                             ios->kern_buff, ios->length);
                 EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
                               "length=0x%llx dev=%d ret=>%d\n",
-                              _LLU(ios->obj.id), _LLU(per_dev->offset),
+                              _LLU(obj->id), _LLU(per_dev->offset),
                               _LLU(ios->length), first_dev, ret);
                 if (unlikely(ret))
                         return ret;
         } else {
-                osd_req_get_attributes(or, &ios->obj);
+                osd_req_get_attributes(or, obj);
                 EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
-                              _LLU(ios->obj.id), ios->in_attr_len, first_dev);
+                              _LLU(obj->id),
+                              ios->in_attr_len, first_dev);
         }
 
         if (ios->out_attr)
                 osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
@@ -682,14 +706,14 @@ static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
                 struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
                 struct osd_request *or;
 
-                or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
+                or = osd_start_request(_ios_od(ios, cur_comp), GFP_KERNEL);
                 if (unlikely(!or)) {
                         EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                         return -ENOMEM;
                 }
                 per_dev->or = or;
 
-                osd_req_set_attributes(or, &ios->obj);
+                osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
                 osd_req_add_set_attr_list(or, attr, 1);
         }
 
@@ -721,9 +745,9 @@ void _calc_trunk_info(struct exofs_layout *layout, u64 file_offset,
         ti->max_devs = layout->group_width * layout->group_count;
 }
 
-int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
+int exofs_truncate(struct exofs_layout *layout, struct exofs_components *comps,
+                   u64 size)
 {
-        struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
         struct exofs_io_state *ios;
         struct exofs_trunc_attr {
                 struct osd_attr attr;
@@ -732,7 +756,7 @@ int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
         struct _trunc_info ti;
         int i, ret;
 
-        ret = exofs_get_io_state(&sbi->layout, &ios);
+        ret = exofs_get_io_state(layout, comps, &ios);
         if (unlikely(ret))
                 return ret;
 
@@ -745,9 +769,7 @@ int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
                 goto out;
         }
 
-        ios->obj.id = exofs_oi_objno(oi);
-        ios->cred = oi->i_cred;
-        ios->numdevs = ios->layout->s_numdevs;
+        ios->numdevs = ios->comps->numdevs;
 
         for (i = 0; i < ti.max_devs; ++i) {
                 struct exofs_trunc_attr *size_attr = &size_attrs[i];
@@ -770,7 +792,7 @@ int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
                 size_attr->attr.val_ptr = &size_attr->newsize;
 
                 EXOFS_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
-                             _LLU(ios->obj.id), _LLU(obj_size), i);
+                             _LLU(comps->comps->obj.id), _LLU(obj_size), i);
                 ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
                                         &size_attr->attr);
                 if (unlikely(ret))
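One detail of the new _ios_cred()/_ios_obj() accessors in the diff above worth
spelling out is the "index & ios->comps->single_comp" masking: presumably
single_comp is either all-zero, so every device index maps to the one shared
component (as for filesystem-global objects), or all-ones, so the index passes
through and each device gets its own component. A small stand-alone sketch of
that idea (struct names and values here are illustrative stand-ins, not the
kernel definitions):

    #include <stdio.h>

    struct comp { unsigned id; };

    struct components {
            unsigned single_comp;   /* 0 => one shared comp; ~0u => comp per device */
            struct comp comps[4];
    };

    static struct comp *comp_for_dev(struct components *c, unsigned dev_index)
    {
            /* same masking trick as _ios_obj()/_ios_cred() above */
            return &c->comps[dev_index & c->single_comp];
    }

    int main(void)
    {
            struct components shared  = { 0,   { { 17 } } };
            struct components striped = { ~0u, { { 10 }, { 11 }, { 12 }, { 13 } } };
            unsigned i;

            for (i = 0; i < 4; i++)
                    printf("dev %u: shared -> comp %u, striped -> comp %u\n",
                           i, comp_for_dev(&shared, i)->id,
                           comp_for_dev(&striped, i)->id);
            return 0;
    }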