- A stable fix for the alignment of the event number reported at the
  end of the 'DM_LIST_DEVICES' ioctl.

- A couple stable fixes for the DM crypt target.

- A DM raid health status reporting fix.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJZ1pR1AAoJEMUj8QotnQNa48kIAJ+HTqeNjVhspxqKyJHPl78W
3N/B11dWJ/CQ4xN7tbpC2gmsbnBBHE8RFTJzk3xQo7yoKsD0muqH35n0XA7X2A29
i7DoYro/7F6ZuPlgzhzcCjA7eTugR4vcp5dTFYoIQG0DaOKAkN/+gJTVjNDjpRR5
oGljZhKTeS4UNJTv/+ZjSMuAPycZq8LKRMOn/EgqT9MD4cIQ9VHN2qGc8jQt0Xrb
m58URvAoFesGnSjZcypk+JG2SbUfJ4WB3Db7+A+X7lu2219FIroFhNHMk9obYhXG
mkrhEnAsVsq/paPhCY4gdXWmSe7RNiAeSJeWhUSrNfjUACf1GF+l4CgBeBWIX+0=
=V40h
-----END PGP SIGNATURE-----

Merge tag 'for-4.14/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - a stable fix for the alignment of the event number reported at the
   end of the 'DM_LIST_DEVICES' ioctl.

 - a couple stable fixes for the DM crypt target.

 - a DM raid health status reporting fix.

* tag 'for-4.14/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm raid: fix incorrect status output at the end of a "recover" process
  dm crypt: reject sector_size feature if device length is not aligned to it
  dm crypt: fix memory leak in crypt_ctr_cipher_old()
  dm ioctl: fix alignment of event number in the device list
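For context on the first fix: DM_LIST_DEVICES returns a packed list of struct dm_name_list records, and kernels with this fix (dm-ioctl interface 4.37 and later) append a 32-bit event number after each name, rounded up to an 8-byte boundary; the rounding is exactly what the patch below makes consistent on both the sizing and writing paths. The following is a minimal userspace sketch of walking that layout. It is illustrative only, assumes a sufficiently large buffer and a kernel with the fix, and omits the version and error checking a real program would do.

/*
 * Minimal sketch (not kernel code): list DM devices via DM_LIST_DEVICES and
 * read the per-device event number appended after each name.  Assumes the
 * fixed 8-byte alignment (dm-ioctl interface >= 4.37) and a buffer large
 * enough for the whole list.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

#define ALIGN8(x) (((x) + 7) & ~(size_t)7)

int main(void)
{
	size_t buf_size = 16 * 1024;
	struct dm_ioctl *dmi = calloc(1, buf_size);
	int fd = open("/dev/mapper/control", O_RDWR);

	if (!dmi || fd < 0)
		return 1;

	dmi->version[0] = 4;		/* dm-ioctl interface major */
	dmi->data_size = buf_size;	/* whole buffer, header included */
	dmi->data_start = sizeof(*dmi);

	if (ioctl(fd, DM_LIST_DEVICES, dmi) < 0)
		return 1;

	if (!(dmi->flags & DM_BUFFER_FULL_FLAG)) {
		struct dm_name_list *nl = (void *)((char *)dmi + dmi->data_start);

		/* An empty list is flagged by dev == 0 in the first record. */
		while (nl->dev) {
			/* The event number sits after the name, rounded up to 8 bytes. */
			uint32_t *event_nr = (void *)((char *)nl +
				ALIGN8(offsetof(struct dm_name_list, name) +
				       strlen(nl->name) + 1));

			printf("%s: dev %llu event %u\n", nl->name,
			       (unsigned long long)nl->dev, (unsigned)*event_nr);

			if (!nl->next)
				break;
			nl = (void *)((char *)nl + nl->next);
		}
	}
	close(fd);
	free(dmi);
	return 0;
}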
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
@@ -344,3 +344,4 @@ Version History
 	(wrong raid10_copies/raid10_format sequence)
 1.11.1  Add raid4/5/6 journal write-back support via journal_mode option
 1.12.1  fix for MD deadlock between mddev_suspend() and md_write_start() available
+1.13.0  Fix dev_health status at end of "recover" (was 'a', now 'A')
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
 
 extern atomic_t dm_global_event_nr;
 extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
 
 #endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key)
 		kfree(cipher_api);
 		return ret;
 	}
+	kfree(cipher_api);
 
 	return 0;
 bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
 				ti->error = "Invalid feature value for sector_size";
 				return -EINVAL;
 			}
+			if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+				ti->error = "Device size is not multiple of sector_size feature";
+				return -EINVAL;
+			}
 			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
 		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
 			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
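The new check above compares ti->len, which is expressed in 512-byte sectors, against the configured sector_size in bytes, so for example a 4096-byte sector_size requires the device length to be a multiple of eight 512-byte sectors. A small standalone illustration of the same arithmetic (SECTOR_SHIFT assumed to be 9, as in the kernel):

/* Standalone illustration of the new device-length check in crypt_ctr_optional(). */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte kernel sectors */

/* Returns 1 if a target of 'len' 512-byte sectors fits 'sector_size'-byte crypto sectors. */
static int len_aligned_to_sector_size(uint64_t len, unsigned int sector_size)
{
	return !(len & ((sector_size >> SECTOR_SHIFT) - 1));
}

int main(void)
{
	/* 1 GiB device: 2097152 sectors -> accepted with 4096-byte sectors. */
	printf("%d\n", len_aligned_to_sector_size(2097152, 4096));	/* prints 1 */
	/* 1 GiB + 512 bytes: 2097153 sectors -> rejected, as in the new check. */
	printf("%d\n", len_aligned_to_sector_size(2097153, 4096));	/* prints 0 */
	return 0;
}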
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_size)
  * Round up the ptr to an 8-byte boundary.
  */
 #define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+	return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
 static inline void *align_ptr(void *ptr)
 {
-	return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+	return (void *)align_val((size_t)ptr);
 }
 
 /*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
 	struct hash_cell *hc;
 	size_t len, needed = 0;
 	struct gendisk *disk;
-	struct dm_name_list *nl, *old_nl = NULL;
+	struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
 	uint32_t *event_nr;
 
 	down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
 	 */
 	for (i = 0; i < NUM_BUCKETS; i++) {
 		list_for_each_entry (hc, _name_buckets + i, name_list) {
-			needed += sizeof(struct dm_name_list);
-			needed += strlen(hc->name) + 1;
-			needed += ALIGN_MASK;
-			needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+			needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+			needed += align_val(sizeof(uint32_t));
 		}
 	}
 
 	/*
 	 * Grab our output buffer.
 	 */
-	nl = get_result_buffer(param, param_size, &len);
+	nl = orig_nl = get_result_buffer(param, param_size, &len);
 	if (len < needed) {
 		param->flags |= DM_BUFFER_FULL_FLAG;
 		goto out;
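For illustration, the size accounting above rounds each record to 8 bytes: the dm_name_list header plus the NUL-terminated name, then an 8-byte-aligned 32-bit event number. A standalone userspace sketch of the same computation, using a hypothetical device name:

/*
 * Standalone illustration (not kernel code) of the per-record size accounting
 * used by the new list_devices() code above.  "vg0-root" is a hypothetical name.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <linux/dm-ioctl.h>

#define ALIGN_MASK 7

static size_t align_val(size_t val)
{
	return (val + ALIGN_MASK) & ~(size_t)ALIGN_MASK;
}

int main(void)
{
	const char *name = "vg0-root";
	size_t needed = 0;

	/* Header + name + NUL, rounded up, then an aligned 32-bit event number. */
	needed += align_val(offsetof(struct dm_name_list, name) + strlen(name) + 1);
	needed += align_val(sizeof(uint32_t));

	/* e.g. align_val(21) == 24 and align_val(4) == 8, so needed == 32 here. */
	printf("bytes needed for \"%s\": %zu\n", name, needed);
	return 0;
}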
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
 			strcpy(nl->name, hc->name);
 
 			old_nl = nl;
-			event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+			event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
 			*event_nr = dm_get_event_nr(hc->md);
 			nl = align_ptr(event_nr + 1);
 		}
 	}
+	/*
+	 * If mismatch happens, security may be compromised due to buffer
+	 * overflow, so it's better to crash.
+	 */
+	BUG_ON((char *)nl - (char *)orig_nl != needed);
 
  out:
 	up_write(&_hash_lock);
@@ -1622,6 +1629,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size)
  * the ioctl.
  */
 #define IOCTL_FLAGS_NO_PARAMS		1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT	2
 
 /*-----------------------------------------------------------------
  * Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
 		ioctl_fn fn;
 	} _ioctls[] = {
 		{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
-		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
 		{DM_LIST_DEVICES_CMD, 0, list_devices},
 
-		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
-		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
-		{DM_DEV_RENAME_CMD, 0, dev_rename},
+		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+		{DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
 		{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
 		{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
 		{DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user)
 	    unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
 		DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
 
+	if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+		dm_issue_global_event();
+
 	/*
 	 * Copy the results back to userland.
 	 */
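The IOCTL_FLAGS_ISSUE_GLOBAL_EVENT plumbing lets device-changing ioctls wake processes that poll /dev/mapper/control. A rough userspace sketch of the consumer side follows; it assumes the DM_DEV_ARM_POLL ioctl is available on the running kernel and that arming latches the current global event counter, so poll() only reports readiness after a later global event. Treat both as assumptions to verify against the kernel you target, not guarantees.

/*
 * Sketch: wait for device-mapper "global" events (device create/remove/rename,
 * table swap) by polling /dev/mapper/control.  Assumed semantics: arming with
 * DM_DEV_ARM_POLL latches the current global event counter for this fd, and
 * poll() reports POLLIN once a later global event is issued.  Not production code.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	struct dm_ioctl dmi;
	struct pollfd pfd;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return 1;

	for (;;) {
		memset(&dmi, 0, sizeof(dmi));
		dmi.version[0] = 4;		/* dm-ioctl interface major */
		dmi.data_size = sizeof(dmi);

		/* Latch the current global event number for this fd (assumed behaviour). */
		if (ioctl(fd, DM_DEV_ARM_POLL, &dmi) < 0)
			break;

		pfd.fd = fd;
		pfd.events = POLLIN;
		if (poll(&pfd, 1, -1) < 0)
			break;

		printf("device-mapper global event\n");
		/* A real monitor would now re-run DM_LIST_DEVICES and diff the result. */
	}
	close(fd);
	return 0;
}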
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
 static sector_t rs_get_progress(struct raid_set *rs,
 				sector_t resync_max_sectors, bool *array_in_sync)
 {
-	sector_t r, recovery_cp, curr_resync_completed;
+	sector_t r, curr_resync_completed;
 	struct mddev *mddev = &rs->md;
 
 	curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
-	recovery_cp = mddev->recovery_cp;
 	*array_in_sync = false;
 
 	if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
 		} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 			r = curr_resync_completed;
 		else
-			r = recovery_cp;
+			r = mddev->recovery_cp;
 
-		if (r == MaxSector) {
+		if ((r == MaxSector) ||
+		    (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+		     (mddev->curr_resync_completed == resync_max_sectors))) {
 			/*
 			 * Sync complete.
 			 */
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 12, 1},
+	.version = {1, 13, 0},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
 
+void dm_issue_global_event(void)
+{
+	atomic_inc(&dm_global_event_nr);
+	wake_up(&dm_global_eventq);
+}
+
 /*
  * One of these is allocated per bio.
  */
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
 	atomic_inc(&md->event_nr);
-	atomic_inc(&dm_global_event_nr);
 	wake_up(&md->eventq);
-	wake_up(&dm_global_eventq);
+	dm_issue_global_event();
 }
 
 /*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	}
 
 	map = __bind(md, table, &limits);
+	dm_issue_global_event();
 
 out:
 	mutex_unlock(&md->suspend_lock);
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
@@ -269,9 +269,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	36
+#define DM_VERSION_MINOR	37
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2017-06-09)"
+#define DM_VERSION_EXTRA	"-ioctl (2017-09-20)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */