dm table: replace while loops with for loops

Also remove some unnecessary use of uninitialized_var().

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Mikulas Patocka, 2017-04-18 16:51:46 -04:00, committed by Mike Snitzer
Parent: cc7e394024
Commit: 3c12016910
1 changed file with 32 additions and 31 deletions

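For reference, the conversion made throughout this patch follows the shape sketched below. This is a minimal, standalone C example, not the kernel code: table_get_num_targets() and table_get_target() are hypothetical stand-ins for the DM helpers dm_table_get_num_targets()/dm_table_get_target(). It also illustrates why uninitialized_var() (the old kernel macro that papered over false "may be used uninitialized" warnings) is unnecessary here: in the for-loop form the index and the target pointer are plainly assigned before every use.

#include <stdio.h>

/* Hypothetical stand-ins for the DM table helpers. */
struct target { const char *name; };

static struct target targets[] = {
        { "linear" }, { "striped" }, { "cache" },
};

static unsigned table_get_num_targets(void)
{
        return (unsigned)(sizeof(targets) / sizeof(targets[0]));
}

static struct target *table_get_target(unsigned index)
{
        return &targets[index];
}

int main(void)
{
        struct target *ti;      /* no initializer (or uninitialized_var()) needed */
        unsigned i;

        /* Old shape: while loop with a post-incremented index. */
        i = 0;
        while (i < table_get_num_targets()) {
                ti = table_get_target(i++);
                printf("while: %s\n", ti->name);
        }

        /* New shape: the equivalent for loop; i and ti are visibly
         * assigned before every use inside the loop body. */
        for (i = 0; i < table_get_num_targets(); i++) {
                ti = table_get_target(i);
                printf("for:   %s\n", ti->name);
        }

        return 0;
}

The two loops behave identically; the for form simply keeps the initialisation, test, and increment in one place and avoids the easy-to-miss post-increment buried in the loop body.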
@@ -373,7 +373,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
  */
 dev_t dm_get_dev_t(const char *path)
 {
-        dev_t uninitialized_var(dev);
+        dev_t dev;
         struct block_device *bdev;
 
         bdev = lookup_bdev(path);
@@ -627,13 +627,13 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
         struct dm_target *uninitialized_var(ti);
         struct queue_limits ti_limits;
-        unsigned i = 0;
+        unsigned i;
 
         /*
          * Check each entry in the table in turn.
          */
-        while (i < dm_table_get_num_targets(table)) {
-                ti = dm_table_get_target(table, i++);
+        for (i = 0; i < dm_table_get_num_targets(table); i++) {
+                ti = dm_table_get_target(table, i);
 
                 blk_set_stacking_limits(&ti_limits);
@@ -854,11 +854,11 @@ static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_supports_dax(struct dm_table *t)
 {
         struct dm_target *ti;
-        unsigned i = 0;
+        unsigned i;
 
         /* Ensure that all targets support DAX. */
-        while (i < dm_table_get_num_targets(t)) {
-                ti = dm_table_get_target(t, i++);
+        for (i = 0; i < dm_table_get_num_targets(t); i++) {
+                ti = dm_table_get_target(t, i);
 
                 if (!ti->type->direct_access)
                         return false;
@@ -1010,11 +1010,11 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
 {
-        struct dm_target *uninitialized_var(ti);
-        unsigned i = 0;
+        struct dm_target *ti;
+        unsigned i;
 
-        while (i < dm_table_get_num_targets(t)) {
-                ti = dm_table_get_target(t, i++);
+        for (i = 0; i < dm_table_get_num_targets(t); i++) {
+                ti = dm_table_get_target(t, i);
 
                 if (dm_target_is_wildcard(ti->type))
                         return ti;
         }
@@ -1321,15 +1321,16 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
  */
 bool dm_table_has_no_data_devices(struct dm_table *table)
 {
-        struct dm_target *uninitialized_var(ti);
-        unsigned i = 0, num_devices = 0;
+        struct dm_target *ti;
+        unsigned i, num_devices;
 
-        while (i < dm_table_get_num_targets(table)) {
-                ti = dm_table_get_target(table, i++);
+        for (i = 0; i < dm_table_get_num_targets(table); i++) {
+                ti = dm_table_get_target(table, i);
 
                 if (!ti->type->iterate_devices)
                         return false;
 
+                num_devices = 0;
                 ti->type->iterate_devices(ti, count_device, &num_devices);
                 if (num_devices)
                         return false;
@@ -1344,16 +1345,16 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
 int dm_calculate_queue_limits(struct dm_table *table,
                               struct queue_limits *limits)
 {
-        struct dm_target *uninitialized_var(ti);
+        struct dm_target *ti;
         struct queue_limits ti_limits;
-        unsigned i = 0;
+        unsigned i;
 
         blk_set_stacking_limits(limits);
 
-        while (i < dm_table_get_num_targets(table)) {
+        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                 blk_set_stacking_limits(&ti_limits);
 
-                ti = dm_table_get_target(table, i++);
+                ti = dm_table_get_target(table, i);
 
                 if (!ti->type->iterate_devices)
                         goto combine_limits;
@@ -1435,7 +1436,7 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 {
         struct dm_target *ti;
-        unsigned i = 0;
+        unsigned i;
 
         /*
          * Require at least one underlying device to support flushes.
@@ -1443,8 +1444,8 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
          * so we need to use iterate_devices here, which targets
          * supporting flushes must provide.
          */
-        while (i < dm_table_get_num_targets(t)) {
-                ti = dm_table_get_target(t, i++);
+        for (i = 0; i < dm_table_get_num_targets(t); i++) {
+                ti = dm_table_get_target(t, i);
 
                 if (!ti->num_flush_bios)
                         continue;
@@ -1504,10 +1505,10 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
                                            iterate_devices_callout_fn func)
 {
         struct dm_target *ti;
-        unsigned i = 0;
+        unsigned i;
 
-        while (i < dm_table_get_num_targets(t)) {
-                ti = dm_table_get_target(t, i++);
+        for (i = 0; i < dm_table_get_num_targets(t); i++) {
+                ti = dm_table_get_target(t, i);
 
                 if (!ti->type->iterate_devices ||
                     !ti->type->iterate_devices(ti, func, NULL))
@@ -1528,10 +1529,10 @@ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *de
 static bool dm_table_supports_write_same(struct dm_table *t)
 {
         struct dm_target *ti;
-        unsigned i = 0;
+        unsigned i;
 
-        while (i < dm_table_get_num_targets(t)) {
-                ti = dm_table_get_target(t, i++);
+        for (i = 0; i < dm_table_get_num_targets(t); i++) {
+                ti = dm_table_get_target(t, i);
 
                 if (!ti->num_write_same_bios)
                         return false;
@@ -1555,7 +1556,7 @@ static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_supports_discards(struct dm_table *t)
 {
         struct dm_target *ti;
-        unsigned i = 0;
+        unsigned i;
 
         /*
          * Unless any target used by the table set discards_supported,
@@ -1564,8 +1565,8 @@ static bool dm_table_supports_discards(struct dm_table *t)
          * so we need to use iterate_devices here, which targets
          * supporting discard selectively must provide.
          */
-        while (i < dm_table_get_num_targets(t)) {
-                ti = dm_table_get_target(t, i++);
+        for (i = 0; i < dm_table_get_num_targets(t); i++) {
+                ti = dm_table_get_target(t, i);
 
                 if (!ti->num_discard_bios)
                         continue;