PM: hibernate: Enforce ordering during image compression/decompression

commit 71cd7e80cfde548959952eac7063aeaea1f2e1c6 upstream.

An S4 (suspend to disk) test on the LoongArch 3A6000 platform sometimes
fails with the following error message in the dmesg log:

	Invalid LZO compressed length

That happens because when compressing/decompressing the image, the
synchronization between the control thread and the compress/decompress/crc
threads is based on relaxed-ordering atomic operations, which provide no
memory-ordering guarantees, so the following situation may occur:

CPU 0					CPU 1
save_image_lzo				lzo_compress_threadfn
					  atomic_set(&d->stop, 1);
  atomic_read(&data[thr].stop)
  data[thr].cmp = data[thr].cmp_len;
					  WRITE data[thr].cmp_len

Then CPU 0 reads a stale cmp_len, which gets written to disk, so the wrong
cmp_len is loaded during resume from S4.

To maintain data consistency between the two threads, use the acquire/release
variants of atomic set and read operations.
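
As an aside, the release/acquire pairing can be illustrated with a minimal
userspace sketch using C11 atomics in place of the kernel's
atomic_set_release()/atomic_read_acquire(); the worker/result/stop names
below are illustrative only and are not taken from the kernel source:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static size_t result;		/* plays the role of data[thr].cmp_len */
static atomic_int stop;		/* plays the role of data[thr].stop    */

static void *worker(void *arg)
{
	(void)arg;
	result = 42;					/* WRITE cmp_len */
	/* like atomic_set_release(&d->stop, 1) */
	atomic_store_explicit(&stop, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, worker, NULL);

	/* like atomic_read_acquire(&data[thr].stop): once stop == 1 is
	 * observed, the write to result is guaranteed to be visible. */
	while (!atomic_load_explicit(&stop, memory_order_acquire))
		;

	printf("result = %zu\n", result);		/* always prints 42 */
	pthread_join(thr, NULL);
	return 0;
}

A relaxed load of stop, by contrast, would allow the read of result to
return a stale value, which is the failure mode described above.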

Fixes: 081a9d043c ("PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image")
Cc: All applicable <stable@vger.kernel.org>
Signed-off-by: Hongchen Zhang <zhanghongchen@loongson.cn>
Co-developed-by: Weihao Li <liweihao@loongson.cn>
Signed-off-by: Weihao Li <liweihao@loongson.cn>
[ rjw: Subject rewrite and changelog edits ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Hongchen Zhang 2023-11-16 08:56:09 +08:00 committed by Greg Kroah-Hartman
Parent: a6fec6324f
Commit: 4d4bf19c81
1 changed file with 19 additions and 19 deletions


@@ -603,11 +603,11 @@ static int crc32_threadfn(void *data)
 	unsigned i;
 
 	while (1) {
-		wait_event(d->go, atomic_read(&d->ready) ||
+		wait_event(d->go, atomic_read_acquire(&d->ready) ||
 			   kthread_should_stop());
 		if (kthread_should_stop()) {
 			d->thr = NULL;
-			atomic_set(&d->stop, 1);
+			atomic_set_release(&d->stop, 1);
 			wake_up(&d->done);
 			break;
 		}
@@ -616,7 +616,7 @@ static int crc32_threadfn(void *data)
 		for (i = 0; i < d->run_threads; i++)
 			*d->crc32 = crc32_le(*d->crc32,
 					     d->unc[i], *d->unc_len[i]);
-		atomic_set(&d->stop, 1);
+		atomic_set_release(&d->stop, 1);
 		wake_up(&d->done);
 	}
 	return 0;
@@ -646,12 +646,12 @@ static int lzo_compress_threadfn(void *data)
 	struct cmp_data *d = data;
 
 	while (1) {
-		wait_event(d->go, atomic_read(&d->ready) ||
+		wait_event(d->go, atomic_read_acquire(&d->ready) ||
 			   kthread_should_stop());
 		if (kthread_should_stop()) {
 			d->thr = NULL;
 			d->ret = -1;
-			atomic_set(&d->stop, 1);
+			atomic_set_release(&d->stop, 1);
 			wake_up(&d->done);
 			break;
 		}
@@ -660,7 +660,7 @@ static int lzo_compress_threadfn(void *data)
 		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
 					  d->cmp + LZO_HEADER, &d->cmp_len,
 					  d->wrk);
-		atomic_set(&d->stop, 1);
+		atomic_set_release(&d->stop, 1);
 		wake_up(&d->done);
 	}
 	return 0;
@@ -798,7 +798,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 
 			data[thr].unc_len = off;
 
-			atomic_set(&data[thr].ready, 1);
+			atomic_set_release(&data[thr].ready, 1);
 			wake_up(&data[thr].go);
 		}
 
@@ -806,12 +806,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
 			break;
 
 		crc->run_threads = thr;
-		atomic_set(&crc->ready, 1);
+		atomic_set_release(&crc->ready, 1);
 		wake_up(&crc->go);
 
 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 			wait_event(data[thr].done,
-				   atomic_read(&data[thr].stop));
+				   atomic_read_acquire(&data[thr].stop));
 			atomic_set(&data[thr].stop, 0);
 
 			ret = data[thr].ret;
@@ -850,7 +850,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 			}
 		}
 
-		wait_event(crc->done, atomic_read(&crc->stop));
+		wait_event(crc->done, atomic_read_acquire(&crc->stop));
 		atomic_set(&crc->stop, 0);
 	}
 
@@ -1132,12 +1132,12 @@ static int lzo_decompress_threadfn(void *data)
 	struct dec_data *d = data;
 
 	while (1) {
-		wait_event(d->go, atomic_read(&d->ready) ||
+		wait_event(d->go, atomic_read_acquire(&d->ready) ||
 			   kthread_should_stop());
 		if (kthread_should_stop()) {
 			d->thr = NULL;
 			d->ret = -1;
-			atomic_set(&d->stop, 1);
+			atomic_set_release(&d->stop, 1);
 			wake_up(&d->done);
 			break;
 		}
@@ -1150,7 +1150,7 @@ static int lzo_decompress_threadfn(void *data)
 			flush_icache_range((unsigned long)d->unc,
 					   (unsigned long)d->unc + d->unc_len);
 
-		atomic_set(&d->stop, 1);
+		atomic_set_release(&d->stop, 1);
 		wake_up(&d->done);
 	}
 	return 0;
@@ -1338,7 +1338,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 		}
 
 		if (crc->run_threads) {
-			wait_event(crc->done, atomic_read(&crc->stop));
+			wait_event(crc->done, atomic_read_acquire(&crc->stop));
 			atomic_set(&crc->stop, 0);
 			crc->run_threads = 0;
 		}
@@ -1374,7 +1374,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 					pg = 0;
 			}
 
-			atomic_set(&data[thr].ready, 1);
+			atomic_set_release(&data[thr].ready, 1);
 			wake_up(&data[thr].go);
 		}
 
@@ -1393,7 +1393,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 
 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 			wait_event(data[thr].done,
-				   atomic_read(&data[thr].stop));
+				   atomic_read_acquire(&data[thr].stop));
 			atomic_set(&data[thr].stop, 0);
 
 			ret = data[thr].ret;
@@ -1424,7 +1424,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
 				ret = snapshot_write_next(snapshot);
 				if (ret <= 0) {
 					crc->run_threads = thr + 1;
-					atomic_set(&crc->ready, 1);
+					atomic_set_release(&crc->ready, 1);
 					wake_up(&crc->go);
 					goto out_finish;
 				}
@@ -1432,13 +1432,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
 		}
 
 		crc->run_threads = thr;
-		atomic_set(&crc->ready, 1);
+		atomic_set_release(&crc->ready, 1);
 		wake_up(&crc->go);
 	}
 
 out_finish:
 	if (crc->run_threads) {
-		wait_event(crc->done, atomic_read(&crc->stop));
+		wait_event(crc->done, atomic_read_acquire(&crc->stop));
 		atomic_set(&crc->stop, 0);
 	}
 	stop = ktime_get();