staging: erofs: replace BUG_ON with DBG_BUGON in data.c
This patch replaces BUG_ON with DBG_BUGON in data.c and adds the necessary error handling.

Signed-off-by: Chen Gong <gongchen4@huawei.com>
Reviewed-by: Gao Xiang <gaoxiang25@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: 284db12cfd
Commit: 9141b60cf6
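For context, DBG_BUGON is the erofs debug-only assertion: it behaves like BUG_ON when erofs debugging is enabled and is harmless otherwise, which is why each converted site also gains a real error path below. A minimal sketch of the macro, assuming the CONFIG_EROFS_FS_DEBUG-gated definition in drivers/staging/erofs/internal.h (the exact form in the tree may differ):

    /* Debug-only assertion (sketch; see internal.h for the real definition) */
    #include <linux/bug.h>

    #ifdef CONFIG_EROFS_FS_DEBUG
    #define DBG_BUGON(x)	BUG_ON(x)	/* debug kernels: still crash loudly */
    #else
    #define DBG_BUGON(x)	((void)(x))	/* production: evaluate and ignore */
    #endif

With that in place, the assertions below keep firing on debug builds, while production kernels return -EIO through the new err_out path instead of oopsing.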
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio)
 		struct page *page = bvec->bv_page;
 
 		/* page is already locked */
-		BUG_ON(PageUptodate(page));
+		DBG_BUGON(PageUptodate(page));
 
 		if (unlikely(err))
 			SetPageError(page);
@@ -110,12 +110,12 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
 	struct erofs_map_blocks *map,
 	int flags)
 {
+	int err = 0;
 	erofs_blk_t nblocks, lastblk;
 	u64 offset = map->m_la;
 	struct erofs_vnode *vi = EROFS_V(inode);
 
 	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
-	BUG_ON(is_inode_layout_compression(inode));
 
 	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
 	lastblk = nblocks - is_inode_layout_inline(inode);
@@ -142,18 +142,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
 		map->m_plen = inode->i_size - offset;
 
 		/* inline data should locate in one meta block */
-		BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
+		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
+			DBG_BUGON(1);
+			err = -EIO;
+			goto err_out;
+		}
+
 		map->m_flags |= EROFS_MAP_META;
 	} else {
 		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
 			vi->nid, inode->i_size, map->m_la);
-		BUG();
+		DBG_BUGON(1);
+		err = -EIO;
+		goto err_out;
 	}
 
 out:
 	map->m_llen = map->m_plen;
+
+err_out:
 	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
-	return 0;
+	return err;
 }
 
 #ifdef CONFIG_EROFS_FS_ZIP
@@ -209,7 +218,7 @@ static inline struct bio *erofs_read_raw_page(
 	erofs_off_t current_block = (erofs_off_t)page->index;
 	int err;
 
-	BUG_ON(!nblocks);
+	DBG_BUGON(!nblocks);
 
 	if (PageUptodate(page)) {
 		err = 0;
@@ -252,7 +261,7 @@ submit_bio_retry:
 		}
 
 		/* for RAW access mode, m_plen must be equal to m_llen */
-		BUG_ON(map.m_plen != map.m_llen);
+		DBG_BUGON(map.m_plen != map.m_llen);
 
 		blknr = erofs_blknr(map.m_pa);
 		blkoff = erofs_blkoff(map.m_pa);
@@ -262,7 +271,7 @@ submit_bio_retry:
 			void *vsrc, *vto;
 			struct page *ipage;
 
-			BUG_ON(map.m_plen > PAGE_SIZE);
+			DBG_BUGON(map.m_plen > PAGE_SIZE);
 
 			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
 
@@ -289,7 +298,7 @@ submit_bio_retry:
 		}
 
 		/* pa must be block-aligned for raw reading */
-		BUG_ON(erofs_blkoff(map.m_pa) != 0);
+		DBG_BUGON(erofs_blkoff(map.m_pa));
 
 		/* max # of continuous pages */
 		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
@@ -357,7 +366,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	BUG_ON(bio != NULL);	/* since we have only one bio -- must be NULL */
+	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
 	return 0;
 }
 
@@ -395,7 +404,7 @@ static int erofs_raw_access_readpages(struct file *filp,
 		/* pages could still be locked */
 		put_page(page);
 	}
-	BUG_ON(!list_empty(pages));
+	DBG_BUGON(!list_empty(pages));
 
 	/* the rare case (end in gaps) */
 	if (unlikely(bio != NULL))