reiserfs: strip trailing whitespace
This patch strips trailing whitespace from the reiserfs code.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 3cd6dbe6fe
Commit: 0222e6571c
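
The cleanup itself is mechanical, so it is normally generated by a script rather than by hand; a one-liner such as sed -i 's/[ \t]*$//' over the reiserfs sources produces the same result. As a self-contained illustration (not part of the patch), the same filter written in C:

    /* strip-ws.c: remove trailing spaces and tabs from each input line.
     * Sketch only; assumes lines shorter than the buffer. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[4096];

            while (fgets(line, sizeof(line), stdin)) {
                    size_t len = strcspn(line, "\n");
                    int had_newline = (line[len] == '\n');

                    /* drop trailing whitespace */
                    while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
                            len--;
                    fwrite(line, 1, len, stdout);
                    if (had_newline)
                            putchar('\n');
            }
            return 0;
    }
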
@@ -1,4 +1,4 @@
-[LICENSING]
+[LICENSING]
 
 ReiserFS is hereby licensed under the GNU General
 Public License version 2.
@@ -31,7 +31,7 @@ the GPL as not allowing those additional licensing options, you read
 it wrongly, and Richard Stallman agrees with me, when carefully read
 you can see that those restrictions on additional terms do not apply
 to the owner of the copyright, and my interpretation of this shall
-govern for this license.
+govern for this license.
 
 Finally, nothing in this license shall be interpreted to allow you to
 fail to fairly credit me, or to remove my credits, without my

@@ -76,21 +76,21 @@ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
 #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
 #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
 
-/* summary:
+/* summary:
 if deleting something ( tb->insert_size[0] < 0 )
 return(balance_leaf_when_delete()); (flag d handled here)
 else
 if lnum is larger than 0 we put items into the left node
 if rnum is larger than 0 we put items into the right node
 if snum1 is larger than 0 we put items into the new node s1
-if snum2 is larger than 0 we put items into the new node s2
+if snum2 is larger than 0 we put items into the new node s2
 Note that all *num* count new items being created.
 
 It would be easier to read balance_leaf() if each of these summary
 lines was a separate procedure rather than being inlined. I think
 that there are many passages here and in balance_leaf_when_delete() in
 which two calls to one procedure can replace two passages, and it
-might save cache space and improve software maintenance costs to do so.
+might save cache space and improve software maintenance costs to do so.
 
 Vladimir made the perceptive comment that we should offload most of
 the decision making in this function into fix_nodes/check_balance, and
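
The summary above is effectively pseudocode for balance_leaf(); as a hedged C sketch, the dispatch it describes would look like the block below. The shift helpers are hypothetical stand-ins for passages the kernel inlines, and snum[] is the local array declared in the next hunk.

    /* Sketch only; not the kernel's code. */
    static int balance_leaf_sketch(struct tree_balance *tb, int flag, int snum[2])
    {
            if (tb->insert_size[0] < 0)     /* deleting something */
                    return balance_leaf_when_delete(tb, flag);  /* flag d handled there */
            if (tb->lnum[0] > 0)
                    shift_items_to_left_node(tb);   /* hypothetical helper */
            if (tb->rnum[0] > 0)
                    shift_items_to_right_node(tb);  /* hypothetical helper */
            if (snum[0] > 0)
                    fill_new_node(tb, 0);           /* hypothetical helper: S_new[0] */
            if (snum[1] > 0)
                    fill_new_node(tb, 1);           /* hypothetical helper: S_new[1] */
            return 0;
    }
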
@@ -288,15 +288,15 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
 )
 {
 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
-int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
+int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
 of the affected item */
 struct buffer_info bi;
 struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */
 int snum[2]; /* number of items that will be placed
 into S_new (includes partially shifted
 items) */
-int sbytes[2]; /* if an item is partially shifted into S_new then
-if it is a directory item
+int sbytes[2]; /* if an item is partially shifted into S_new then
+if it is a directory item
 it is the number of entries from the item that are shifted into S_new
 else
 it is the number of bytes from the item that are shifted into S_new
@@ -1983,7 +1983,7 @@ static inline void do_balance_starts(struct tree_balance *tb)
 /* store_print_tb (tb); */
 
 /* do not delete, just comment it out */
-/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
+/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
 "check");*/
 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
 #ifdef CONFIG_REISERFS_CHECK

@@ -20,14 +20,14 @@
 ** insertion/balancing, for files that are written in one write.
 ** It avoids unnecessary tail packings (balances) for files that are written in
 ** multiple writes and are small enough to have tails.
-**
+**
 ** file_release is called by the VFS layer when the file is closed. If
 ** this is the last open file descriptor, and the file
 ** small enough to have a tail, and the tail is currently in an
 ** unformatted node, the tail is converted back into a direct item.
-**
+**
 ** We use reiserfs_truncate_file to pack the tail, since it already has
-** all the conditions coded.
+** all the conditions coded.
 */
 static int reiserfs_file_release(struct inode *inode, struct file *filp)
 {
@@ -223,7 +223,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
 }
 
 /* Write @count bytes at position @ppos in a file indicated by @file
-from the buffer @buf.
+from the buffer @buf.
 
 generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
 something simple that works. It is not for serious use by general purpose filesystems, excepting the one that it was

@@ -30,8 +30,8 @@
 ** get_direct_parent
 ** get_neighbors
 ** fix_nodes
-**
-**
+**
+**
 **/
 
 #include <linux/time.h>
@@ -377,9 +377,9 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h,
 int needed_nodes;
 int start_item, /* position of item we start filling node from */
 end_item, /* position of item we finish filling node by */
-start_bytes, /* number of first bytes (entries for directory) of start_item-th item
+start_bytes, /* number of first bytes (entries for directory) of start_item-th item
 we do not include into node that is being filled */
-end_bytes; /* number of last bytes (entries for directory) of end_item-th item
+end_bytes; /* number of last bytes (entries for directory) of end_item-th item
 we do node include into node that is being filled */
 int split_item_positions[2]; /* these are positions in virtual item of
 items, that are split between S[0] and
@@ -569,7 +569,7 @@ extern struct tree_balance *cur_tb;
 
 /* Set parameters for balancing.
 * Performs write of results of analysis of balancing into structure tb,
-* where it will later be used by the functions that actually do the balancing.
+* where it will later be used by the functions that actually do the balancing.
 * Parameters:
 * tb tree_balance structure;
 * h current level of the node;
@@ -1204,7 +1204,7 @@ static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste;
-* Returns: 1 - schedule occurred;
+* Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1239,7 +1239,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
 /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
 where 4th parameter is s1bytes and 5th - s2bytes
 */
-short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
+short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
 0,1 - do not shift and do not shift but bottle
 2 - shift only whole item to left
 3 - shift to left and bottle as much as possible
@@ -1288,7 +1288,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
 
 create_virtual_node(tb, h);
 
-/*
+/*
 determine maximal number of items we can shift to the left neighbor (in tb structure)
 and the maximal number of bytes that can flow to the left neighbor
 from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
@@ -1349,13 +1349,13 @@ static int ip_check_balance(struct tree_balance *tb, int h)
 
 {
 int lpar, rpar, nset, lset, rset, lrset;
-/*
+/*
 * regular overflowing of the node
 */
 
-/* get_num_ver works in 2 modes (FLOW & NO_FLOW)
+/* get_num_ver works in 2 modes (FLOW & NO_FLOW)
 lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
-nset, lset, rset, lrset - shows, whether flowing items give better packing
+nset, lset, rset, lrset - shows, whether flowing items give better packing
 */
 #define FLOW 1
 #define NO_FLOW 0 /* do not any splitting */
@@ -1545,7 +1545,7 @@ static int ip_check_balance(struct tree_balance *tb, int h)
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste;
-* Returns: 1 - schedule occurred;
+* Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1728,7 +1728,7 @@ static int dc_check_balance_internal(struct tree_balance *tb, int h)
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste;
-* Returns: 1 - schedule occurred;
+* Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1822,7 +1822,7 @@ static int dc_check_balance_leaf(struct tree_balance *tb, int h)
 * h current level of the node;
 * inum item number in S[h];
 * mode d - delete, c - cut.
-* Returns: 1 - schedule occurred;
+* Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -1851,7 +1851,7 @@ static int dc_check_balance(struct tree_balance *tb, int h)
 * h current level of the node;
 * inum item number in S[h];
 * mode i - insert, p - paste, d - delete, c - cut.
-* Returns: 1 - schedule occurred;
+* Returns: 1 - schedule occurred;
 * 0 - balancing for higher levels needed;
 * -1 - no balancing for higher levels needed;
 * -2 - no disk space.
@@ -2296,15 +2296,15 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb)
 * analyze what and where should be moved;
 * get sufficient number of new nodes;
 * Balancing will start only after all resources will be collected at a time.
-*
+*
 * When ported to SMP kernels, only at the last moment after all needed nodes
 * are collected in cache, will the resources be locked using the usual
 * textbook ordered lock acquisition algorithms. Note that ensuring that
 * this code neither write locks what it does not need to write lock nor locks out of order
 * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
-*
+*
 * fix is meant in the sense of render unchanging
-*
+*
 * Latency might be improved by first gathering a list of what buffers are needed
 * and then getting as many of them in parallel as possible? -Hans
 *
@@ -2316,7 +2316,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb)
 * ins_ih & ins_sd are used when inserting
 * Returns: 1 - schedule occurred while the function worked;
 * 0 - schedule didn't occur while the function worked;
-* -1 - if no_disk_space
+* -1 - if no_disk_space
 */
 
 int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih, // item head of item being inserted
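
The return contract documented above (1 = schedule occurred, 0 = carry on, -1 = no disk space) shapes every caller of fix_nodes(): a return of 1 means the path may have gone stale while the function slept, so the tree search has to be repeated before balancing. A hedged sketch of that retry loop; the mode constant and the search step are illustrative, not the kernel's exact code:

    /* Sketch only: drive fix_nodes() per its documented return codes. */
    for (;;) {
            ret = fix_nodes(M_INSERT, &tb, &ins_ih, body);
            if (ret != 1)   /* 1: schedule occurred, path may be stale */
                    break;
            /* ... repeat search_by_key() and refresh tb.tb_path here ... */
    }
    if (ret == -1)          /* no_disk_space */
            return -ENOSPC;
    do_balance(&tb, &ins_ih, body, M_INSERT);
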
@@ -7,7 +7,7 @@
 * (see Applied Cryptography, 2nd edition, p448).
 *
 * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
-*
+*
 * Jeremy has agreed to the contents of reiserfs/README. -Hans
 * Yura's function is added (04/07/2000)
 */

@@ -278,7 +278,7 @@ static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n)
 
 /* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
 * last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
-* last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
+* last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
 */
 static void internal_copy_pointers_items(struct buffer_info *dest_bi,
 struct buffer_head *src,
@@ -385,7 +385,7 @@ static void internal_move_pointers_items(struct buffer_info *dest_bi,
 if (last_first == FIRST_TO_LAST) { /* shift_left occurs */
 first_pointer = 0;
 first_item = 0;
-/* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
+/* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
 for key - with first_item */
 internal_delete_pointers_items(src_bi, first_pointer,
 first_item, cpy_num - del_par);
@@ -453,7 +453,7 @@ static void internal_insert_key(struct buffer_info *dest_bi, int dest_position_b
 }
 }
 
-/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
+/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
 * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfl.
 * Delete pointer_amount items and node pointers from buffer src.
@@ -518,7 +518,7 @@ static void internal_shift1_left(struct tree_balance *tb,
 /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
 }
 
-/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
+/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
 * Copy n node pointers and n - 1 items from buffer src to buffer dest.
 * Replace d_key'th key in buffer cfr.
 * Delete n items and node pointers from buffer src.
@@ -749,7 +749,7 @@ int balance_internal(struct tree_balance *tb, /* tree_balance structure
 this means that new pointers and items must be inserted AFTER *
 child_pos
 }
-else
+else
 {
 it is the position of the leftmost pointer that must be deleted (together with
 its corresponding key to the left of the pointer)

@@ -52,7 +52,7 @@ void reiserfs_delete_inode(struct inode *inode)
 /* Do quota update inside a transaction for journaled quotas. We must do that
 * after delete_object so that quota updates go into the same transaction as
 * stat data deletion */
-if (!err)
+if (!err)
 DQUOT_FREE_INODE(inode);
 
 if (journal_end(&th, inode->i_sb, jbegin_count))
@@ -363,7 +363,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block,
 }
 /* make sure we don't read more bytes than actually exist in
 ** the file. This can happen in odd cases where i_size isn't
-** correct, and when direct item padding results in a few
+** correct, and when direct item padding results in a few
 ** extra bytes at the end of the direct item
 */
 if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
@@ -438,15 +438,15 @@ static int reiserfs_bmap(struct inode *inode, sector_t block,
 ** -ENOENT instead of a valid buffer. block_prepare_write expects to
 ** be able to do i/o on the buffers returned, unless an error value
 ** is also returned.
-**
+**
 ** So, this allows block_prepare_write to be used for reading a single block
 ** in a page. Where it does not produce a valid page for holes, or past the
 ** end of the file. This turns out to be exactly what we need for reading
 ** tails for conversion.
 **
 ** The point of the wrapper is forcing a certain value for create, even
-** though the VFS layer is calling this function with create==1. If you
-** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
+** though the VFS layer is calling this function with create==1. If you
+** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
 ** don't use this function.
 */
 static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
@@ -602,7 +602,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
 int done;
 int fs_gen;
 struct reiserfs_transaction_handle *th = NULL;
-/* space reserved in transaction batch:
+/* space reserved in transaction batch:
 . 3 balancings in direct->indirect conversion
 . 1 block involved into reiserfs_update_sd()
 XXX in practically impossible worst case direct2indirect()
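
The reservation itemized in the comment above translates directly into the block count passed to journal_begin(); a sketch of the arithmetic, assuming JOURNAL_PER_BALANCE_CNT is reiserfs's per-balance block estimate (the quota-related term is omitted):

    /* 3 balancings for the direct->indirect conversion, plus 1 block
     * touched by reiserfs_update_sd(); illustrative only. */
    jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 1;
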
@@ -754,7 +754,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
 reiserfs_write_unlock(inode->i_sb);
 
 /* the item was found, so new blocks were not added to the file
-** there is no need to make sure the inode is updated with this
+** there is no need to make sure the inode is updated with this
 ** transaction
 */
 return retval;
@@ -986,7 +986,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
 
 /* this loop could log more blocks than we had originally asked
 ** for. So, we have to allow the transaction to end if it is
-** too big or too full. Update the inode so things are
+** too big or too full. Update the inode so things are
 ** consistent if we crash before the function returns
 **
 ** release the path so that anybody waiting on the path before
@@ -997,7 +997,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
 if (retval)
 goto failure;
 }
-/* inserting indirect pointers for a hole can take a
+/* inserting indirect pointers for a hole can take a
 ** long time. reschedule if needed
 */
 cond_resched();
@@ -1444,7 +1444,7 @@ void reiserfs_read_locked_inode(struct inode *inode,
 update sd on unlink all that is required is to check for nlink
 here. This bug was first found by Sizif when debugging
 SquidNG/Butterfly, forgotten, and found again after Philippe
-Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
+Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
 
 More logical fix would require changes in fs/inode.c:iput() to
 remove inode from hash-table _after_ fs cleaned disk stuff up and
@@ -1619,7 +1619,7 @@ int reiserfs_write_inode(struct inode *inode, int do_sync)
 if (inode->i_sb->s_flags & MS_RDONLY)
 return -EROFS;
 /* memory pressure can sometimes initiate write_inode calls with sync == 1,
-** these cases are just when the system needs ram, not when the
+** these cases are just when the system needs ram, not when the
 ** inode needs to reach disk for safety, and they can safely be
 ** ignored because the altered inode has already been logged.
 */
@@ -1736,7 +1736,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
 /* inserts the stat data into the tree, and then calls
 reiserfs_new_directory (to insert ".", ".." item if new object is
 directory) or reiserfs_new_symlink (to insert symlink body if new
-object is symlink) or nothing (if new object is regular file)
+object is symlink) or nothing (if new object is regular file)
 
 NOTE! uid and gid must already be set in the inode. If we return
 non-zero due to an error, we have to drop the quota previously allocated
@@ -1744,7 +1744,7 @@ static int reiserfs_new_symlink(struct reiserfs_transaction_handle *th, struct i
 if we return non-zero, we also end the transaction. */
 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 struct inode *dir, int mode, const char *symname,
-/* 0 for regular, EMTRY_DIR_SIZE for dirs,
+/* 0 for regular, EMTRY_DIR_SIZE for dirs,
 strlen (symname) for symlinks) */
 loff_t i_size, struct dentry *dentry,
 struct inode *inode,
@@ -1794,7 +1794,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 goto out_bad_inode;
 }
 if (old_format_only(sb))
-/* not a perfect generation count, as object ids can be reused, but
+/* not a perfect generation count, as object ids can be reused, but
 ** this is as good as reiserfs can do right now.
 ** note that the private part of inode isn't filled in yet, we have
 ** to use the directory.
@@ -2081,7 +2081,7 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
 
 if (p_s_inode->i_size > 0) {
 if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
-// -ENOENT means we truncated past the end of the file,
+// -ENOENT means we truncated past the end of the file,
 // and get_block_create_0 could not find a block to read in,
 // which is ok.
 if (error != -ENOENT)
@@ -2093,11 +2093,11 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
 }
 }
 
-/* so, if page != NULL, we have a buffer head for the offset at
-** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
-** then we have an unformatted node. Otherwise, we have a direct item,
-** and no zeroing is required on disk. We zero after the truncate,
-** because the truncate might pack the item anyway
+/* so, if page != NULL, we have a buffer head for the offset at
+** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
+** then we have an unformatted node. Otherwise, we have a direct item,
+** and no zeroing is required on disk. We zero after the truncate,
+** because the truncate might pack the item anyway
 ** (it will unmap bh if it packs).
 */
 /* it is enough to reserve space in transaction for 2 balancings:
@@ -2306,8 +2306,8 @@ static int map_block_for_writepage(struct inode *inode,
 return retval;
 }
 
-/*
-* mason@suse.com: updated in 2.5.54 to follow the same general io
+/*
+* mason@suse.com: updated in 2.5.54 to follow the same general io
 * start/recovery path as __block_write_full_page, along with special
 * code to handle reiserfs tails.
 */
@@ -2447,7 +2447,7 @@ static int reiserfs_write_full_page(struct page *page,
 unlock_page(page);
 
 /*
-* since any buffer might be the only dirty buffer on the page,
+* since any buffer might be the only dirty buffer on the page,
 * the first submit_bh can bring the page out of writeback.
 * be careful with the buffers.
 */
@@ -2466,8 +2466,8 @@ static int reiserfs_write_full_page(struct page *page,
 if (nr == 0) {
 /*
 * if this page only had a direct item, it is very possible for
-* no io to be required without there being an error. Or,
-* someone else could have locked them and sent them down the
+* no io to be required without there being an error. Or,
+* someone else could have locked them and sent them down the
 * pipe without locking the page
 */
 bh = head;
@@ -2486,7 +2486,7 @@ static int reiserfs_write_full_page(struct page *page,
 
 fail:
 /* catches various errors, we need to make sure any valid dirty blocks
-* get to the media. The page is currently locked and not marked for
+* get to the media. The page is currently locked and not marked for
 * writeback
 */
 ClearPageUptodate(page);

@@ -189,7 +189,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
 }
 
 /* we unpack by finding the page with the tail, and calling
-** reiserfs_prepare_write on that page. This will force a
+** reiserfs_prepare_write on that page. This will force a
 ** reiserfs_get_block to unpack the tail for us.
 */
 index = inode->i_size >> PAGE_CACHE_SHIFT;

@@ -1,36 +1,36 @@
 /*
 ** Write ahead logging implementation copyright Chris Mason 2000
 **
-** The background commits make this code very interelated, and
+** The background commits make this code very interelated, and
 ** overly complex. I need to rethink things a bit....The major players:
 **
-** journal_begin -- call with the number of blocks you expect to log.
+** journal_begin -- call with the number of blocks you expect to log.
 ** If the current transaction is too
-** old, it will block until the current transaction is
+** old, it will block until the current transaction is
 ** finished, and then start a new one.
-** Usually, your transaction will get joined in with
+** Usually, your transaction will get joined in with
 ** previous ones for speed.
 **
-** journal_join -- same as journal_begin, but won't block on the current
+** journal_join -- same as journal_begin, but won't block on the current
 ** transaction regardless of age. Don't ever call
-** this. Ever. There are only two places it should be
+** this. Ever. There are only two places it should be
 ** called from, and they are both inside this file.
 **
-** journal_mark_dirty -- adds blocks into this transaction. clears any flags
+** journal_mark_dirty -- adds blocks into this transaction. clears any flags
 ** that might make them get sent to disk
-** and then marks them BH_JDirty. Puts the buffer head
-** into the current transaction hash.
+** and then marks them BH_JDirty. Puts the buffer head
+** into the current transaction hash.
 **
 ** journal_end -- if the current transaction is batchable, it does nothing
 ** otherwise, it could do an async/synchronous commit, or
-** a full flush of all log and real blocks in the
+** a full flush of all log and real blocks in the
 ** transaction.
 **
-** flush_old_commits -- if the current transaction is too old, it is ended and
-** commit blocks are sent to disk. Forces commit blocks
-** to disk for all backgrounded commits that have been
+** flush_old_commits -- if the current transaction is too old, it is ended and
+** commit blocks are sent to disk. Forces commit blocks
+** to disk for all backgrounded commits that have been
 ** around too long.
-** -- Note, if you call this as an immediate flush from
+** -- Note, if you call this as an immediate flush from
 ** from within kupdate, it will ignore the immediate flag
 */
 
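
The entry points described above bracket every metadata update. A minimal sketch of a caller, using only signatures that appear elsewhere in this patch; the block count of 1 and the error handling are illustrative:

    /* Sketch: log one buffer inside a transaction. */
    static int log_one_buffer_sketch(struct super_block *sb, struct buffer_head *bh)
    {
            struct reiserfs_transaction_handle th;
            int err;

            err = journal_begin(&th, sb, 1);        /* blocks we expect to log */
            if (err)
                    return err;
            reiserfs_prepare_for_journal(sb, bh, 1);        /* clean it, wait on it */
            /* ... modify the buffer contents here ... */
            journal_mark_dirty(&th, sb, bh);        /* mark BH_JDirty, hash it */
            return journal_end(&th, sb, 1);         /* batchable; may just join */
    }
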
@@ -212,7 +212,7 @@ static void allocate_bitmap_nodes(struct super_block *p_s_sb)
 list_add(&bn->list, &journal->j_bitmap_nodes);
 journal->j_free_bitmap_nodes++;
 } else {
-break; // this is ok, we'll try again when more are needed
+break; /* this is ok, we'll try again when more are needed */
 }
 }
 }
@@ -283,7 +283,7 @@ static int free_bitmap_nodes(struct super_block *p_s_sb)
 }
 
 /*
-** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
 ** jb_array is the array to be filled in.
 */
 int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
@@ -315,7 +315,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
 }
 
 /*
-** find an available list bitmap. If you can't find one, flush a commit list
+** find an available list bitmap. If you can't find one, flush a commit list
 ** and try again
 */
 static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
@@ -348,7 +348,7 @@ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
 return jb;
 }
 
-/*
+/*
 ** allocates a new chunk of X nodes, and links them all together as a list.
 ** Uses the cnode->next and cnode->prev pointers
 ** returns NULL on failure
@@ -376,7 +376,7 @@ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
 }
 
 /*
-** pulls a cnode off the free list, or returns NULL on failure
+** pulls a cnode off the free list, or returns NULL on failure
 */
 static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
 {
@@ -403,7 +403,7 @@ static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
 }
 
 /*
-** returns a cnode to the free list
+** returns a cnode to the free list
 */
 static void free_cnode(struct super_block *p_s_sb,
 struct reiserfs_journal_cnode *cn)
@@ -1192,8 +1192,8 @@ static int flush_commit_list(struct super_block *s,
 }
 
 /*
-** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
-** returns NULL if it can't find anything
+** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
+** returns NULL if it can't find anything
 */
 static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
 reiserfs_journal_cnode
@@ -1335,8 +1335,8 @@ static int update_journal_header_block(struct super_block *p_s_sb,
 return _update_journal_header_block(p_s_sb, offset, trans_id);
 }
 
-/*
-** flush any and all journal lists older than you are
+/*
+** flush any and all journal lists older than you are
 ** can only be called from flush_journal_list
 */
 static int flush_older_journal_lists(struct super_block *p_s_sb,
@@ -1382,8 +1382,8 @@ static void del_from_work_list(struct super_block *s,
 ** always set flushall to 1, unless you are calling from inside
 ** flush_journal_list
 **
-** IMPORTANT. This can only be called while there are no journal writers,
-** and the journal is locked. That means it can only be called from
+** IMPORTANT. This can only be called while there are no journal writers,
+** and the journal is locked. That means it can only be called from
 ** do_journal_end, or by journal_release
 */
 static int flush_journal_list(struct super_block *s,
@@ -1429,7 +1429,7 @@ static int flush_journal_list(struct super_block *s,
 goto flush_older_and_return;
 }
 
-/* start by putting the commit list on disk. This will also flush
+/* start by putting the commit list on disk. This will also flush
 ** the commit lists of any olders transactions
 */
 flush_commit_list(s, jl, 1);
@@ -1444,8 +1444,8 @@ static int flush_journal_list(struct super_block *s,
 goto flush_older_and_return;
 }
 
-/* loop through each cnode, see if we need to write it,
-** or wait on a more recent transaction, or just ignore it
+/* loop through each cnode, see if we need to write it,
+** or wait on a more recent transaction, or just ignore it
 */
 if (atomic_read(&(journal->j_wcount)) != 0) {
 reiserfs_panic(s, "journal-844", "journal list is flushing, "
@@ -1473,8 +1473,8 @@ static int flush_journal_list(struct super_block *s,
 if (!pjl && cn->bh) {
 saved_bh = cn->bh;
 
-/* we do this to make sure nobody releases the buffer while
-** we are working with it
+/* we do this to make sure nobody releases the buffer while
+** we are working with it
 */
 get_bh(saved_bh);
 
@@ -1497,8 +1497,8 @@ static int flush_journal_list(struct super_block *s,
 goto free_cnode;
 }
 
-/* bh == NULL when the block got to disk on its own, OR,
-** the block got freed in a future transaction
+/* bh == NULL when the block got to disk on its own, OR,
+** the block got freed in a future transaction
 */
 if (saved_bh == NULL) {
 goto free_cnode;
@@ -1586,7 +1586,7 @@ static int flush_journal_list(struct super_block *s,
 __func__);
 flush_older_and_return:
 
-/* before we can update the journal header block, we _must_ flush all
+/* before we can update the journal header block, we _must_ flush all
 ** real blocks from all older transactions to disk. This is because
 ** once the header block is updated, this transaction will not be
 ** replayed after a crash
@@ -1596,7 +1596,7 @@ static int flush_journal_list(struct super_block *s,
 }
 
 err = journal->j_errno;
-/* before we can remove everything from the hash tables for this
+/* before we can remove everything from the hash tables for this
 ** transaction, we must make sure it can never be replayed
 **
 ** since we are only called from do_journal_end, we know for sure there
@@ -2016,9 +2016,9 @@ static int journal_compare_desc_commit(struct super_block *p_s_sb,
 return 0;
 }
 
-/* returns 0 if it did not find a description block
+/* returns 0 if it did not find a description block
 ** returns -1 if it found a corrupt commit block
-** returns 1 if both desc and commit were valid
+** returns 1 if both desc and commit were valid
 */
 static int journal_transaction_is_valid(struct super_block *p_s_sb,
 struct buffer_head *d_bh,
@@ -2380,8 +2380,8 @@ static int journal_read(struct super_block *p_s_sb)
 bdevname(journal->j_dev_bd, b));
 start = get_seconds();
 
-/* step 1, read in the journal header block. Check the transaction it says
-** is the first unflushed, and if that transaction is not valid,
+/* step 1, read in the journal header block. Check the transaction it says
+** is the first unflushed, and if that transaction is not valid,
 ** replay is done
 */
 journal->j_header_bh = journal_bread(p_s_sb,
@@ -2406,8 +2406,8 @@ static int journal_read(struct super_block *p_s_sb)
 le32_to_cpu(jh->j_last_flush_trans_id));
 valid_journal_header = 1;
 
-/* now, we try to read the first unflushed offset. If it is not valid,
-** there is nothing more we can do, and it makes no sense to read
+/* now, we try to read the first unflushed offset. If it is not valid,
+** there is nothing more we can do, and it makes no sense to read
 ** through the whole log.
 */
 d_bh =
@@ -2919,7 +2919,7 @@ int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
 return 0;
 }
 
-/* this must be called inside a transaction, and requires the
+/* this must be called inside a transaction, and requires the
 ** kernel_lock to be held
 */
 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
@@ -3040,7 +3040,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
 now = get_seconds();
 
 /* if there is no room in the journal OR
-** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
+** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
 ** we don't sleep if there aren't other writers
 */
 
@@ -3240,7 +3240,7 @@ int journal_begin(struct reiserfs_transaction_handle *th,
 **
 ** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
 ** transaction is committed.
-**
+**
 ** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
 */
 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
@@ -3290,7 +3290,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
 atomic_read(&(journal->j_wcount)));
 return 1;
 }
-/* this error means I've screwed up, and we've overflowed the transaction.
+/* this error means I've screwed up, and we've overflowed the transaction.
 ** Nothing can be done here, except make the FS readonly or panic.
 */
 if (journal->j_len >= journal->j_trans_max) {
@@ -3380,7 +3380,7 @@ int journal_end(struct reiserfs_transaction_handle *th,
 }
 }
 
-/* removes from the current transaction, relsing and descrementing any counters.
+/* removes from the current transaction, relsing and descrementing any counters.
 ** also files the removed buffer directly onto the clean list
 **
 ** called by journal_mark_freed when a block has been deleted
@@ -3478,7 +3478,7 @@ static int can_dirty(struct reiserfs_journal_cnode *cn)
 }
 
 /* syncs the commit blocks, but does not force the real buffers to disk
-** will wait until the current transaction is done/committed before returning
+** will wait until the current transaction is done/committed before returning
 */
 int journal_end_sync(struct reiserfs_transaction_handle *th,
 struct super_block *p_s_sb, unsigned long nblocks)
@@ -3560,13 +3560,13 @@ int reiserfs_flush_old_commits(struct super_block *p_s_sb)
 
 /*
 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
-**
-** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
+**
+** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
 ** the writers are done. By the time it wakes up, the transaction it was called has already ended, so it just
 ** flushes the commit list and returns 0.
 **
 ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
-**
+**
 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
 */
 static int check_journal_end(struct reiserfs_transaction_handle *th,
@@ -3594,7 +3594,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
 atomic_dec(&(journal->j_wcount));
 }
 
-/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
+/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
 ** will be dealt with by next transaction that actually writes something, but should be taken
 ** care of in this trans
 */
@@ -3603,7 +3603,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
 /* if wcount > 0, and we are called to with flush or commit_now,
 ** we wait on j_join_wait. We will wake up when the last writer has
 ** finished the transaction, and started it on its way to the disk.
-** Then, we flush the commit or journal list, and just return 0
+** Then, we flush the commit or journal list, and just return 0
 ** because the rest of journal end was already done for this transaction.
 */
 if (atomic_read(&(journal->j_wcount)) > 0) {
@@ -3674,7 +3674,7 @@ static int check_journal_end(struct reiserfs_transaction_handle *th,
 /*
 ** Does all the work that makes deleting blocks safe.
 ** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
-**
+**
 ** otherwise:
 ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
 ** before this transaction has finished.
@@ -3878,7 +3878,7 @@ extern struct tree_balance *cur_tb;
 ** be written to disk while we are altering it. So, we must:
 ** clean it
 ** wait on it.
-**
+**
 */
 int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
 struct buffer_head *bh, int wait)
@@ -3920,7 +3920,7 @@ static void flush_old_journal_lists(struct super_block *s)
 }
 }
 
-/*
+/*
 ** long and ugly. If flush, will not return until all commit
 ** blocks and all real buffers in the trans are on disk.
 ** If no_async, won't return until all commit blocks are on disk.
@@ -3981,7 +3981,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
 wait_on_commit = 1;
 }
 
-/* check_journal_end locks the journal, and unlocks if it does not return 1
+/* check_journal_end locks the journal, and unlocks if it does not return 1
 ** it tells us if we should continue with the journal_end, or just return
 */
 if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
@@ -4078,7 +4078,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
 last_cn->next = jl_cn;
 }
 last_cn = jl_cn;
-/* make sure the block we are trying to log is not a block
+/* make sure the block we are trying to log is not a block
 of journal or reserved area */
 
 if (is_block_in_log_or_reserved_area
@@ -4225,9 +4225,9 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
 } else if (!(jl->j_state & LIST_COMMIT_PENDING))
 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
 
-/* if the next transaction has any chance of wrapping, flush
-** transactions that might get overwritten. If any journal lists are very
-** old flush them as well.
+/* if the next transaction has any chance of wrapping, flush
+** transactions that might get overwritten. If any journal lists are very
+** old flush them as well.
 */
 first_jl:
 list_for_each_safe(entry, safe, &journal->j_journal_list) {

@@ -119,8 +119,8 @@ static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
 DEH_SIZE * copy_count + copy_records_len);
 }
 
-/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
-part of it or nothing (see the return 0 below) from SOURCE to the end
+/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
+part of it or nothing (see the return 0 below) from SOURCE to the end
 (if last_first) or beginning (!last_first) of the DEST */
 /* returns 1 if anything was copied, else 0 */
 static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
@@ -396,7 +396,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
 else {
 struct item_head n_ih;
 
-/* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
+/* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
 part defined by 'cpy_bytes'; create new item header; change old item_header (????);
 n_ih = new item_header;
 */
@@ -426,7 +426,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
 else {
 struct item_head n_ih;
 
-/* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
+/* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
 part defined by 'cpy_bytes'; create new item header;
 n_ih = new item_header;
 */
@@ -724,7 +724,7 @@ int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
 static void leaf_delete_items_entirely(struct buffer_info *bi,
 int first, int del_num);
 /* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
-If not.
+If not.
 If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of
 the first item. Part defined by del_bytes. Don't delete first item header
 If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of
@@ -783,7 +783,7 @@ void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
 /* len = body len of item */
 len = ih_item_len(ih);
 
-/* delete the part of the last item of the bh
+/* delete the part of the last item of the bh
 do not delete item header
 */
 leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
@@ -865,7 +865,7 @@ void leaf_insert_into_buf(struct buffer_info *bi, int before,
 }
 }
 
-/* paste paste_size bytes to affected_item_num-th item.
+/* paste paste_size bytes to affected_item_num-th item.
 When item is a directory, this only prepare space for new entries */
 void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
 int pos_in_item, int paste_size,
@@ -1022,7 +1022,7 @@ static int leaf_cut_entries(struct buffer_head *bh,
 /* when cut item is part of regular file
 pos_in_item - first byte that must be cut
 cut_size - number of bytes to be cut beginning from pos_in_item
-
+
 when cut item is part of directory
 pos_in_item - number of first deleted entry
 cut_size - count of deleted entries
@@ -1275,7 +1275,7 @@ void leaf_paste_entries(struct buffer_info *bi,
 /* change item key if necessary (when we paste before 0-th entry */
 if (!before) {
 set_le_ih_k_offset(ih, deh_offset(new_dehs));
-/* memcpy (&ih->ih_key.k_offset,
+/* memcpy (&ih->ih_key.k_offset,
 &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
 }
 #ifdef CONFIG_REISERFS_CHECK

@@ -106,7 +106,7 @@ key of the first directory entry in it.
 This function first calls search_by_key, then, if item whose first
 entry matches is not found it looks for the entry inside directory
 item found by search_by_key. Fills the path to the entry, and to the
-entry position in the item
+entry position in the item
 
 */
 
@@ -371,7 +371,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
 return d_splice_alias(inode, dentry);
 }
 
-/*
+/*
 ** looks up the dentry of the parent directory for child.
 ** taken from ext2_get_parent
 */
@@ -401,7 +401,7 @@ struct dentry *reiserfs_get_parent(struct dentry *child)
 return d_obtain_alias(inode);
 }
 
-/* add entry to the directory (entry can be hidden).
+/* add entry to the directory (entry can be hidden).
 
 insert definition of when hidden directories are used here -Hans
 
@@ -559,7 +559,7 @@ static int drop_new_inode(struct inode *inode)
 return 0;
 }
 
-/* utility function that does setup for reiserfs_new_inode.
+/* utility function that does setup for reiserfs_new_inode.
 ** DQUOT_INIT needs lots of credits so it's better to have it
 ** outside of a transaction, so we had to pull some bits of
 ** reiserfs_new_inode out into this func.
@@ -820,7 +820,7 @@ static inline int reiserfs_empty_dir(struct inode *inode)
 {
 /* we can cheat because an old format dir cannot have
 ** EMPTY_DIR_SIZE, and a new format dir cannot have
-** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
+** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
 ** regardless of disk format version, the directory is empty.
 */
 if (inode->i_size != EMPTY_DIR_SIZE &&
@@ -1162,7 +1162,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
 return retval;
 }
 
-// de contains information pointing to an entry which
+/* de contains information pointing to an entry which */
 static int de_still_valid(const char *name, int len,
 struct reiserfs_dir_entry *de)
 {
@@ -1206,10 +1206,10 @@ static void set_ino_in_dir_entry(struct reiserfs_dir_entry *de,
 de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid;
 }
 
-/*
+/*
 * process, that is going to call fix_nodes/do_balance must hold only
 * one path. If it holds 2 or more, it can get into endless waiting in
-* get_empty_nodes or its clones
+* get_empty_nodes or its clones
 */
 static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 struct inode *new_dir, struct dentry *new_dentry)
@@ -1263,7 +1263,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 old_inode_mode = old_inode->i_mode;
 if (S_ISDIR(old_inode_mode)) {
-// make sure, that directory being renamed has correct ".."
+// make sure, that directory being renamed has correct ".."
 // and that its new parent directory has not too many links
 // already
 
@@ -1274,8 +1274,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 }
 }
 
-/* directory is renamed, its parent directory will be changed,
-** so find ".." entry
+/* directory is renamed, its parent directory will be changed,
+** so find ".." entry
 */
 dot_dot_de.de_gen_number_bit_string = NULL;
 retval =
@@ -1385,9 +1385,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 this stuff, yes? Then, having
 gathered everything into RAM we
 should lock the buffers, yes? -Hans */
-/* probably. our rename needs to hold more
-** than one path at once. The seals would
-** have to be written to deal with multi-path
+/* probably. our rename needs to hold more
+** than one path at once. The seals would
+** have to be written to deal with multi-path
 ** issues -chris
 */
 /* sanity checking before doing the rename - avoid races many
@@ -1465,7 +1465,7 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 }
 
 if (S_ISDIR(old_inode_mode)) {
-// adjust ".." of renamed directory
+/* adjust ".." of renamed directory */
 set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
 journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);
 

@@ -180,7 +180,7 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s)
 
 if (cur_size > new_size) {
 /* mark everyone used that was listed as free at the end of the objectid
-** map
+** map
 */
 objectid_map[new_size - 1] = objectid_map[cur_size - 1];
 set_sb_oid_cursize(disk_sb, new_size);

@@ -178,11 +178,11 @@ static char *is_there_reiserfs_struct(char *fmt, int *what)
 appropriative printk. With this reiserfs_warning you can use format
 specification for complex structures like you used to do with
 printfs for integers, doubles and pointers. For instance, to print
-out key structure you have to write just:
-reiserfs_warning ("bad key %k", key);
-instead of
-printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
-key->k_offset, key->k_uniqueness);
+out key structure you have to write just:
+reiserfs_warning ("bad key %k", key);
+instead of
+printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
+key->k_offset, key->k_uniqueness);
 */
 static DEFINE_SPINLOCK(error_lock);
 static void prepare_error_buf(const char *fmt, va_list args)
@@ -244,11 +244,11 @@ static void prepare_error_buf(const char *fmt, va_list args)
 }
 
 /* in addition to usual conversion specifiers this accepts reiserfs
-specific conversion specifiers:
-%k to print little endian key,
-%K to print cpu key,
+specific conversion specifiers:
+%k to print little endian key,
+%K to print cpu key,
 %h to print item_head,
-%t to print directory entry
+%t to print directory entry
 %z to print block head (arg must be struct buffer_head *
 %b to print buffer_head
 */
@@ -314,17 +314,17 @@ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
 maintainer-errorid. Don't bother with reusing errorids, there are
 lots of numbers out there.
 
-Example:
-
+Example:
+
 reiserfs_panic(
 p_sb, "reiser-29: reiserfs_new_blocknrs: "
 "one of search_start or rn(%d) is equal to MAX_B_NUM,"
-"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
+"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
 rn, bh
 );
 
 Regular panic()s sometimes clear the screen before the message can
-be read, thus the need for the while loop.
+be read, thus the need for the while loop.
 
 Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
 pointless complexity):
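
Put together, the two comments above mean a call site can print structured reiserfs objects with single specifiers. A sketch using the sb/error-id calling convention visible elsewhere in this patch ("sketch-1" is a made-up id):

    /* key is a struct reiserfs_key *, bh a struct buffer_head *. */
    reiserfs_warning(sb, "sketch-1", "bad key %k in block %b", key, bh);
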
@@ -633,7 +633,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start,
 *
 */
 
-/*
+/*
 * Make Linus happy.
 * Local variables:
 * c-indentation-style: "K&R"

@@ -1,8 +1,8 @@
-/*
+/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */
 
-/*
+/*
 * Written by Alexander Zarochentcev.
 *
 * The kernel part of the (on-line) reiserfs resizer.
@@ -101,7 +101,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
 memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
 
 /* just in case vfree schedules on us, copy the new
-** pointer into the journal struct before freeing the
+** pointer into the journal struct before freeing the
 ** old one
 */
 node_tmp = jb->bitmaps;

@@ -77,7 +77,7 @@ inline void copy_item_head(struct item_head *p_v_to,
 /* k1 is pointer to on-disk structure which is stored in little-endian
 form. k2 is pointer to cpu variable. For key of items of the same
 object this returns 0.
-Returns: -1 if key1 < key2
+Returns: -1 if key1 < key2
 0 if key1 == key2
 1 if key1 > key2 */
 inline int comp_short_keys(const struct reiserfs_key *le_key,
@@ -890,7 +890,7 @@ static inline int prepare_for_direct_item(struct treepath *path,
 }
 // new file gets truncated
 if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
-//
+//
 round_len = ROUND_UP(new_file_length);
 /* this was n_new_file_length < le_ih ... */
 if (round_len < le_ih_k_offset(le_ih)) {
@@ -1443,7 +1443,7 @@ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
 if (atomic_read(&p_s_inode->i_count) > 1 ||
 !tail_has_to_be_packed(p_s_inode) ||
 !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
-// leave tail in an unformatted node
+/* leave tail in an unformatted node */
 *p_c_mode = M_SKIP_BALANCING;
 cut_bytes =
 n_block_size - (n_new_file_size & (n_block_size - 1));
@@ -1826,7 +1826,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, struct inode *p
 /* While there are bytes to truncate and previous file item is presented in the tree. */
 
 /*
-** This loop could take a really long time, and could log
+** This loop could take a really long time, and could log
 ** many more blocks than a transaction can hold. So, we do a polite
 ** journal end here, and if the transaction needs ending, we make
 ** sure the file is consistent before ending the current trans

@@ -758,7 +758,7 @@ static int reiserfs_getopt(struct super_block *s, char **cur, opt_desc_t * opts,
 char **opt_arg, unsigned long *bit_flags)
 {
 char *p;
-/* foo=bar,
+/* foo=bar,
 ^ ^ ^
 | | +-- option_end
 | +-- arg_start
@@ -1348,7 +1348,7 @@ static int read_super_block(struct super_block *s, int offset)
 }
 //
 // ok, reiserfs signature (old or new) found in at the given offset
-//
+//
 fs_blocksize = sb_blocksize(rs);
 brelse(bh);
 sb_set_blocksize(s, fs_blocksize);
@@ -1534,8 +1534,8 @@ static int what_hash(struct super_block *s)
 code = find_hash_out(s);
 
 if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
-/* detection has found the hash, and we must check against the
-** mount options
+/* detection has found the hash, and we must check against the
+** mount options
 */
 if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
 reiserfs_warning(s, "reiserfs-2507",
@@ -1567,7 +1567,7 @@ static int what_hash(struct super_block *s)
 }
 }
 
-/* if we are mounted RW, and we have a new valid hash code, update
+/* if we are mounted RW, and we have a new valid hash code, update
 ** the super
 */
 if (code != UNSET_HASH &&

@@ -46,7 +46,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
 /* Set the key to search for the place for new unfm pointer */
 make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);
 
-// FIXME: we could avoid this
+/* FIXME: we could avoid this */
 if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
 reiserfs_error(sb, "PAP-14030",
 "pasted or inserted byte exists in "

@@ -14,7 +14,7 @@ typedef enum {
 } reiserfs_super_block_flags;
 
 /* struct reiserfs_super_block accessors/mutators
-* since this is a disk structure, it will always be in
+* since this is a disk structure, it will always be in
 * little endian format. */
 #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
 #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
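
Keeping the le32 conversions inside the accessor macros lets callers do a read-modify-write on the on-disk super block without any explicit byte-swapping, e.g. (sketch; 'added' is illustrative):

    /* rs points at the little-endian struct reiserfs_super_block */
    set_sb_block_count(rs, sb_block_count(rs) + added);
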
@@ -83,16 +83,16 @@ typedef enum {
 
 /* LOGGING -- */
 
-/* These all interelate for performance.
+/* These all interelate for performance.
 **
-** If the journal block count is smaller than n transactions, you lose speed.
+** If the journal block count is smaller than n transactions, you lose speed.
 ** I don't know what n is yet, I'm guessing 8-16.
 **
 ** typical transaction size depends on the application, how often fsync is
-** called, and how many metadata blocks you dirty in a 30 second period.
+** called, and how many metadata blocks you dirty in a 30 second period.
 ** The more small files (<16k) you use, the larger your transactions will
 ** be.
-**
+**
 ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
 ** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
 ** to prevent wrapping before dirty meta blocks get to disk.
@@ -242,7 +242,7 @@ struct reiserfs_journal {
 
 struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
 struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
-struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
+struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
 the transactions */
 struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
 int j_persistent_trans;
@@ -426,7 +426,7 @@ enum reiserfs_mount_options {
 partition will be dealt with in a
 manner of 3.5.x */
 
-/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
+/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
 ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
 ** is not required. If the normal autodection code can't determine which
 ** hash to use (because both hashes had the same value for a file)