Fix two races in the timer stats lookup code.  One by ensuring that the
initialization of a new entry is finished upon insertion of that entry.
The other by cleaning up the hash table when the entries array is cleared,
so that we don't have any "pre-inserted" entries.

Thanks to Eric Dumazet for reminding me of the memory barriers.

Signed-off-by: Bjorn Steinbrink <B.Steinbrink@gmx.de>
Signed-off-by: Ian Kumlien <pomac@vapor.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Bjorn Steinbrink 2007-06-01 00:47:15 -07:00 committed by Linus Torvalds
Parent c79d9c9e9a
Commit 9fcc15ec3c
1 changed file: 21 additions and 16 deletions

View file

@@ -117,21 +117,6 @@ static struct entry entries[MAX_ENTRIES];
static atomic_t overflow_count; static atomic_t overflow_count;
/*
 * Pre-fix version (removed by this commit): clears the entries[] array
 * and the overflow counter, but NOT tstat_hash_table[] -- so stale hash
 * chains could still point at zeroed ("pre-inserted") entries, which is
 * one of the two races the commit message describes.
 */
static void reset_entries(void)
{
nr_entries = 0;
memset(entries, 0, sizeof(entries));
atomic_set(&overflow_count, 0);
}
/*
 * Hand out the next free slot in the static entries[] array, or NULL
 * once all MAX_ENTRIES slots are in use.
 * NOTE(review): nr_entries++ is not atomic -- presumably callers hold
 * table_lock (spin_unlock(&table_lock) is visible in tstat_lookup);
 * confirm against the full file.
 */
static struct entry *alloc_entry(void)
{
if (nr_entries >= MAX_ENTRIES)
return NULL;
return entries + nr_entries++;
}
/* /*
* The entries are in a hash-table, for fast lookup: * The entries are in a hash-table, for fast lookup:
*/ */
@@ -149,6 +134,22 @@ static struct entry *alloc_entry(void)
static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly; static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
/*
 * Reset all timer-stats state: drop every recorded entry, clear the
 * lookup hash table, and zero the overflow counter.  Clearing
 * tstat_hash_table[] here (the line this commit adds) is the fix for
 * the "pre-inserted entries" race: without it, hash chains could still
 * reference slots that were just memset to zero.
 */
static void reset_entries(void)
{
nr_entries = 0;
memset(entries, 0, sizeof(entries));
memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
atomic_set(&overflow_count, 0);
}
/*
 * Return a pointer to the next unused slot of the static entries[]
 * array, or NULL when MAX_ENTRIES have already been handed out.
 * Moved below tstat_hash_table[] by this commit so reset_entries() can
 * also clear the hash table.  NOTE(review): the unlocked-looking
 * nr_entries++ is assumed to be serialized by table_lock -- confirm.
 */
static struct entry *alloc_entry(void)
{
if (nr_entries >= MAX_ENTRIES)
return NULL;
return entries + nr_entries++;
}
static int match_entries(struct entry *entry1, struct entry *entry2) static int match_entries(struct entry *entry1, struct entry *entry2)
{ {
return entry1->timer == entry2->timer && return entry1->timer == entry2->timer &&
@@ -202,12 +203,15 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
if (curr) { if (curr) {
*curr = *entry; *curr = *entry;
curr->count = 0; curr->count = 0;
curr->next = NULL;
memcpy(curr->comm, comm, TASK_COMM_LEN); memcpy(curr->comm, comm, TASK_COMM_LEN);
smp_mb(); /* Ensure that curr is initialized before insert */
if (prev) if (prev)
prev->next = curr; prev->next = curr;
else else
*head = curr; *head = curr;
curr->next = NULL;
} }
out_unlock: out_unlock:
spin_unlock(&table_lock); spin_unlock(&table_lock);
@@ -360,6 +364,7 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
if (!active) { if (!active) {
reset_entries(); reset_entries();
time_start = ktime_get(); time_start = ktime_get();
smp_mb();
active = 1; active = 1;
} }
break; break;