Merge branch 'master' into upstream

Commit ea6e1e94f2

Makefile | 2
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 17
-EXTRAVERSION =-rc6
+EXTRAVERSION =
 NAME=Crazed Snow-Weasel
 
 # *DOCUMENTATION*
@@ -52,7 +52,7 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-		va |= (0x7f >> (8 - penc)) << 12;
+		va |= penc << 12;
 		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
 		break;
 	}
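
The only functional change in this hunk (and in the matching __tlbiel hunk below) is how the page-size encoding is folded into the effective address: the derived mask (0x7f >> (8 - penc)) is replaced by the raw penc value. The stand-alone sketch below only shows what the two expressions compute for a sample input; it is illustration under assumed values, not kernel code, and the helper names are made up.

#include <stdio.h>

/* Stand-alone illustration of the two encodings in the hunk above.
 * "penc" stands in for mmu_psize_defs[psize].penc; the shift of 12 and
 * the 0x7f mask are copied from the hunk. Not kernel code. */
static unsigned long encode_old(unsigned long va, unsigned int penc)
{
	return va | ((0x7fUL >> (8 - penc)) << 12);
}

static unsigned long encode_new(unsigned long va, unsigned int penc)
{
	return va | ((unsigned long)penc << 12);
}

int main(void)
{
	unsigned long va = 0x10000000UL;	/* example VA, already masked to the page boundary */
	unsigned int penc = 5;			/* example encoding value */

	printf("old: %#lx\n", encode_old(va, penc));
	printf("new: %#lx\n", encode_new(va, penc));
	return 0;
}
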
@@ -74,7 +74,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
 	default:
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
-		va |= (0x7f >> (8 - penc)) << 12;
+		va |= penc << 12;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 			     : : "r"(va) : "memory");
 		break;
@@ -1323,17 +1323,12 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
 	if (cic) {
-		RB_CLEAR(&cic->rb_node);
-		cic->key = NULL;
-		cic->cfqq[ASYNC] = NULL;
-		cic->cfqq[SYNC] = NULL;
+		memset(cic, 0, sizeof(*cic));
+		RB_CLEAR_COLOR(&cic->rb_node);
 		cic->last_end_request = jiffies;
-		cic->ttime_total = 0;
-		cic->ttime_samples = 0;
-		cic->ttime_mean = 0;
+		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		INIT_LIST_HEAD(&cic->queue_list);
 		atomic_inc(&ioc_count);
 	}
 
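
The allocation path above drops the field-by-field clearing in favour of one memset() followed by assignments only to the fields whose initial value is not zero (plus the helpers that need a real init call). Below is a minimal user-space sketch of that init pattern; the struct and all field names are invented for illustration.

#include <stdio.h>
#include <string.h>

/* Sketch of the init pattern the hunk above switches to: zero the whole
 * object once, then assign only the fields whose initial value is not 0.
 * Everything here is made up for illustration. */
struct io_ctx {
	void *key;
	long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	void (*dtor)(struct io_ctx *);
};

static void ctx_free(struct io_ctx *ctx) { (void)ctx; }

static void ctx_init(struct io_ctx *ctx, long now)
{
	memset(ctx, 0, sizeof(*ctx));	/* key and ttime_* become 0/NULL for free */
	ctx->last_end_request = now;	/* only the non-zero fields are set by hand */
	ctx->dtor = ctx_free;
}

int main(void)
{
	struct io_ctx ctx;

	ctx_init(&ctx, 12345);
	printf("%lu %ld\n", ctx.ttime_total, ctx.last_end_request);
	return 0;
}
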
@@ -1009,9 +1009,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
 	if (fp->f_mode & FMODE_WRITE) {
 		ret = -EROFS;
 		if (cdrom_open_write(cdi))
-			goto err;
+			goto err_release;
 		if (!CDROM_CAN(CDC_RAM))
-			goto err;
+			goto err_release;
 		ret = 0;
 		cdi->media_written = 0;
 	}
@@ -1026,6 +1026,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
 	   not be mounting, but opening with O_NONBLOCK */
 	check_disk_change(ip->i_bdev);
 	return 0;
+err_release:
+	cdi->ops->release(cdi);
 err:
 	cdi->use_count--;
 	return ret;
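
Together, the two cdrom_open() hunks give write-open failures their own label so the device's release hook runs before the common err path decrements the use count. The sketch below shows the staged goto-cleanup idiom in isolation; every function name in it is hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Minimal sketch of staged cleanup: a failure after the "open" step jumps
 * to a label that undoes the open before falling through to the common
 * error path. fake_open/fake_release/do_checks are made-up stand-ins. */
static int fake_open(void)      { return 0; }
static void fake_release(void)  { puts("released"); }
static int do_checks(void)      { return -1; /* pretend a later check fails */ }

static int open_device(void)
{
	int ret;

	ret = fake_open();
	if (ret)
		goto err;		/* nothing to undo yet */

	ret = do_checks();
	if (ret)
		goto err_release;	/* undo the open first */

	return 0;

err_release:
	fake_release();
err:
	return ret;
}

int main(void)
{
	return open_device() ? EXIT_FAILURE : EXIT_SUCCESS;
}
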
@@ -2255,8 +2255,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
 static void sky2_netpoll(struct net_device *dev)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
+	struct net_device *dev0 = sky2->hw->dev[0];
 
-	sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
+	if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
+		__netif_rx_schedule(dev0);
 }
 #endif
 
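
sky2_netpoll() no longer calls the interrupt handler directly; it schedules the NAPI poll on port 0, guarded by __netif_rx_schedule_prep() so the poll is only queued once. The sketch below mimics that prep/schedule guard with a plain C11 atomic flag; the types and helpers are mock-ups, not the netdev API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the guard pattern in the hunk above: only schedule the polling
 * work if an atomic "not already scheduled" test succeeds. Mock types only. */
struct poll_dev {
	atomic_bool sched;	/* set while a poll is pending */
};

static bool poll_schedule_prep(struct poll_dev *d)
{
	bool expected = false;

	/* succeeds for exactly one caller until the flag is cleared again */
	return atomic_compare_exchange_strong(&d->sched, &expected, true);
}

static void poll_schedule(struct poll_dev *d)
{
	printf("poll scheduled on %p\n", (void *)d);
}

static void netpoll_like(struct poll_dev *d, bool running)
{
	if (running && poll_schedule_prep(d))
		poll_schedule(d);
}

int main(void)
{
	struct poll_dev d = { .sched = false };

	netpoll_like(&d, true);		/* schedules */
	netpoll_like(&d, true);		/* no-op: already pending */
	return 0;
}
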
@@ -3446,6 +3448,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 
 			sky2_down(dev);
 			netif_device_detach(dev);
+			netif_poll_disable(dev);
 		}
 	}
 
|
@ -3474,6 +3477,8 @@ static int sky2_resume(struct pci_dev *pdev)
|
||||||
struct net_device *dev = hw->dev[i];
|
struct net_device *dev = hw->dev[i];
|
||||||
if (dev && netif_running(dev)) {
|
if (dev && netif_running(dev)) {
|
||||||
netif_device_attach(dev);
|
netif_device_attach(dev);
|
||||||
|
netif_poll_enable(dev);
|
||||||
|
|
||||||
err = sky2_up(dev);
|
err = sky2_up(dev);
|
||||||
if (err) {
|
if (err) {
|
||||||
printk(KERN_ERR PFX "%s: could not up: %d\n",
|
printk(KERN_ERR PFX "%s: could not up: %d\n",
|
||||||
|
|
fs/bio.c | 5
@@ -654,9 +654,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 				     write_to_vm, 0, &pages[cur_page], NULL);
 		up_read(&current->mm->mmap_sem);
 
-		if (ret < local_nr_pages)
+		if (ret < local_nr_pages) {
+			ret = -EFAULT;
 			goto out_unmap;
+		}
 
 		offset = uaddr & ~PAGE_MASK;
 		for (j = cur_page; j < page_limit; j++) {
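
The fs/bio.c hunk turns a short result from the page-pinning call into an explicit -EFAULT instead of jumping to the unmap path with a positive count still in ret. A small stand-in sketch of that pattern, with map_some() as a hypothetical substitute for get_user_pages():

#include <errno.h>
#include <stdio.h>

/* A helper that maps "nr" items may succeed only partially; returning its
 * raw (positive) count upward would be wrong, so the caller converts a
 * short result into a definite error code before the cleanup path. */
static int map_some(int nr)
{
	return nr - 1;		/* pretend one item could not be mapped */
}

static int map_all_or_fail(int nr)
{
	int ret = map_some(nr);

	if (ret < nr) {
		ret = -EFAULT;	/* don't leak the partial count upward */
		goto out_unmap;
	}
	return 0;

out_unmap:
	/* a real caller would undo the partial mapping here */
	return ret;
}

int main(void)
{
	printf("%d\n", map_all_or_fail(4));	/* prints a negative errno */
	return 0;
}
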
@@ -755,6 +755,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	if (request->fl_type == F_UNLCK)
 		goto out;
 
+	error = -ENOMEM;
 	new_fl = locks_alloc_lock();
 	if (new_fl == NULL)
 		goto out;
@@ -781,6 +782,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 	locks_copy_lock(new_fl, request);
 	locks_insert_lock(&inode->i_flock, new_fl);
 	new_fl = NULL;
+	error = 0;
 
 out:
 	unlock_kernel();
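
The two fs/locks.c hunks tighten the error-variable discipline in flock_lock_file(): error is primed with -ENOMEM before the allocation and only cleared to 0 once the lock is actually installed, so every jump to the shared out label returns a meaningful value. A user-space sketch of the same idiom, with all names invented:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* The shared "out" exit returns whatever "error" holds, so each step sets
 * it before it can jump there, and only the success path clears it to 0. */
static int install_lock(int want_unlock)
{
	int error = 0;
	void *new_lock = NULL;

	if (want_unlock)
		goto out;		/* nothing to do, error stays 0 */

	error = -ENOMEM;		/* value returned if the alloc fails */
	new_lock = malloc(32);
	if (new_lock == NULL)
		goto out;

	/* ... pretend the lock is inserted into a list here ... */
	new_lock = NULL;		/* ownership handed off (stand-in; leaked on purpose in this toy) */
	error = 0;			/* success */
out:
	free(new_lock);			/* no-op when already handed off */
	return error;
}

int main(void)
{
	printf("%d\n", install_lock(0));
	return 0;
}
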
@@ -329,7 +329,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
-	    CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO)
+	    CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
 #endif
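
The cputable hunk adds CPU_FTR_CI_LARGE_PAGE to the Cell feature mask; each CPU_FTRS_* macro is just the OR of the feature bits that CPU supports. A toy version of the pattern, with made-up names and bit values:

#include <stdio.h>

/* Feature-mask pattern: one bit per feature, one OR-mask per CPU family.
 * The bit assignments below are illustrative, not the real values. */
#define FTR_USE_TB		(1UL << 0)
#define FTR_HPTE_TABLE		(1UL << 1)
#define FTR_CI_LARGE_PAGE	(1UL << 2)

#define FTRS_CELL	(FTR_USE_TB | FTR_HPTE_TABLE | FTR_CI_LARGE_PAGE)

static int cpu_has_feature(unsigned long ftrs, unsigned long bit)
{
	return (ftrs & bit) != 0;
}

int main(void)
{
	printf("CI large page: %d\n", cpu_has_feature(FTRS_CELL, FTR_CI_LARGE_PAGE));
	return 0;
}
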
@@ -881,14 +881,6 @@ fastcall NORET_TYPE void do_exit(long code)
 
 	tsk->flags |= PF_EXITING;
 
-	/*
-	 * Make sure we don't try to process any timer firings
-	 * while we are already exiting.
-	 */
-	tsk->it_virt_expires = cputime_zero;
-	tsk->it_prof_expires = cputime_zero;
-	tsk->it_sched_expires = 0;
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 			current->comm, current->pid,
@@ -555,9 +555,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	struct cpu_timer_list *next;
 	unsigned long i;
 
-	if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
-		return;
-
 	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 		p->cpu_timers : p->signal->cpu_timers);
 	head += CPUCLOCK_WHICH(timer->it_clock);
@@ -1173,6 +1170,9 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 	t = tsk;
 	do {
+		if (unlikely(t->flags & PF_EXITING))
+			continue;
+
 		ticks = cputime_add(cputime_add(t->utime, t->stime),
 				    prof_left);
 		if (!cputime_eq(prof_expires, cputime_zero) &&
@@ -1193,11 +1193,7 @@ static void check_process_timers(struct task_struct *tsk,
 			    t->it_sched_expires > sched)) {
 				t->it_sched_expires = sched;
 			}
-
-			do {
-				t = next_thread(t);
-			} while (unlikely(t->flags & PF_EXITING));
-		} while (t != tsk);
+		} while ((t = next_thread(t)) != tsk);
 	}
 }
 
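
The two check_process_timers() hunks replace the trailing skip loop with a single continue at the top of the walk: exiting threads are skipped, and because continue in a do/while jumps straight to the loop condition, the (t = next_thread(t)) != tsk advance still runs. The sketch below demonstrates that control-flow detail with a mocked-up circular task list; the struct and flag are invented.

#include <stdio.h>

/* Walk a circular list starting at "tsk", skip flagged entries with
 * "continue", and advance in the while condition itself. */
#define PF_EXITING 0x4

struct task {
	int flags;
	struct task *next;	/* circular: the last entry points back to the first */
};

static void for_each_live_thread(struct task *tsk)
{
	struct task *t = tsk;

	do {
		if (t->flags & PF_EXITING)
			continue;	/* continue still evaluates the advance below */
		printf("visiting task %p\n", (void *)t);
	} while ((t = t->next) != tsk);
}

int main(void)
{
	struct task a = { 0, NULL }, b = { PF_EXITING, NULL }, c = { 0, NULL };

	a.next = &b;
	b.next = &c;
	c.next = &a;
	for_each_live_thread(&a);	/* visits a and c, skips b */
	return 0;
}
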
@@ -1289,30 +1285,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 #undef UNEXPIRED
 
-	BUG_ON(tsk->exit_state);
-
 	/*
 	 * Double-check with locks held.
 	 */
 	read_lock(&tasklist_lock);
-	spin_lock(&tsk->sighand->siglock);
+	if (likely(tsk->signal != NULL)) {
+		spin_lock(&tsk->sighand->siglock);
 
 	/*
 	 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
 	 * all the timers that are firing, and put them on the firing list.
 	 */
 	check_thread_timers(tsk, &firing);
 	check_process_timers(tsk, &firing);
 
 	/*
 	 * We must release these locks before taking any timer's lock.
 	 * There is a potential race with timer deletion here, as the
 	 * siglock now protects our private firing list. We have set
 	 * the firing flag in each timer, so that a deletion attempt
 	 * that gets the timer lock before we do will give it up and
 	 * spin until we've taken care of that timer below.
 	 */
 	spin_unlock(&tsk->sighand->siglock);
+	}
 	read_unlock(&tasklist_lock);
 
 	/*