WorkQueue: Fix up arch-specific work items where possible
Fix up arch-specific work items where possible to use the new work_struct and delayed_work structs.

Three places that enqueue bits of their stack and then return have been marked with #error as this is not permitted.

Signed-off-by: David Howells <dhowells@redhat.com>
Parent: 9db7372445
Commit: 6d5aefb8ea
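For context, the conversion this patch applies throughout is the 2.6.20 workqueue API change: a work handler now receives the work_struct pointer itself rather than an opaque context pointer and recovers the object that owns the work item with container_of(), while delayed items move to the new delayed_work type. The sketch below is illustrative only - struct my_device and its members are hypothetical and do not appear in any file touched here.

	#include <linux/workqueue.h>

	/* Illustrative only: the owning object embeds its work items. */
	struct my_device {
		struct work_struct work;	/* plain work item */
		struct delayed_work poll;	/* delayed variant */
		int state;
	};

	/* New-style handler: derive the owner from the work_struct pointer. */
	static void my_work_handler(struct work_struct *work)
	{
		struct my_device *dev = container_of(work, struct my_device, work);

		dev->state++;
	}

	/* For delayed_work, the embedded work_struct member is named "work". */
	static void my_poll_handler(struct work_struct *work)
	{
		struct my_device *dev =
			container_of(work, struct my_device, poll.work);

		dev->state++;
	}

	static void my_device_setup(struct my_device *dev)
	{
		INIT_WORK(&dev->work, my_work_handler);		/* no context argument */
		INIT_DELAYED_WORK(&dev->poll, my_poll_handler);

		schedule_work(&dev->work);
		schedule_delayed_work(&dev->poll, 0);		/* 0 jiffies delay */
	}

Because the queued item must now live inside the object the handler operates on, a work item can no longer carry a pointer to an on-stack variable; that is why the three stack-enqueuing call sites in the diff below are marked with #error instead of being converted.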
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);


 /*
  * Variables
  */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);


@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
 EXPORT_SYMBOL(sharpsl_battery_kick);


-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
 	int voltage, percent, apm_status, i = 0;

@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
 	/* Corgi cannot confirm when battery fully charged so periodically kick! */
 	if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
 	    && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);

 	while(1) {
 		voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
 	sharpsl_pm_led(SHARPSL_LED_OFF);
 	sharpsl_pm.charge_mode = CHRG_OFF;

-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }

 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
 	sharpsl_pm.charge_mode = CHRG_ERROR;
 }

-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
 	dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);

@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
 	else if (sharpsl_pm.charge_mode == CHRG_ON)
 		sharpsl_charge_off();

-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }


@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
 		sharpsl_charge_off();
 	} else if (sharpsl_pm.full_count < 2) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
 		dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 	} else {
 		sharpsl_charge_off();
 		sharpsl_pm.charge_mode = CHRG_DONE;
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)

 	cancel_delayed_work(&irda_config->gpio_expa);
 	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);

 	return 0;
 }
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
 	.rows = 8,
 	.cols = 8,
 	.keymap = nokia770_keymap,
-	.keymapsize = ARRAY_SIZE(nokia770_keymap)
+	.keymapsize = ARRAY_SIZE(nokia770_keymap),
 	.delay = 4,
 };

@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
 		printk("HP connected\n");
 }

-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
 	down(&audio_pwr_sem);
 	if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
 	up(&audio_pwr_sem);
 }

-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);

 static void nokia770_audio_pwr_down(void)
 {
@@ -35,7 +35,7 @@ static u8 hw_led_state;

 static u8 tps_leds_change;

-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
 	for (;;) {
 		u8 leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
 	}
 }

-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);

 #ifdef CONFIG_OMAP_OSK_MISTRAL

@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)

 	cancel_delayed_work(&irda_config->gpio_expa);
 	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);

 	return 0;
 }
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;

 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);

 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);


 /*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);

-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
 	if (akita_ioexp_device)
 		max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
 }
 #endif

-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
 	printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
 	info->flags = sstate->flags;
 	info->xmit_fifo_size = sstate->xmit_fifo_size;
 	info->line = line;
-	INIT_WORK(&info->work, do_softint, info);
+	INIT_WORK(&info->work, do_softint);
 	info->state = sstate;
 	if (sstate->info) {
 		kfree(info);
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
 	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	monarch_cpu = -1;
 }

-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

 /*
  * ia64_mca_cmc_int_handler
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 }

 struct create_idle {
+	struct work_struct work;
 	struct task_struct *idle;
 	struct completion done;
 	int cpu;
 };

 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);

 	c_idle->idle = fork_idle(c_idle->cpu);
 	complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
 {
 	int timeout;
 	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
 		.cpu = cpu,
 		.done = COMPLETION_INITIALIZER(c_idle.done),
 	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);

 	c_idle.idle = get_idle_for_cpu(cpu);
 	if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
 	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
 	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
 		wait_for_completion(&c_idle.done);
 	}

@@ -319,7 +319,7 @@ static void sp_cleanup(void)
 static int channel_open = 0;

 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
 	if (!channel_open) {
 		if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
 			return;
 		}

-		INIT_WORK(&work, sp_work, NULL);
+		INIT_WORK(&work, sp_work);
 		queue_work(workqueue, &work);
 	} else
 		queue_work(workqueue, &work);
@@ -14,7 +14,7 @@ static unsigned long avr_clock;

 static struct work_struct wd_work;

-static void wd_stop(void *unused)
+static void wd_stop(struct work_struct *unused)
 {
 	const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
 	int i = 0, rescue = 8;
@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void)

 	ls_uart_init();

-	INIT_WORK(&wd_work, wd_stop, NULL);
+	INIT_WORK(&wd_work, wd_stop);
 	schedule_work(&wd_work);

 	return 0;
@@ -18,11 +18,11 @@

 #define OLD_BACKLIGHT_MAX 15

-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);

-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);

 /* Although these variables are used in interrupt context, it makes no sense to
  * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
 	return level;
 }

-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
 	return error;
 }

-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
 	if (atomic_read(&kernel_backlight_disabled))
 		return;
@@ -385,6 +385,7 @@ struct fcc_enet_private {
 	phy_info_t *phy;
 	struct work_struct phy_relink;
 	struct work_struct phy_display_config;
+	struct net_device *dev;

 	uint sequence_done;

@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
 	NULL
 };

-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private, phy_relink);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;

 	if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
 	printk(".\n");
 }

-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	volatile struct fcc_enet_private *fep = dev->priv;
+	volatile struct fcc_enet_private *fep =
+		container_of(work, struct fcc_enet_private,
+			     phy_display_config);
+	struct net_device *dev = fep->dev;
 	uint s = fep->phy_status;

 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
 		cep->phy_id_done = 0;
 		cep->phy_addr = fip->fc_phyaddr;
 		mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
-		INIT_WORK(&cep->phy_relink, mii_display_status, dev);
-		INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+		INIT_WORK(&cep->phy_relink, mii_display_status);
+		INIT_WORK(&cep->phy_display_config, mii_display_config);
+		cep->dev = dev;
 #endif	/* CONFIG_USE_MDIO */

 	fip++;
@@ -173,6 +173,7 @@ struct fec_enet_private {
 	uint phy_speed;
 	phy_info_t *phy;
 	struct work_struct phy_task;
+	struct net_device *dev;

 	uint sequence_done;

@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
 	printk(".\n");
 }

-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	volatile uint *s = &(fep->phy_status);

 	printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
 	fep->sequence_done = 1;
 }

-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)priv;
-	struct fec_enet_private *fep = dev->priv;
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, phy_task);
+	struct net_device *dev = fep->dev;
 	int duplex;

 	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
 {
 	struct fec_enet_private *fep = dev->priv;

-	INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_relink);
 	schedule_work(&fep->phy_task);
 }

@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
 {
 	struct fec_enet_private *fep = dev->priv;

-	INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+	fep->dev = dev;
+	INIT_WORK(&fep->phy_task, mii_display_config);
 	schedule_work(&fep->phy_task);
 }

@@ -92,8 +92,8 @@ static int appldata_timer_active;
  * Work queue
  */
 static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);


 /*
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
  *
  * call data gathering function for each (active) module
  */
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
 {
 	struct list_head *lh;
 	struct appldata_ops *ops;
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
 	return -1;
 }

-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
 		    struct tty_struct *tty, int irq)
 {
 	struct list_head *ele, *next;
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = {

 static LIST_HEAD(mc_requests);

-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
 {
 	struct mconsole_entry *req;
 	unsigned long flags;
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused)
 	}
 }

-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);

 static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
 {
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
 		 * same device, since it tests for (dev->flags & IFF_UP). So
 		 * there's no harm in delaying the device shutdown. */
 		schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
 		goto out;
 	}
 	reactivate_fd(lp->fd, UM_ETH_IRQ);
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port)
 DECLARE_MUTEX(ports_sem);
 struct list_head ports = LIST_HEAD_INIT(ports);

-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
 {
 	struct port_list *port;
 	struct list_head *ele;
@@ -150,7 +150,7 @@ void port_work_proc(void *unused)
 	local_irq_restore(flags);
 }

-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);

 static irqreturn_t port_interrupt(int irq, void *data)
 {
@@ -48,7 +48,8 @@ struct rackmeter_dma {
 } ____cacheline_aligned;

 struct rackmeter_cpu {
-	struct work_struct sniffer;
+	struct delayed_work sniffer;
+	struct rackmeter *rm;
 	cputime64_t prev_wall;
 	cputime64_t prev_idle;
 	int zero;
@@ -208,11 +209,12 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm)
 	rackmeter_do_pause(rm, 0);
 }

-static void rackmeter_do_timer(void *data)
+static void rackmeter_do_timer(struct work_struct *work)
 {
-	struct rackmeter *rm = data;
+	struct rackmeter_cpu *rcpu =
+		container_of(work, struct rackmeter_cpu, sniffer.work);
+	struct rackmeter *rm = rcpu->rm;
 	unsigned int cpu = smp_processor_id();
-	struct rackmeter_cpu *rcpu = &rm->cpu[cpu];
 	cputime64_t cur_jiffies, total_idle_ticks;
 	unsigned int total_ticks, idle_ticks;
 	int i, offset, load, cumm, pause;
@@ -263,8 +265,10 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
 	 * on those machines yet
 	 */

-	INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm);
-	INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm);
+	rm->cpu[0].rm = rm;
+	INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
+	rm->cpu[1].rm = rm;
+	INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);

 	for_each_online_cpu(cpu) {
 		struct rackmeter_cpu *rcpu;
@@ -91,7 +91,7 @@ struct cphy {
 	int state;	/* Link status state machine */
 	adapter_t *adapter;	/* associated adapter */

-	struct work_struct phy_update;
+	struct delayed_work phy_update;

 	u16 bmsr;
 	int count;
@@ -93,9 +93,11 @@ static int my3126_interrupt_handler(struct cphy *cphy)
 	return cphy_cause_link_change;
 }

-static void my3216_poll(void *arg)
+static void my3216_poll(struct work_struct *work)
 {
-	my3126_interrupt_handler(arg);
+	struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+	my3126_interrupt_handler(cphy);
 }

 static int my3126_set_loopback(struct cphy *cphy, int on)
@@ -171,7 +173,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
 	if (cphy)
 		cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);

-	INIT_WORK(&cphy->phy_update, my3216_poll, cphy);
+	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
 	cphy->bmsr = 0;

 	return (cphy);
@@ -714,6 +714,7 @@ struct netxen_adapter {
 	spinlock_t lock;
 	struct work_struct watchdog_task;
 	struct work_struct tx_timeout_task;
+	struct net_device *netdev;
 	struct timer_list watchdog_timer;

 	u32 curr_window;
@@ -921,7 +922,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
 			struct netxen_port *port);
 int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
 int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
-void netxen_watchdog_task(unsigned long v);
+void netxen_watchdog_task(struct work_struct *work);
 void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
 			    u32 ringid);
 void netxen_process_cmd_ring(unsigned long data);
@@ -710,12 +710,13 @@ static inline int netxen_nic_check_temp(struct netxen_adapter *adapter)
 	return rv;
 }

-void netxen_watchdog_task(unsigned long v)
+void netxen_watchdog_task(struct work_struct *work)
 {
 	int port_num;
 	struct netxen_port *port;
 	struct net_device *netdev;
-	struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+	struct netxen_adapter *adapter =
+		container_of(work, struct netxen_adapter, watchdog_task);

 	if (netxen_nic_check_temp(adapter))
 		return;
@@ -64,7 +64,7 @@ static int netxen_nic_open(struct net_device *netdev);
 static int netxen_nic_close(struct net_device *netdev);
 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
 static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
 static void netxen_watchdog(unsigned long);
 static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
 static int netxen_nic_ioctl(struct net_device *netdev,
@@ -274,8 +274,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->ahw.xg_linkup = 0;
 	adapter->watchdog_timer.function = &netxen_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
-	INIT_WORK(&adapter->watchdog_task,
-		  (void (*)(void *))netxen_watchdog_task, adapter);
+	INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
 	adapter->ahw.pdev = pdev;
 	adapter->proc_cmd_buf_counter = 0;
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
@@ -379,8 +378,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 				    dev_addr);
 		}
 	}
-	INIT_WORK(&adapter->tx_timeout_task,
-		  (void (*)(void *))netxen_tx_timeout_task, netdev);
+	adapter->netdev = netdev;
+	INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);

@@ -938,18 +937,20 @@ static void netxen_tx_timeout(struct net_device *netdev)
 	schedule_work(&adapter->tx_timeout_task);
 }

-static void netxen_tx_timeout_task(struct net_device *netdev)
+static void netxen_tx_timeout_task(struct work_struct *work)
 {
-	struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+	struct netxen_adapter *adapter =
+		container_of(work, struct netxen_adapter, tx_timeout_task);
+	struct net_device *netdev = adapter->netdev;
 	unsigned long flags;

 	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
 	       netxen_nic_driver_name, netdev->name);

-	spin_lock_irqsave(&port->adapter->lock, flags);
+	spin_lock_irqsave(&adapter->lock, flags);
 	netxen_nic_close(netdev);
 	netxen_nic_open(netdev);
-	spin_unlock_irqrestore(&port->adapter->lock, flags);
+	spin_unlock_irqrestore(&adapter->lock, flags);
 	netdev->trans_start = jiffies;
 	netif_wake_queue(netdev);
 }
@@ -210,6 +210,7 @@ struct smc_local {

 	/* work queue */
 	struct work_struct phy_configure;
+	struct net_device *dev;
 	int work_pending;

 	spinlock_t lock;
@@ -1114,10 +1115,11 @@ static void smc_phy_check_media(struct net_device *dev, int init)
  * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
  * is controlled by the RPC SPEED and RPC DPLX bits.
  */
-static void smc_phy_configure(void *data)
+static void smc_phy_configure(struct work_struct *work)
 {
-	struct net_device *dev = data;
-	struct smc_local *lp = netdev_priv(dev);
+	struct smc_local *lp =
+		container_of(work, struct smc_local, phy_configure);
+	struct net_device *dev = lp->dev;
 	void __iomem *ioaddr = lp->base;
 	int phyaddr = lp->mii.phy_id;
 	int my_phy_caps; /* My PHY capabilities */
@@ -1592,7 +1594,7 @@ smc_open(struct net_device *dev)

 	/* Configure the PHY, initialize the link state */
 	if (lp->phy_type != 0)
-		smc_phy_configure(dev);
+		smc_phy_configure(&lp->phy_configure);
 	else {
 		spin_lock_irq(&lp->lock);
 		smc_10bt_check_media(dev, 1);
@@ -1972,7 +1974,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
 #endif

 	tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
-	INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
+	INIT_WORK(&lp->phy_configure, smc_phy_configure);
+	lp->dev = dev;
 	lp->mii.phy_id_mask = 0x1f;
 	lp->mii.reg_num_mask = 0x1f;
 	lp->mii.force_media = 0;
@@ -2322,7 +2325,7 @@ static int smc_drv_resume(struct platform_device *dev)
 			smc_reset(ndev);
 			smc_enable(ndev);
 			if (lp->phy_type != 0)
-				smc_phy_configure(ndev);
+				smc_phy_configure(&lp->phy_configure);
 			netif_device_attach(ndev);
 		}
 	}
@@ -32,8 +32,8 @@

 static void ieee_init(struct ieee80211_device *ieee);
 static void softmac_init(struct ieee80211softmac_device *sm);
-static void set_rts_cts_work(void *d);
-static void set_basic_rates_work(void *d);
+static void set_rts_cts_work(struct work_struct *work);
+static void set_basic_rates_work(struct work_struct *work);

 static void housekeeping_init(struct zd_mac *mac);
 static void housekeeping_enable(struct zd_mac *mac);
@@ -48,8 +48,8 @@ int zd_mac_init(struct zd_mac *mac,
 	memset(mac, 0, sizeof(*mac));
 	spin_lock_init(&mac->lock);
 	mac->netdev = netdev;
-	INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac);
-	INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac);
+	INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
+	INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);

 	ieee_init(ieee);
 	softmac_init(ieee80211_priv(netdev));
@@ -366,9 +366,10 @@ static void try_enable_tx(struct zd_mac *mac)
 	spin_unlock_irqrestore(&mac->lock, flags);
 }

-static void set_rts_cts_work(void *d)
+static void set_rts_cts_work(struct work_struct *work)
 {
-	struct zd_mac *mac = d;
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, set_rts_cts_work.work);
 	unsigned long flags;
 	u8 rts_rate;
 	unsigned int short_preamble;
@@ -387,9 +388,10 @@ static void set_rts_cts_work(void *d)
 	try_enable_tx(mac);
 }

-static void set_basic_rates_work(void *d)
+static void set_basic_rates_work(struct work_struct *work)
 {
-	struct zd_mac *mac = d;
+	struct zd_mac *mac =
+		container_of(work, struct zd_mac, set_basic_rates_work.work);
 	unsigned long flags;
 	u16 basic_rates;

@@ -467,12 +469,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes)
 	if (need_set_rts_cts && !mac->updating_rts_rate) {
 		mac->updating_rts_rate = 1;
 		netif_stop_queue(mac->netdev);
-		queue_work(zd_workqueue, &mac->set_rts_cts_work);
+		queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
 	}
 	if (need_set_rates && !mac->updating_basic_rates) {
 		mac->updating_basic_rates = 1;
 		netif_stop_queue(mac->netdev);
-		queue_work(zd_workqueue, &mac->set_basic_rates_work);
+		queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work,
+				   0);
 	}
 	spin_unlock_irqrestore(&mac->lock, flags);
 }
@@ -133,8 +133,8 @@ struct zd_mac {
 	struct iw_statistics iw_stats;

 	struct housekeeping housekeeping;
-	struct work_struct set_rts_cts_work;
-	struct work_struct set_basic_rates_work;
+	struct delayed_work set_rts_cts_work;
+	struct delayed_work set_basic_rates_work;

 	unsigned int stats_count;
 	u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
@@ -148,7 +148,7 @@ struct chip_data {
 	void (*cs_control)(u32 command);
 };

-static void pump_messages(void *data);
+static void pump_messages(struct work_struct *work);

 static int flush(struct driver_data *drv_data)
 {
@@ -884,9 +884,10 @@ static void pump_transfers(unsigned long data)
 	}
 }

-static void pump_messages(void *data)
+static void pump_messages(struct work_struct *work)
 {
-	struct driver_data *drv_data = data;
+	struct driver_data *drv_data =
+		container_of(work, struct driver_data, pump_messages);
 	unsigned long flags;

 	/* Lock queue and check for queue work */
@@ -1098,7 +1099,7 @@ static int init_queue(struct driver_data *drv_data)
 	tasklet_init(&drv_data->pump_transfers,
 			pump_transfers, (unsigned long)drv_data);

-	INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+	INIT_WORK(&drv_data->pump_messages, pump_messages);
 	drv_data->workqueue = create_singlethread_workqueue(
 					drv_data->master->cdev.dev->bus_id);
 	if (drv_data->workqueue == NULL)
@@ -68,7 +68,7 @@ struct usb_hub {

 	unsigned has_indicators:1;
 	u8 indicator[USB_MAXCHILDREN];
-	struct work_struct leds;
+	struct delayed_work leds;
 };


@@ -76,7 +76,7 @@ struct appledisplay {
 	char *urbdata;		/* interrupt URB data buffer */
 	char *msgdata;		/* control message data buffer */

-	struct work_struct work;
+	struct delayed_work work;
 	int button_pressed;
 	spinlock_t lock;
 };
@@ -117,7 +117,7 @@ static void appledisplay_complete(struct urb *urb)
 		case ACD_BTN_BRIGHT_UP:
 		case ACD_BTN_BRIGHT_DOWN:
 			pdata->button_pressed = 1;
-			queue_work(wq, &pdata->work);
+			queue_delayed_work(wq, &pdata->work, 0);
 			break;
 		case ACD_BTN_NONE:
 		default:
@@ -184,9 +184,10 @@ static struct backlight_properties appledisplay_bl_data = {
 	.max_brightness = 0xFF
 };

-static void appledisplay_work(void *private)
+static void appledisplay_work(struct work_struct *work)
 {
-	struct appledisplay *pdata = private;
+	struct appledisplay *pdata =
+		container_of(work, struct appledisplay, work.work);
 	int retval;

 	up(&pdata->bd->sem);
@@ -238,7 +239,7 @@ static int appledisplay_probe(struct usb_interface *iface,
 	pdata->udev = udev;

 	spin_lock_init(&pdata->lock);
-	INIT_WORK(&pdata->work, appledisplay_work, pdata);
+	INIT_DELAYED_WORK(&pdata->work, appledisplay_work);

 	/* Allocate buffer for control messages */
 	pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
@@ -964,9 +964,10 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
  * Our LCD controller task (which is called when we blank or unblank)
  * via keventd.
  */
-static void pxafb_task(void *dummy)
+static void pxafb_task(struct work_struct *work)
 {
-	struct pxafb_info *fbi = dummy;
+	struct pxafb_info *fbi =
+		container_of(work, struct pxafb_info, task);
 	u_int state = xchg(&fbi->task_state, -1);

 	set_ctrlr_state(fbi, state);
@@ -1159,7 +1160,7 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
 	}

 	init_waitqueue_head(&fbi->ctrlr_wait);
-	INIT_WORK(&fbi->task, pxafb_task, fbi);
+	INIT_WORK(&fbi->task, pxafb_task);
 	init_MUTEX(&fbi->ctrlr_sem);

 	return fbi;
@@ -24,7 +24,7 @@ struct omap_irda_config {
 	/* Very specific to the needs of some platforms (h3,h4)
 	 * having calls which can sleep in irda_set_speed.
 	 */
-	struct work_struct gpio_expa;
+	struct delayed_work gpio_expa;
 	int rx_channel;
 	int tx_channel;
 	unsigned long dest_start;
@@ -32,7 +32,7 @@ struct netpoll_info {
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
 	struct sk_buff_head txq;
-	struct work_struct tx_work;
+	struct delayed_work tx_work;
 };

 void netpoll_poll(struct netpoll *np);