Merge branch 'for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue changes from Tejun Heo:
 "Surprisingly, Lai and I didn't break too many things implementing
  custom pools and stuff last time around and there aren't any
  follow-up changes necessary at this point.

  The only change in this pull request is Viresh's patches to make some
  per-cpu workqueues to behave as unbound workqueues dependent on a
  boot param whose default can be configured via a config option.

  This leads to higher processing overhead / lower bandwidth as more
  work items are bounced across CPUs; however, it can lead to noticeable
  powersave in certain configurations - ~10% w/ idlish constant workload
  on a big.LITTLE configuration according to Viresh.

  This is because per-cpu workqueues interfere with how the scheduler
  perceives whether or not each CPU is idle by forcing pinned tasks on
  them, which makes the scheduler's power-aware scheduling decisions
  less effective.

  Its effectiveness is likely less pronounced on homogenous
  configurations and this type of optimization can probably be made
  automatic; however, the changes are pretty minimal and the affected
  workqueues are clearly marked, so it's an easy gain for some
  configurations for the time being with pretty unintrusive changes."

* 'for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  fbcon: queue work on power efficient wq
  block: queue work on power efficient wq
  PHYLIB: queue work on system_power_efficient_wq
  workqueue: Add system wide power_efficient workqueues
  workqueues: Introduce new flag WQ_POWER_EFFICIENT for power oriented workqueues
This commit is contained in:
@@ -439,7 +439,7 @@ void phy_start_machine(struct phy_device *phydev,
 {
 	phydev->adjust_state = handler;

-	schedule_delayed_work(&phydev->state_queue, HZ);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
 }

 /**
@@ -500,7 +500,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	disable_irq_nosync(irq);
 	atomic_inc(&phydev->irq_disable);

-	schedule_work(&phydev->phy_queue);
+	queue_work(system_power_efficient_wq, &phydev->phy_queue);

 	return IRQ_HANDLED;
 }
@@ -655,7 +655,7 @@ static void phy_change(struct work_struct *work)

 	/* reschedule state queue work to run as soon as possible */
 	cancel_delayed_work_sync(&phydev->state_queue);
-	schedule_delayed_work(&phydev->state_queue, 0);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);

 	return;
@@ -918,7 +918,8 @@ void phy_state_machine(struct work_struct *work)
 	if (err < 0)
 		phy_error(phydev);

-	schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
+	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+			   PHY_STATE_TIME * HZ);
 }

 static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
Reference in New Issue
Block a user