Commit 11d8ec40 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'pm-extra-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "A few more fixes and cleanups in the x86-64 low-level hibernation
  code, PM core, cpufreq (Kconfig and intel_pstate), and the operating
  points framework.

  Specifics:

   - Prevent the low-level assembly hibernate code on x86-64 from
     referring to __PAGE_OFFSET directly as a symbol which doesn't work
     when the kernel identity mapping base is randomized, in which case
     __PAGE_OFFSET is a variable (Rafael Wysocki).

   - Avoid selecting CPU_FREQ_STAT by default as the statistics are not
     required for proper cpufreq operation (Borislav Petkov).

   - Add Skylake-X and Broadwell-X IDs to intel_pstate's list of
     processors where out-of-band (OOB) control of P-states is possible
     and if that is in use, intel_pstate should not attempt to manage
     P-states (Srinivas Pandruvada).

   - Drop some unnecessary checks from the wakeup IRQ handling code in
     the PM core (Markus Elfring).

   - Reduce the number of operating performance point (OPP) lookups in one
     of the OPP framework's helper functions (Jisheng Zhang)"

* tag 'pm-extra-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  x86/power/64: Do not refer to __PAGE_OFFSET from assembly code
  cpufreq: Do not default-yes CPU_FREQ_STAT
  cpufreq: intel_pstate: Add more out-of-band IDs
  PM / OPP: optimize dev_pm_opp_set_rate() performance a bit
  PM-wakeup: Delete unnecessary checks before three function calls
parents 39fada55 e2b3b80d
...@@ -37,11 +37,11 @@ unsigned long jump_address_phys; ...@@ -37,11 +37,11 @@ unsigned long jump_address_phys;
*/ */
unsigned long restore_cr3 __visible; unsigned long restore_cr3 __visible;
pgd_t *temp_level4_pgt __visible; unsigned long temp_level4_pgt __visible;
unsigned long relocated_restore_code __visible; unsigned long relocated_restore_code __visible;
static int set_up_temporary_text_mapping(void) static int set_up_temporary_text_mapping(pgd_t *pgd)
{ {
pmd_t *pmd; pmd_t *pmd;
pud_t *pud; pud_t *pud;
...@@ -71,7 +71,7 @@ static int set_up_temporary_text_mapping(void) ...@@ -71,7 +71,7 @@ static int set_up_temporary_text_mapping(void)
__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC)); __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
set_pud(pud + pud_index(restore_jump_address), set_pud(pud + pud_index(restore_jump_address),
__pud(__pa(pmd) | _KERNPG_TABLE)); __pud(__pa(pmd) | _KERNPG_TABLE));
set_pgd(temp_level4_pgt + pgd_index(restore_jump_address), set_pgd(pgd + pgd_index(restore_jump_address),
__pgd(__pa(pud) | _KERNPG_TABLE)); __pgd(__pa(pud) | _KERNPG_TABLE));
return 0; return 0;
...@@ -90,15 +90,16 @@ static int set_up_temporary_mappings(void) ...@@ -90,15 +90,16 @@ static int set_up_temporary_mappings(void)
.kernel_mapping = true, .kernel_mapping = true,
}; };
unsigned long mstart, mend; unsigned long mstart, mend;
pgd_t *pgd;
int result; int result;
int i; int i;
temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC); pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!temp_level4_pgt) if (!pgd)
return -ENOMEM; return -ENOMEM;
/* Prepare a temporary mapping for the kernel text */ /* Prepare a temporary mapping for the kernel text */
result = set_up_temporary_text_mapping(); result = set_up_temporary_text_mapping(pgd);
if (result) if (result)
return result; return result;
...@@ -107,13 +108,12 @@ static int set_up_temporary_mappings(void) ...@@ -107,13 +108,12 @@ static int set_up_temporary_mappings(void)
mstart = pfn_mapped[i].start << PAGE_SHIFT; mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT; mend = pfn_mapped[i].end << PAGE_SHIFT;
result = kernel_ident_mapping_init(&info, temp_level4_pgt, result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
mstart, mend);
if (result) if (result)
return result; return result;
} }
temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
return 0; return 0;
} }
......
...@@ -72,8 +72,6 @@ ENTRY(restore_image) ...@@ -72,8 +72,6 @@ ENTRY(restore_image)
/* code below has been relocated to a safe page */ /* code below has been relocated to a safe page */
ENTRY(core_restore_code) ENTRY(core_restore_code)
/* switch to temporary page tables */ /* switch to temporary page tables */
movq $__PAGE_OFFSET, %rcx
subq %rcx, %rax
movq %rax, %cr3 movq %rax, %cr3
/* flush TLB */ /* flush TLB */
movq %rbx, %rcx movq %rbx, %rcx
......
...@@ -402,6 +402,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, ...@@ -402,6 +402,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
} }
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
break;
}
}
return opp;
}
/** /**
* dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
* @dev: device for which we do this operation * @dev: device for which we do this operation
...@@ -427,7 +443,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, ...@@ -427,7 +443,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq) unsigned long *freq)
{ {
struct opp_table *opp_table; struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert(); opp_rcu_lockdep_assert();
...@@ -440,15 +455,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, ...@@ -440,15 +455,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
if (IS_ERR(opp_table)) if (IS_ERR(opp_table))
return ERR_CAST(opp_table); return ERR_CAST(opp_table);
list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { return _find_freq_ceil(opp_table, freq);
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
break;
}
}
return opp;
} }
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
...@@ -612,7 +619,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) ...@@ -612,7 +619,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
return PTR_ERR(opp_table); return PTR_ERR(opp_table);
} }
old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq); old_opp = _find_freq_ceil(opp_table, &old_freq);
if (!IS_ERR(old_opp)) { if (!IS_ERR(old_opp)) {
ou_volt = old_opp->u_volt; ou_volt = old_opp->u_volt;
ou_volt_min = old_opp->u_volt_min; ou_volt_min = old_opp->u_volt_min;
...@@ -622,7 +629,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) ...@@ -622,7 +629,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
__func__, old_freq, PTR_ERR(old_opp)); __func__, old_freq, PTR_ERR(old_opp));
} }
opp = dev_pm_opp_find_freq_ceil(dev, &freq); opp = _find_freq_ceil(opp_table, &freq);
if (IS_ERR(opp)) { if (IS_ERR(opp)) {
ret = PTR_ERR(opp); ret = PTR_ERR(opp);
dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
......
...@@ -334,10 +334,9 @@ void device_wakeup_arm_wake_irqs(void) ...@@ -334,10 +334,9 @@ void device_wakeup_arm_wake_irqs(void)
struct wakeup_source *ws; struct wakeup_source *ws;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) { list_for_each_entry_rcu(ws, &wakeup_sources, entry)
if (ws->wakeirq) dev_pm_arm_wake_irq(ws->wakeirq);
dev_pm_arm_wake_irq(ws->wakeirq);
}
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -351,10 +350,9 @@ void device_wakeup_disarm_wake_irqs(void) ...@@ -351,10 +350,9 @@ void device_wakeup_disarm_wake_irqs(void)
struct wakeup_source *ws; struct wakeup_source *ws;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) { list_for_each_entry_rcu(ws, &wakeup_sources, entry)
if (ws->wakeirq) dev_pm_disarm_wake_irq(ws->wakeirq);
dev_pm_disarm_wake_irq(ws->wakeirq);
}
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -390,9 +388,7 @@ int device_wakeup_disable(struct device *dev) ...@@ -390,9 +388,7 @@ int device_wakeup_disable(struct device *dev)
return -EINVAL; return -EINVAL;
ws = device_wakeup_detach(dev); ws = device_wakeup_detach(dev);
if (ws) wakeup_source_unregister(ws);
wakeup_source_unregister(ws);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(device_wakeup_disable); EXPORT_SYMBOL_GPL(device_wakeup_disable);
......
...@@ -32,7 +32,6 @@ config CPU_FREQ_BOOST_SW ...@@ -32,7 +32,6 @@ config CPU_FREQ_BOOST_SW
config CPU_FREQ_STAT config CPU_FREQ_STAT
bool "CPU frequency transition statistics" bool "CPU frequency transition statistics"
default y
help help
Export CPU frequency statistics information through sysfs. Export CPU frequency statistics information through sysfs.
......
...@@ -1374,6 +1374,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); ...@@ -1374,6 +1374,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params), ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
ICPU(INTEL_FAM6_BROADWELL_X, core_params),
ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
{} {}
}; };
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment