Merge branch 'acpi-scan' to satisfy dependencies.
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP		BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;
 
 static void lpss_iosf_enter_d3_state(void)
 {
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
 
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
 			LPSS_IOSF_GPIODEF0, value1, mask1);
+
+	lpss_iosf_d3_entered = true;
+
 exit:
 	mutex_unlock(&lpss_iosf_mutex);
 }
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
 
 	mutex_lock(&lpss_iosf_mutex);
 
+	if (!lpss_iosf_d3_entered)
+		goto exit;
+
+	lpss_iosf_d3_entered = false;
+
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
 			LPSS_IOSF_GPIODEF0, value1, mask1);
 
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
 	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
 			LPSS_IOSF_PMCSR, value2, mask2);
 
 exit:
 	mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool runtime)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
-	bool wakeup = runtime || device_may_wakeup(dev);
 	int ret;
 
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
 	 * wrong status for devices being about to be powered off. See
 	 * lpss_iosf_enter_d3_state() for further information.
 	 */
-	if ((runtime || !pm_suspend_via_firmware()) &&
+	if (acpi_target_system_state() == ACPI_STATE_S0 &&
 	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_enter_d3_state();
 
 	return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev, bool runtime)
+static int acpi_lpss_resume(struct device *dev)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
 	 * This call is kept first to be in symmetry with
 	 * acpi_lpss_runtime_suspend() one.
 	 */
-	if ((runtime || !pm_resume_via_firmware()) &&
-	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_exit_d3_state();
 
 	ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
 		return 0;
 
 	ret = pm_generic_suspend_late(dev);
-	return ret ? ret : acpi_lpss_suspend(dev, false);
+	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev, false);
+	int ret = acpi_lpss_resume(dev);
 
 	return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev, true);
+	int ret = acpi_lpss_resume(dev);
 
 	return ret ? ret : pm_generic_runtime_resume(dev);
 }
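The new lpss_iosf_d3_entered flag pairs the two quirk paths: lpss_iosf_exit_d3_state() now backs the power-off quirk out only if lpss_iosf_enter_d3_state() actually ran, and the suspend side applies the quirk only when the target system state is S0 (runtime PM or suspend-to-idle), so a resume from hibernation no longer tries to undo a state that was never entered. A minimal user-space sketch of that guard, using pthreads instead of the kernel mutex API; the names are illustrative, not taken from the driver:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool d3_entered;		/* mirrors lpss_iosf_d3_entered */

static void enter_d3(void)
{
	pthread_mutex_lock(&state_lock);
	/* ... apply the power-off quirk here ... */
	d3_entered = true;
	pthread_mutex_unlock(&state_lock);
}

static void exit_d3(void)
{
	pthread_mutex_lock(&state_lock);
	if (!d3_entered)	/* nothing to undo: enter_d3() never ran */
		goto out;
	d3_entered = false;
	/* ... undo the quirk here ... */
out:
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	exit_d3();		/* safe even without a prior enter_d3() */
	enter_d3();
	exit_d3();
	printf("paired enter/exit handled\n");
	return 0;
}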
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
 		return_ACPI_STATUS(status);
 	}
 
-	/*
-	 * 1) Disable all GPEs
-	 * 2) Enable all wakeup GPEs
-	 */
+	/* Disable all GPEs */
 	status = acpi_hw_disable_all_gpes();
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}
+	/*
+	 * If the target sleep state is S5, clear all GPEs and fixed events too
+	 */
+	if (sleep_state == ACPI_STATE_S5) {
+		status = acpi_hw_clear_acpi_status();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
 	acpi_gbl_system_awake_and_running = FALSE;
 
+	/* Enable all wakeup GPEs */
 	status = acpi_hw_enable_all_wakeup_gpes();
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
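On a transition to S5 the status bits of all GPEs and fixed events are cleared before the wakeup GPEs are enabled, so an event latched earlier cannot immediately bring the platform back out of soft-off. A small stand-alone model of the same ordering, assuming made-up status codes and helpers in place of the ACPICA ones:

#include <stdint.h>
#include <stdio.h>

enum status { OK = 0, FAIL = 1 };
#define FAILURE(s)	((s) != OK)
#define STATE_S5	5

static uint32_t event_status = 0x5;	/* pretend two events are latched */

static enum status clear_event_status(void)
{
	event_status = 0;		/* drop anything latched earlier */
	return OK;
}

static enum status enable_wake_events(void)
{
	/* arm only the events allowed to wake the system */
	return OK;
}

static enum status legacy_sleep(int sleep_state)
{
	enum status status;

	if (sleep_state == STATE_S5) {	/* powering off: forget stale events */
		status = clear_event_status();
		if (FAILURE(status))
			return status;
	}

	status = enable_wake_events();
	if (FAILURE(status))
		return status;

	printf("entering S%d, pending status=0x%x\n", sleep_state, event_status);
	return OK;
}

int main(void)
{
	return legacy_sleep(STATE_S5);
}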
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 			status =
 			    acpi_ps_create_op(walk_state, aml_op_start, &op);
 			if (ACPI_FAILURE(status)) {
+				/*
+				 * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+				 * executing it as a control method. However, if we encounter
+				 * an error while loading the table, we need to keep trying to
+				 * load the table rather than aborting the table load. Set the
+				 * status to AE_OK to proceed with the table load.
+				 */
+				if ((walk_state->
+				     parse_flags & ACPI_PARSE_MODULE_LEVEL)
+				    && status == AE_ALREADY_EXISTS) {
+					status = AE_OK;
+				}
 				if (status == AE_CTRL_PARSE_CONTINUE) {
 					continue;
 				}
@@ -694,6 +706,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 			acpi_ps_next_parse_state(walk_state, op, status);
 			if (status == AE_CTRL_PENDING) {
 				status = AE_OK;
+			} else
+			    if ((walk_state->
+				 parse_flags & ACPI_PARSE_MODULE_LEVEL)
+				&& status != AE_CTRL_TRANSFER
+				&& ACPI_FAILURE(status)) {
+				/*
+				 * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
+				 * loading a table by executing it as a control method.
+				 * However, if we encounter an error while loading the table,
+				 * we need to keep trying to load the table rather than
+				 * aborting the table load (setting the status to AE_OK
+				 * continues the table load). If we get a failure at this
+				 * point, it means that the dispatcher got an error while
+				 * processing Op (most likely an AML operand error) or a
+				 * control method was called from module level and the
+				 * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
+				 * leave the status alone, there's nothing wrong with it.
+				 */
+				status = AE_OK;
 			}
 		}
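Both parser hunks implement the same policy for module-level code (ACPI_PARSE_MODULE_LEVEL, i.e. a table being loaded by executing it): the first tolerates AE_ALREADY_EXISTS from acpi_ps_create_op(), and the second downgrades any dispatcher failure other than AE_CTRL_TRANSFER to AE_OK, so a single bad object no longer aborts loading the rest of the table. A stand-alone sketch of the broader second rule; the status names mimic ACPICA but the code is only a model:

#include <stdio.h>

enum status { AE_OK, AE_ALREADY_EXISTS, AE_AML_OPERAND_TYPE, AE_CTRL_TRANSFER };

#define PARSE_MODULE_LEVEL	0x1	/* "loading a table", not a method call */

/* Decide what to do with a per-opcode error while parsing. */
static enum status filter_status(unsigned int parse_flags, enum status status)
{
	if (!(parse_flags & PARSE_MODULE_LEVEL))
		return status;			/* normal execution: report it */
	if (status == AE_CTRL_TRANSFER)
		return status;			/* control transfer: not an error */
	return AE_OK;				/* keep loading the rest of the table */
}

int main(void)
{
	printf("%d\n", filter_status(PARSE_MODULE_LEVEL, AE_ALREADY_EXISTS)); /* 0 = AE_OK */
	printf("%d\n", filter_status(0, AE_AML_OPERAND_TYPE));                /* unchanged */
	printf("%d\n", filter_status(PARSE_MODULE_LEVEL, AE_CTRL_TRANSFER));  /* unchanged */
	return 0;
}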
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
 	switch (lookup_status) {
 	case AE_ALREADY_EXISTS:
 
-		acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+		acpi_os_printf(ACPI_MSG_BIOS_ERROR);
 		message = "Failure creating";
 		break;
 
 	case AE_NOT_FOUND:
 
-		acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+		acpi_os_printf(ACPI_MSG_BIOS_ERROR);
 		message = "Could not resolve";
 		break;
 
 	default:
 
-		acpi_os_printf("\n" ACPI_MSG_ERROR);
+		acpi_os_printf(ACPI_MSG_ERROR);
 		message = "Failure resolving";
 		break;
 	}
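Each lookup failure now prints its severity prefix (ACPI_MSG_BIOS_ERROR for firmware-side problems, ACPI_MSG_ERROR otherwise) at the start of the line rather than after a leading newline, followed by a short action message. A stand-alone model of the selection logic, with literal strings standing in for the ACPICA prefix macros:

#include <stdio.h>

enum status { AE_OK, AE_ALREADY_EXISTS, AE_NOT_FOUND, AE_AML_ERROR };

static void prefixed_namespace_error(enum status lookup_status, const char *name)
{
	const char *prefix;
	const char *message;

	switch (lookup_status) {
	case AE_ALREADY_EXISTS:
		prefix = "ACPI BIOS Error (bug): ";	/* stands in for ACPI_MSG_BIOS_ERROR */
		message = "Failure creating";
		break;
	case AE_NOT_FOUND:
		prefix = "ACPI BIOS Error (bug): ";
		message = "Could not resolve";
		break;
	default:
		prefix = "ACPI Error: ";		/* stands in for ACPI_MSG_ERROR */
		message = "Failure resolving";
		break;
	}

	printf("%s%s [%s]\n", prefix, message, name);
}

int main(void)
{
	prefixed_namespace_error(AE_NOT_FOUND, "\\_SB.PCI0.XHC");
	prefixed_namespace_error(AE_AML_ERROR, "\\_SB.FOO");
	return 0;
}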
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
 			 */
 			pr_err("extension failed to load: %s", hook->name);
 			__battery_hook_unregister(hook, 0);
-			return;
+			goto end;
 		}
 	}
 	pr_info("new extension: %s\n", hook->name);
+end:
 	mutex_unlock(&hook_mutex);
 }
 EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
  */
 static void battery_hook_add_battery(struct acpi_battery *battery)
 {
-	struct acpi_battery_hook *hook_node;
+	struct acpi_battery_hook *hook_node, *tmp;
 
 	mutex_lock(&hook_mutex);
 	INIT_LIST_HEAD(&battery->list);
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
 	 * when a battery gets hotplugged or initialized
 	 * during the battery module initialization.
 	 */
-	list_for_each_entry(hook_node, &battery_hook_list, list) {
+	list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
 		if (hook_node->add_battery(battery->bat)) {
 			/*
 			 * The notification of the extensions has failed, to
 			 * prevent further errors we will unload the extension.
 			 */
-			__battery_hook_unregister(hook_node, 0);
 			pr_err("error in extension, unloading: %s",
 					hook_node->name);
+			__battery_hook_unregister(hook_node, 0);
 		}
 	}
 	mutex_unlock(&hook_mutex);
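__battery_hook_unregister() removes the hook from battery_hook_list, so the walk now uses list_for_each_entry_safe(), which caches the next element in tmp before the loop body can unlink the current one, and the error is logged before the hook is torn down. A stand-alone model of safe removal during iteration over an intrusive circular list, a simplified stand-in for the kernel's <linux/list.h>:

#include <stdio.h>

struct node {
	int id;
	int broken;		/* pretend add_battery() failed for this hook */
	struct node *prev, *next;
};

/* Circular list with a dedicated head node, like the kernel's list_head. */
static struct node head = { .prev = &head, .next = &head };

static void list_add_tail_node(struct node *n)
{
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
}

static void list_del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2, .broken = 1 }, c = { .id = 3 };
	struct node *pos, *tmp;

	list_add_tail_node(&a);
	list_add_tail_node(&b);
	list_add_tail_node(&c);

	/*
	 * "Safe" traversal: remember the next node before the body runs,
	 * so removing 'pos' does not derail the walk.
	 */
	for (pos = head.next, tmp = pos->next; pos != &head; pos = tmp, tmp = pos->next) {
		if (pos->broken) {
			printf("error in extension, unloading: %d\n", pos->id);
			list_del_node(pos);
		}
	}

	for (pos = head.next; pos != &head; pos = pos->next)
		printf("still registered: %d\n", pos->id);

	return 0;
}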
@@ -2042,7 +2042,7 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
 		.ident = "Thinkpad X1 Carbon 6th",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
 		},
 	},
 	{ },
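Matching on DMI_PRODUCT_NAME tied the EC no-wakeup quirk to one specific model code (20KGS3JF01); matching on DMI_PRODUCT_FAMILY makes it apply to every X1 Carbon 6th variant. A stand-alone model of this kind of quirk table, substring matching against the vendor and family strings; the helper names are invented, not the kernel's DMI API:

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *ident;
	const char *vendor;	/* must appear in the DMI vendor string */
	const char *family;	/* must appear in the DMI product family */
};

static const struct quirk no_wakeup[] = {
	{ "Thinkpad X1 Carbon 6th", "LENOVO", "Thinkpad X1 Carbon 6th" },
	{ NULL, NULL, NULL },	/* table terminator, like the kernel's { } */
};

static const struct quirk *match_quirk(const struct quirk *table,
				       const char *vendor, const char *family)
{
	for (; table->ident; table++)
		if (strstr(vendor, table->vendor) && strstr(family, table->family))
			return table;
	return NULL;
}

int main(void)
{
	/* Any 20KG/20KH model reports the same product family string. */
	const struct quirk *q = match_quirk(no_wakeup, "LENOVO",
					    "Thinkpad X1 Carbon 6th");

	printf("quirk %s\n", q ? q->ident : "not matched");
	return 0;
}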
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	const guid_t *guid;
 	int rc, i;
 
+	if (cmd_rc)
+		*cmd_rc = -EINVAL;
 	func = cmd;
 	if (cmd == ND_CMD_CALL) {
 		call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		 * If we return an error (like elsewhere) then caller wouldn't
 		 * be able to rely upon data returned to make calculation.
 		 */
+		if (cmd_rc)
+			*cmd_rc = 0;
 		return 0;
 	}
 
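Together the two acpi_nfit_ctl() hunks make the optional *cmd_rc out-parameter well defined on every return path: it is pessimistically initialized to -EINVAL on entry and explicitly set to 0 on the early-success return whose data the caller is expected to trust. The general discipline in stand-alone form, with an invented function name:

#include <errno.h>
#include <stdio.h>

/*
 * rc is the transport result, *cmd_rc the command result; both must be
 * meaningful no matter which return path is taken.
 */
static int do_ctl(int cmd, int *cmd_rc)
{
	if (cmd_rc)
		*cmd_rc = -EINVAL;	/* pessimistic default, set before any return */

	if (cmd < 0)
		return -EINVAL;		/* *cmd_rc already holds -EINVAL */

	if (cmd == 0) {			/* early-success path */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	if (cmd_rc)
		*cmd_rc = 0;		/* normal completion */
	return 0;
}

int main(void)
{
	int cmd_rc = 12345;		/* garbage on purpose */

	do_ctl(-1, &cmd_rc);
	printf("cmd_rc after failure: %d\n", cmd_rc);	/* -22, not garbage */
	return 0;
}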
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
 
 		mutex_lock(&acpi_desc->init_mutex);
 		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-				work_busy(&acpi_desc->dwork.work)
+				acpi_desc->scrub_busy
 				&& !acpi_desc->cancel ? "+\n" : "\n");
 		mutex_unlock(&acpi_desc->init_mutex);
 	}
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
 	return 0;
 }
 
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 1;
+	/* note this should only be set from within the workqueue */
+	if (tmo)
+		acpi_desc->scrub_tmo = tmo;
+	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+	__sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 0;
+	acpi_desc->scrub_count++;
+	if (acpi_desc->scrub_count_state)
+		sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
 static void acpi_nfit_scrub(struct work_struct *work)
 {
 	struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
 	mutex_lock(&acpi_desc->init_mutex);
 	query_rc = acpi_nfit_query_poison(acpi_desc);
 	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
-	if (tmo) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
-		acpi_desc->scrub_tmo = tmo;
-	} else {
-		acpi_desc->scrub_count++;
-		if (acpi_desc->scrub_count_state)
-			sysfs_notify_dirent(acpi_desc->scrub_count_state);
-	}
+	if (tmo)
+		__sched_ars(acpi_desc, tmo);
+	else
+		notify_ars_done(acpi_desc);
 	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
 	mutex_unlock(&acpi_desc->init_mutex);
 }
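work_busy() reflects only the instantaneous state of the work item, so the sysfs scrub attribute could read as idle in the window between one ARS pass finishing and the next being queued. The new scrub_busy bit is set in __sched_ars() and cleared in notify_ars_done(), both under init_mutex (enforced by lockdep_assert_held()), giving scrub_show() a stable in-progress indication. A stand-alone pthread model of the same idea, a busy flag owned by the scheduling and completion paths rather than derived from the worker's state; all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static int scrub_busy;		/* set when work is scheduled, cleared when done */
static int scrub_count;

static void *scrub_work(void *arg)
{
	(void)arg;
	usleep(100 * 1000);	/* pretend the scrub takes a while */

	pthread_mutex_lock(&init_mutex);
	scrub_busy = 0;		/* notify_ars_done() equivalent */
	scrub_count++;
	pthread_mutex_unlock(&init_mutex);
	return NULL;
}

static void sched_scrub(pthread_t *t)
{
	pthread_mutex_lock(&init_mutex);
	scrub_busy = 1;		/* __sched_ars() equivalent: flag set when queueing */
	pthread_mutex_unlock(&init_mutex);
	pthread_create(t, NULL, scrub_work, NULL);
}

int main(void)
{
	pthread_t t;

	sched_scrub(&t);

	pthread_mutex_lock(&init_mutex);
	printf("%d%s", scrub_count, scrub_busy ? "+\n" : "\n");	/* like scrub_show() */
	pthread_mutex_unlock(&init_mutex);

	pthread_join(t, NULL);

	pthread_mutex_lock(&init_mutex);
	printf("%d%s", scrub_count, scrub_busy ? "+\n" : "\n");
	pthread_mutex_unlock(&init_mutex);
	return 0;
}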
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 			break;
 		}
 
-	queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+	sched_ars(acpi_desc);
 	return 0;
 }
 
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
 		}
 	}
 	if (scheduled) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+		sched_ars(acpi_desc);
 		dev_dbg(dev, "ars_scan triggered\n");
 	}
 	mutex_unlock(&acpi_desc->init_mutex);
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
 	unsigned int max_ars;
 	unsigned int scrub_count;
 	unsigned int scrub_mode;
+	unsigned int scrub_busy:1;
 	unsigned int cancel:1;
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
 	if (cpu_node) {
 		cpu_node = acpi_find_processor_package_id(table, cpu_node,
 							  level, flag);
-		/* Only the first level has a guaranteed id */
-		if (level == 0)
+		/*
+		 * As per specification if the processor structure represents
+		 * an actual processor, then ACPI processor ID must be valid.
+		 * For processor containers ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+		 * should be set if the UID is valid
+		 */
+		if (level == 0 ||
+		    cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
 			return cpu_node->acpi_processor_id;
 		return ACPI_PTR_DIFF(cpu_node, table);
 	}
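Above level 0 the returned tag is now the firmware-assigned ACPI processor ID whenever the PPTT node sets ACPI_PPTT_ACPI_PROCESSOR_ID_VALID, and only otherwise the node's byte offset within the table (ACPI_PTR_DIFF), which is unique but not a real UID. A stand-alone model of that fallback, with simplified structures standing in for the PPTT ones:

#include <stdint.h>
#include <stdio.h>

#define ID_VALID	(1u << 1)	/* models ACPI_PPTT_ACPI_PROCESSOR_ID_VALID */

struct pptt_node {
	uint32_t flags;
	uint32_t acpi_processor_id;
};

struct table {
	struct pptt_node nodes[4];
};

static uint32_t topology_tag(const struct table *tbl,
			     const struct pptt_node *node, int level)
{
	if (level == 0 || (node->flags & ID_VALID))
		return node->acpi_processor_id;	/* firmware-provided UID */

	/* No valid UID: fall back to the node's offset inside the table. */
	return (uint32_t)((const char *)node - (const char *)tbl);
}

int main(void)
{
	struct table tbl = {
		.nodes = {
			[1] = { .flags = ID_VALID, .acpi_processor_id = 42 },
			[2] = { .flags = 0, .acpi_processor_id = 99 },
		},
	};

	printf("%u\n", (unsigned)topology_tag(&tbl, &tbl.nodes[1], 1));	/* 42 */
	printf("%u\n", (unsigned)topology_tag(&tbl, &tbl.nodes[2], 1));	/* offset, not 99 */
	return 0;
}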
@@ -1528,7 +1528,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
 static bool acpi_is_indirect_io_slave(struct acpi_device *device)
 {
 	struct acpi_device *parent = device->parent;
-	const struct acpi_device_id indirect_io_hosts[] = {
+	static const struct acpi_device_id indirect_io_hosts[] = {
 		{"HISI0191", 0},
 		{}
 	};
@@ -1540,6 +1540,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
 {
 	struct list_head resource_list;
 	bool is_serial_bus_slave = false;
+	/*
+	 * These devices have multiple I2cSerialBus resources and an i2c-client
+	 * must be instantiated for each, each with its own i2c_device_id.
+	 * Normally we only instantiate an i2c-client for the first resource,
+	 * using the ACPI HID as id. These special cases are handled by the
+	 * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
+	 * which i2c_device_id to use for each resource.
+	 */
+	static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
+		{"BSG1160", },
+		{}
+	};
 
 	if (acpi_is_indirect_io_slave(device))
 		return true;
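Devices whose _HID appears in i2c_multi_instantiate_ids describe several I2cSerialBus connections under one ACPI device, so instead of creating a single i2c-client from the first resource they are left to a platform driver that instantiates one client per resource; the hunk just below returns false for matching devices so that a platform device is created for the multi-instantiate driver to bind to. A stand-alone model of the terminated ID-table match (the helper mimics acpi_match_device_ids(), which returns 0 on a match):

#include <stdio.h>
#include <string.h>

struct device_id {
	const char *id;		/* ACPI _HID; empty entry terminates the table */
};

static const struct device_id i2c_multi_instantiate_ids[] = {
	{ "BSG1160" },
	{ NULL },
};

/* 0 if hid is in the table, -1 otherwise. */
static int match_device_ids(const char *hid, const struct device_id *ids)
{
	for (; ids->id; ids++)
		if (!strcmp(hid, ids->id))
			return 0;
	return -1;
}

int main(void)
{
	const char *hid = "BSG1160";

	if (!match_device_ids(hid, i2c_multi_instantiate_ids))
		printf("%s: one client per I2cSerialBus resource\n", hid);
	else
		printf("%s: default enumeration\n", hid);
	return 0;
}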
@@ -1551,6 +1563,10 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
 	     fwnode_property_present(&device->fwnode, "baud")))
 		return true;
 
+	/* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
+	if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
+		return false;
+
 	INIT_LIST_HEAD(&resource_list);
 	acpi_dev_get_resources(device, &resource_list,
 			       acpi_check_serial_bus_slave,
@@ -1612,7 +1628,8 @@ static int acpi_add_single_object(struct acpi_device **child,
 	 * Note this must be done before the get power-/wakeup_dev-flags calls.
 	 */
 	if (type == ACPI_BUS_TYPE_DEVICE)
-		acpi_bus_get_status(device);
+		if (acpi_bus_get_status(device) < 0)
+			acpi_set_device_status(device, 0);
 
 	acpi_bus_get_power_flags(device);
 	acpi_bus_get_wakeup_device_flags(device);
@@ -1690,7 +1707,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
 		 * acpi_add_single_object updates this once we've an acpi_device
 		 * so that acpi_bus_get_status' quirk handling can be used.
 		 */
-		*sta = 0;
+		*sta = ACPI_STA_DEFAULT;
 		break;
 	case ACPI_TYPE_PROCESSOR:
 		*type = ACPI_BUS_TYPE_PROCESSOR;
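ACPI_TYPE_DEVICE objects now start out as ACPI_STA_DEFAULT (present, enabled, shown in UI, functioning), and acpi_add_single_object() later replaces that with the real _STA result, zeroing the status only when acpi_bus_get_status() fails, which keeps its quirk handling in one place. A small stand-alone model of that optimistic-default pattern; the flag names are stand-ins for the ACPI status bits:

#include <stdio.h>

#define STA_PRESENT	(1u << 0)
#define STA_ENABLED	(1u << 1)
#define STA_SHOW_IN_UI	(1u << 2)
#define STA_FUNCTIONING	(1u << 3)
#define STA_DEFAULT	(STA_PRESENT | STA_ENABLED | STA_SHOW_IN_UI | STA_FUNCTIONING)

struct device {
	unsigned int sta;
};

/* Models evaluating _STA: returns <0 when the method cannot be run. */
static int eval_sta(int has_sta_method, unsigned int *sta)
{
	if (!has_sta_method)
		return -1;
	*sta = STA_PRESENT | STA_FUNCTIONING;	/* whatever firmware reports */
	return 0;
}

static void add_device(struct device *dev, int has_sta_method)
{
	unsigned int sta;

	dev->sta = STA_DEFAULT;			/* optimistic initial value */
	if (eval_sta(has_sta_method, &sta) < 0)
		dev->sta = 0;			/* query failed: mark not present */
	else
		dev->sta = sta;
}

int main(void)
{
	struct device a, b;

	add_device(&a, 1);
	add_device(&b, 0);
	printf("a.sta=0x%x b.sta=0x%x\n", a.sta, b.sta);
	return 0;
}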