powerpc/pseries: Remove unneeded uses of dlpar work queue

There are three places where dlpar hotplug events are invoked: handling a
hotplug interrupt (in a KVM guest), handling a dlpar request through sysfs,
and updating LMB affinity when handling a PRRN event. Only when handling a
hotplug interrupt do we have to put the work on a workqueue; the other two
cases can handle the dlpar request directly.

This patch exports the handle_dlpar_errorlog() function so that dlpar
hotplug events can be handled directly, and updates the two cases mentioned
above to use the direct invocation.

Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Nathan Fontenot authored 2018-09-10 09:57:07 -05:00; committed by Michael Ellerman
parent cd24e457fd
commit fd12527a1d
4 changed files with 19 additions and 43 deletions
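
For context, a minimal sketch of the two invocation styles this change leaves in place, using only identifiers visible in the hunks below. The helper names prrn_style_readd() and hotplug_irq_path() are hypothetical, and the snippet is illustrative rather than a complete translation unit: the hotplug-interrupt path must still defer to the work queue (the DLPAR operations can block), while process-context callers such as the sysfs store and the PRRN update can now call handle_dlpar_errorlog() directly.

/* Process context (sysfs store, PRRN update): invoke the handler directly.
 * Hypothetical helper; mirrors the new prrn_update_node() below.
 */
static int prrn_style_readd(__be32 drc_index)
{
	struct pseries_hp_errorlog hp_elog;

	hp_elog.resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	hp_elog.action = PSERIES_HP_ELOG_ACTION_READD;
	hp_elog.id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
	hp_elog._drc_u.drc_index = drc_index;

	return handle_dlpar_errorlog(&hp_elog);
}

/* Interrupt context (hotplug interrupt in the RAS handler): still defer to
 * the work queue, since the handler cannot block here. Hypothetical helper.
 */
static void hotplug_irq_path(struct pseries_hp_errorlog *hp_elog)
{
	queue_hotplug_event(hp_elog);
}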

arch/powerpc/platforms/pseries/dlpar.c

@@ -32,8 +32,6 @@ static struct workqueue_struct *pseries_hp_wq;
 struct pseries_hp_work {
 	struct work_struct work;
 	struct pseries_hp_errorlog *errlog;
-	struct completion *hp_completion;
-	int *rc;
 };
 
 struct cc_workarea {
@@ -329,7 +327,7 @@ int dlpar_release_drc(u32 drc_index)
 	return 0;
 }
 
-static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
 {
 	int rc;
@@ -371,20 +369,13 @@ static void pseries_hp_work_fn(struct work_struct *work)
 	struct pseries_hp_work *hp_work =
 		container_of(work, struct pseries_hp_work, work);
 
-	if (hp_work->rc)
-		*(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
-	else
-		handle_dlpar_errorlog(hp_work->errlog);
-
-	if (hp_work->hp_completion)
-		complete(hp_work->hp_completion);
+	handle_dlpar_errorlog(hp_work->errlog);
 
 	kfree(hp_work->errlog);
 	kfree((void *)work);
 }
 
-void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
-			 struct completion *hotplug_done, int *rc)
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
 {
 	struct pseries_hp_work *work;
 	struct pseries_hp_errorlog *hp_errlog_copy;
@@ -397,13 +388,9 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
 	if (work) {
 		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
 		work->errlog = hp_errlog_copy;
-		work->hp_completion = hotplug_done;
-		work->rc = rc;
 		queue_work(pseries_hp_wq, (struct work_struct *)work);
 	} else {
-		*rc = -ENOMEM;
 		kfree(hp_errlog_copy);
-		complete(hotplug_done);
 	}
 }
@@ -521,18 +508,15 @@ static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
 static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
 			   const char *buf, size_t count)
 {
-	struct pseries_hp_errorlog *hp_elog;
-	struct completion hotplug_done;
+	struct pseries_hp_errorlog hp_elog;
 	char *argbuf;
 	char *args;
 	int rc;
 
 	args = argbuf = kstrdup(buf, GFP_KERNEL);
-	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
-	if (!hp_elog || !argbuf) {
+	if (!argbuf) {
 		pr_info("Could not allocate resources for DLPAR operation\n");
 		kfree(argbuf);
-		kfree(hp_elog);
 		return -ENOMEM;
 	}
@@ -540,25 +524,22 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
 	 * Parse out the request from the user, this will be in the form:
 	 * <resource> <action> <id_type> <id>
 	 */
-	rc = dlpar_parse_resource(&args, hp_elog);
+	rc = dlpar_parse_resource(&args, &hp_elog);
 	if (rc)
 		goto dlpar_store_out;
 
-	rc = dlpar_parse_action(&args, hp_elog);
+	rc = dlpar_parse_action(&args, &hp_elog);
 	if (rc)
 		goto dlpar_store_out;
 
-	rc = dlpar_parse_id_type(&args, hp_elog);
+	rc = dlpar_parse_id_type(&args, &hp_elog);
 	if (rc)
 		goto dlpar_store_out;
 
-	init_completion(&hotplug_done);
-	queue_hotplug_event(hp_elog, &hotplug_done, &rc);
-	wait_for_completion(&hotplug_done);
+	rc = handle_dlpar_errorlog(&hp_elog);
 
 dlpar_store_out:
 	kfree(argbuf);
-	kfree(hp_elog);
 
 	if (rc)
 		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

arch/powerpc/platforms/pseries/mobility.c

@@ -242,7 +242,7 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
 
 static void prrn_update_node(__be32 phandle)
 {
-	struct pseries_hp_errorlog *hp_elog;
+	struct pseries_hp_errorlog hp_elog;
 	struct device_node *dn;
 
 	/*
@@ -255,18 +255,12 @@ static void prrn_update_node(__be32 phandle)
 		return;
 	}
 
-	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
-	if(!hp_elog)
-		return;
-
-	hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
-	hp_elog->action = PSERIES_HP_ELOG_ACTION_READD;
-	hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
-	hp_elog->_drc_u.drc_index = phandle;
+	hp_elog.resource = PSERIES_HP_ELOG_RESOURCE_MEM;
+	hp_elog.action = PSERIES_HP_ELOG_ACTION_READD;
+	hp_elog.id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
+	hp_elog._drc_u.drc_index = phandle;
 
-	queue_hotplug_event(hp_elog, NULL, NULL);
-	kfree(hp_elog);
+	handle_dlpar_errorlog(&hp_elog);
 }
 
 int pseries_devicetree_update(s32 scope)

arch/powerpc/platforms/pseries/pseries.h

@@ -60,8 +60,9 @@ extern int dlpar_detach_node(struct device_node *);
 extern int dlpar_acquire_drc(u32 drc_index);
 extern int dlpar_release_drc(u32 drc_index);
 
-void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
-			 struct completion *hotplug_done, int *rc);
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog);
+int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
 #else

arch/powerpc/platforms/pseries/ras.c

@@ -334,7 +334,7 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id)
 	 */
 	if (hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_MEM ||
 	    hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU)
-		queue_hotplug_event(hp_elog, NULL, NULL);
+		queue_hotplug_event(hp_elog);
 	else
 		log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);