staging: unisys: visorbus: add struct visorchipset_device

Add the structure visorchipset_device and move the file-scope globals
into it. visorchipset_init() allocates the structure and saves the
acpi_device within it.
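
The shape of the change, as a minimal sketch condensed from the diff
below (the real struct carries the full set of former globals, and the
error path is abbreviated; -ENODEV mirrors the err value used in the
actual init function):

    /* before: driver state scattered across file-scope globals */
    static struct visorchannel *controlvm_channel;
    static unsigned long poll_jiffies;

    /* after: one per-driver context, linked to the acpi_device */
    struct visorchipset_device {
        struct acpi_device *acpi_device;
        struct visorchannel *controlvm_channel;
        unsigned long poll_jiffies;
        /* ... remaining former globals ... */
    };

    static struct visorchipset_device *chipset_dev;

    static int visorchipset_init(struct acpi_device *acpi_device)
    {
        chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
        if (!chipset_dev)
            return -ENODEV;
        /* link driver state and the ACPI device in both directions */
        acpi_device->driver_data = chipset_dev;
        chipset_dev->acpi_device = acpi_device;
        /* ... all later accesses go through chipset_dev-> ... */
        return 0;
    }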

Reported-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sameer Wadgaonkar <sameer.wadgaonkar@unisys.com>
Signed-off-by: David Kershner <david.kershner@unisys.com>
Reviewed-by: Tim Sell <timothy.sell@unisys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 765b2f8280 (parent c0487e7e83)
Author: Sameer Wadgaonkar
Date:   2017-02-21 12:53:29 -05:00

1 changed file, 112 insertions(+), 85 deletions(-)

@@ -72,9 +72,6 @@ visorchipset_release(struct inode *inode, struct file *file)
* message, we switch back to fast polling mode.
*/
#define MIN_IDLE_SECONDS 10
-static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-/* when we got our last controlvm message */
-static unsigned long most_recent_message_jiffies;
struct parser_context {
unsigned long allocbytes;
@@ -85,22 +82,28 @@ struct parser_context {
char data[0];
};
-static struct delayed_work periodic_controlvm_work;
+struct visorchipset_device {
+struct acpi_device *acpi_device;
+unsigned long poll_jiffies;
+/* when we got our last controlvm message */
+unsigned long most_recent_message_jiffies;
+struct delayed_work periodic_controlvm_work;
+struct cdev file_cdev;
+struct visorchannel **file_controlvm_channel;
+struct visorchannel *controlvm_channel;
+unsigned long controlvm_payload_bytes_buffered;
+/*
+ * The following variables are used to handle the scenario where we are
+ * unable to offload the payload from a controlvm message due to memory
+ * requirements. In this scenario, we simply stash the controlvm
+ * message, then attempt to process it again the next time
+ * controlvm_periodic_work() runs.
+ */
+struct controlvm_message controlvm_pending_msg;
+bool controlvm_pending_msg_valid;
+};
-static struct cdev file_cdev;
-static struct visorchannel **file_controlvm_channel;
-static struct visorchannel *controlvm_channel;
-static unsigned long controlvm_payload_bytes_buffered;
-/*
- * The following globals are used to handle the scenario where we are unable to
- * offload the payload from a controlvm message due to memory requirements. In
- * this scenario, we simply stash the controlvm message, then attempt to
- * process it again the next time controlvm_periodic_work() runs.
- */
-static struct controlvm_message controlvm_pending_msg;
-static bool controlvm_pending_msg_valid;
+static struct visorchipset_device *chipset_dev;
struct parahotplug_request {
struct list_head list;
@@ -119,7 +122,7 @@ static ssize_t toolaction_show(struct device *dev,
{
u8 tool_action = 0;
-visorchannel_read(controlvm_channel,
+visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
tool_action), &tool_action, sizeof(u8));
return sprintf(buf, "%u\n", tool_action);
@@ -136,7 +139,7 @@ static ssize_t toolaction_store(struct device *dev,
return -EINVAL;
ret = visorchannel_write
-(controlvm_channel,
+(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
tool_action),
&tool_action, sizeof(u8));
@@ -153,7 +156,7 @@ static ssize_t boottotool_show(struct device *dev,
{
struct efi_spar_indication efi_spar_indication;
-visorchannel_read(controlvm_channel,
+visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
efi_spar_ind), &efi_spar_indication,
sizeof(struct efi_spar_indication));
@@ -172,7 +175,7 @@ static ssize_t boottotool_store(struct device *dev,
efi_spar_indication.boot_to_tool = val;
ret = visorchannel_write
-(controlvm_channel,
+(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
efi_spar_ind), &(efi_spar_indication),
sizeof(struct efi_spar_indication));
@@ -188,7 +191,7 @@ static ssize_t error_show(struct device *dev, struct device_attribute *attr,
{
u32 error = 0;
-visorchannel_read(controlvm_channel,
+visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_error),
&error, sizeof(u32));
@@ -205,7 +208,7 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
ret = visorchannel_write
-(controlvm_channel,
+(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_error),
&error, sizeof(u32));
@@ -221,7 +224,7 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
u32 text_id = 0;
visorchannel_read
-(controlvm_channel,
+(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_text_id),
&text_id, sizeof(u32));
@@ -238,7 +241,7 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
ret = visorchannel_write
-(controlvm_channel,
+(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_text_id),
&text_id, sizeof(u32));
@@ -253,7 +256,7 @@ static ssize_t remaining_steps_show(struct device *dev,
{
u16 remaining_steps = 0;
-visorchannel_read(controlvm_channel,
+visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_remaining_steps),
&remaining_steps, sizeof(u16));
@@ -271,7 +274,7 @@ static ssize_t remaining_steps_store(struct device *dev,
return -EINVAL;
ret = visorchannel_write
-(controlvm_channel,
+(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_remaining_steps),
&remaining_steps, sizeof(u16));
@@ -292,7 +295,7 @@ parser_id_get(struct parser_context *ctx)
static void parser_done(struct parser_context *ctx)
{
-controlvm_payload_bytes_buffered -= ctx->param_bytes;
+chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
kfree(ctx);
}
@@ -405,7 +408,7 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.init_chipset.features = features;
-return visorchannel_signalinsert(controlvm_channel,
+return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -455,7 +458,7 @@ controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
if (outmsg.hdr.flags.test_message == 1)
return -EINVAL;
-return visorchannel_signalinsert(controlvm_channel,
+return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -468,7 +471,7 @@ static int controlvm_respond_physdev_changestate(
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.device_change_state.state = state;
outmsg.cmd.device_change_state.flags.phys_device = 1;
-return visorchannel_signalinsert(controlvm_channel,
+return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -484,7 +487,7 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
u16 local_crash_msg_count;
int err;
-err = visorchannel_read(controlvm_channel,
+err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16));
@@ -501,7 +504,7 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
return -EIO;
}
-err = visorchannel_read(controlvm_channel,
+err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32));
@@ -514,7 +517,7 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
switch (typ) {
case CRASH_DEV:
local_crash_msg_offset += sizeof(struct controlvm_message);
-err = visorchannel_write(controlvm_channel,
+err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset,
msg,
sizeof(struct controlvm_message));
@@ -525,7 +528,7 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
}
break;
case CRASH_BUS:
-err = visorchannel_write(controlvm_channel,
+err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset,
msg,
sizeof(struct controlvm_message));
@@ -576,7 +579,7 @@ device_changestate_responder(enum controlvm_id cmd_id,
outmsg.cmd.device_change_state.dev_no = dev_no;
outmsg.cmd.device_change_state.state = response_state;
-return visorchannel_signalinsert(controlvm_channel,
+return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -1398,7 +1401,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
chipset_init(&msg);
/* get saved message count */
-if (visorchannel_read(controlvm_channel,
+if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16)) < 0) {
@@ -1415,7 +1418,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
}
/* get saved crash message offset */
-if (visorchannel_read(controlvm_channel,
+if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32)) < 0) {
@@ -1425,7 +1428,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
}
/* read create device message for storage bus offset */
-if (visorchannel_read(controlvm_channel,
+if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset,
&local_crash_bus_msg,
sizeof(struct controlvm_message)) < 0) {
@@ -1435,7 +1438,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
}
/* read create device message for storage device */
-if (visorchannel_read(controlvm_channel,
+if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset +
sizeof(struct controlvm_message),
&local_crash_dev_msg,
@@ -1548,11 +1551,11 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
switch (offset) {
case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
vma->vm_flags |= VM_IO;
-if (!*file_controlvm_channel)
+if (!*chipset_dev->file_controlvm_channel)
return -ENXIO;
visorchannel_read
-(*file_controlvm_channel,
+(*chipset_dev->file_controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
gp_control_channel),
&addr, sizeof(addr));
@@ -1633,9 +1636,9 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
int rc = 0;
-file_controlvm_channel = controlvm_channel;
-cdev_init(&file_cdev, &visorchipset_fops);
-file_cdev.owner = THIS_MODULE;
+chipset_dev->file_controlvm_channel = controlvm_channel;
+cdev_init(&chipset_dev->file_cdev, &visorchipset_fops);
+chipset_dev->file_cdev.owner = THIS_MODULE;
if (MAJOR(major_dev) == 0) {
rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
/* dynamic major device number registration required */
@@ -1647,7 +1650,8 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
if (rc < 0)
return rc;
}
-rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
+rc = cdev_add(&chipset_dev->file_cdev,
+MKDEV(MAJOR(major_dev), 0), 1);
if (rc < 0) {
unregister_chrdev_region(major_dev, 1);
return rc;
@@ -1658,9 +1662,9 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
static void
visorchipset_file_cleanup(dev_t major_dev)
{
-if (file_cdev.ops)
-cdev_del(&file_cdev);
-file_cdev.ops = NULL;
+if (chipset_dev->file_cdev.ops)
+cdev_del(&chipset_dev->file_cdev);
+chipset_dev->file_cdev.ops = NULL;
unregister_chrdev_region(major_dev, 1);
}
@@ -1677,7 +1681,7 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
* '\0'-terminated
*/
allocbytes++;
-if ((controlvm_payload_bytes_buffered + bytes)
+if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
> MAX_CONTROLVM_PAYLOAD_BYTES) {
*retry = true;
return NULL;
@@ -1710,7 +1714,7 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
}
ctx->byte_stream = true;
-controlvm_payload_bytes_buffered += ctx->param_bytes;
+chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
return ctx;
@@ -1769,10 +1773,10 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
if (!local_addr) {
controlvm_init_response(&ackmsg, &inmsg.hdr,
CONTROLVM_RESP_SUCCESS);
-if (controlvm_channel)
-visorchannel_signalinsert(controlvm_channel,
-CONTROLVM_QUEUE_ACK,
-&ackmsg);
+if (chipset_dev->controlvm_channel)
+visorchannel_signalinsert(
+chipset_dev->controlvm_channel,
+CONTROLVM_QUEUE_ACK, &ackmsg);
}
switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
@@ -1844,7 +1848,7 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
static bool
read_controlvm_event(struct controlvm_message *msg)
{
-if (!visorchannel_signalremove(controlvm_channel,
+if (!visorchannel_signalremove(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_EVENT, msg)) {
/* got a message */
if (msg->hdr.flags.test_message == 1)
@@ -1892,19 +1896,19 @@ controlvm_periodic_work(struct work_struct *work)
bool got_command = false;
bool handle_command_failed = false;
-while (!visorchannel_signalremove(controlvm_channel,
+while (!visorchannel_signalremove(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_RESPONSE,
&inmsg))
;
if (!got_command) {
-if (controlvm_pending_msg_valid) {
+if (chipset_dev->controlvm_pending_msg_valid) {
/*
* we throttled processing of a prior
* msg, so try to process it again
* rather than reading a new one
*/
-inmsg = controlvm_pending_msg;
-controlvm_pending_msg_valid = false;
+inmsg = chipset_dev->controlvm_pending_msg;
+chipset_dev->controlvm_pending_msg_valid = false;
got_command = true;
} else {
got_command = read_controlvm_event(&inmsg);
@@ -1913,10 +1917,10 @@ controlvm_periodic_work(struct work_struct *work)
handle_command_failed = false;
while (got_command && (!handle_command_failed)) {
-most_recent_message_jiffies = jiffies;
+chipset_dev->most_recent_message_jiffies = jiffies;
if (handle_command(inmsg,
visorchannel_get_physaddr
-(controlvm_channel)))
+(chipset_dev->controlvm_channel)))
got_command = read_controlvm_event(&inmsg);
else {
/*
@@ -1927,29 +1931,34 @@ controlvm_periodic_work(struct work_struct *work)
* reprocess it on our next loop
*/
handle_command_failed = true;
-controlvm_pending_msg = inmsg;
-controlvm_pending_msg_valid = true;
+chipset_dev->controlvm_pending_msg = inmsg;
+chipset_dev->controlvm_pending_msg_valid = true;
}
}
/* parahotplug_worker */
parahotplug_process_list();
-if (time_after(jiffies,
-most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
+if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
+(HZ * MIN_IDLE_SECONDS))) {
/*
* it's been longer than MIN_IDLE_SECONDS since we
* processed our last controlvm message; slow down the
* polling
*/
-if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
-poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+if (chipset_dev->poll_jiffies !=
+POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
+chipset_dev->poll_jiffies =
+POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
} else {
-if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
-poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+if (chipset_dev->poll_jiffies !=
+POLLJIFFIES_CONTROLVMCHANNEL_FAST)
+chipset_dev->poll_jiffies =
+POLLJIFFIES_CONTROLVMCHANNEL_FAST;
}
-schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+chipset_dev->poll_jiffies);
}
static int
@@ -1958,36 +1967,49 @@ visorchipset_init(struct acpi_device *acpi_device)
int err = -ENODEV;
u64 addr;
uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
+struct visorchannel *controlvm_channel;
addr = controlvm_get_channel_address();
if (!addr)
goto error;
-controlvm_channel = visorchannel_create_with_lock(addr, 0,
-GFP_KERNEL, uuid);
-if (!controlvm_channel)
+chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
+if (!chipset_dev)
goto error;
+acpi_device->driver_data = chipset_dev;
+chipset_dev->acpi_device = acpi_device;
+chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+controlvm_channel = visorchannel_create_with_lock(addr,
+0, GFP_KERNEL, uuid);
+if (!controlvm_channel)
+goto error_free_chipset_dev;
+chipset_dev->controlvm_channel = controlvm_channel;
if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
visorchannel_get_header(controlvm_channel)))
goto error_destroy_channel;
major_dev = MKDEV(visorchipset_major, 0);
-err = visorchipset_file_init(major_dev, &controlvm_channel);
+err = visorchipset_file_init(major_dev,
+&chipset_dev->controlvm_channel);
if (err < 0)
goto error_destroy_channel;
/* if booting in a crash kernel */
if (is_kdump_kernel())
-INIT_DELAYED_WORK(&periodic_controlvm_work,
+INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
setup_crash_devices_work_queue);
else
-INIT_DELAYED_WORK(&periodic_controlvm_work,
+INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
controlvm_periodic_work);
-most_recent_message_jiffies = jiffies;
-poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+chipset_dev->most_recent_message_jiffies = jiffies;
+chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+chipset_dev->poll_jiffies);
visorchipset_platform_device.dev.devt = major_dev;
if (platform_device_register(&visorchipset_platform_device) < 0) {
@@ -2008,11 +2030,14 @@ visorchipset_init(struct acpi_device *acpi_device)
platform_device_unregister(&visorchipset_platform_device);
error_cancel_work:
-cancel_delayed_work_sync(&periodic_controlvm_work);
+cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
visorchipset_file_cleanup(major_dev);
error_destroy_channel:
-visorchannel_destroy(controlvm_channel);
+visorchannel_destroy(chipset_dev->controlvm_channel);
+error_free_chipset_dev:
+kfree(chipset_dev);
error:
POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
@@ -2026,12 +2051,14 @@ visorchipset_exit(struct acpi_device *acpi_device)
visorbus_exit();
-cancel_delayed_work_sync(&periodic_controlvm_work);
+cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
-visorchannel_destroy(controlvm_channel);
+visorchannel_destroy(chipset_dev->controlvm_channel);
visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
platform_device_unregister(&visorchipset_platform_device);
+kfree(chipset_dev);
POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
return 0;