Diffstat (limited to 'drivers/staging/unisys/visorbus/visorchipset.c')
-rw-r--r--  drivers/staging/unisys/visorbus/visorchipset.c | 1043
1 file changed, 453 insertions(+), 590 deletions(-)
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 97778d7..4cfd0fa 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -15,13 +15,11 @@
*/
#include <linux/acpi.h>
-#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>
@@ -31,13 +29,11 @@
#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
-#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
+#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
-#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
-
#define UNISYS_SPAR_LEAF_ID 0x40000000
/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
@@ -46,35 +42,11 @@
#define UNISYS_SPAR_ID_EDX 0x34367261
/*
- * Module parameters
- */
-static int visorchipset_major;
-
-static int
-visorchipset_open(struct inode *inode, struct file *file)
-{
- unsigned int minor_number = iminor(inode);
-
- if (minor_number)
- return -ENODEV;
- return 0;
-}
-
-static int
-visorchipset_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-/*
* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
* we switch to slow polling mode. As soon as we get a controlvm
* message, we switch back to fast polling mode.
*/
#define MIN_IDLE_SECONDS 10
-static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-/* when we got our last controlvm message */
-static unsigned long most_recent_message_jiffies;
struct parser_context {
unsigned long allocbytes;
@@ -85,22 +57,33 @@ struct parser_context {
char data[0];
};
-static struct delayed_work periodic_controlvm_work;
-
-static struct cdev file_cdev;
-static struct visorchannel **file_controlvm_channel;
+struct vmcall_controlvm_addr {
+ struct vmcall_io_controlvm_addr_params params;
+ int err;
+ u64 physaddr;
+};
-static struct visorchannel *controlvm_channel;
-static unsigned long controlvm_payload_bytes_buffered;
+struct visorchipset_device {
+ struct acpi_device *acpi_device;
+ unsigned long poll_jiffies;
+ /* when we got our last controlvm message */
+ unsigned long most_recent_message_jiffies;
+ struct delayed_work periodic_controlvm_work;
+ struct visorchannel *controlvm_channel;
+ unsigned long controlvm_payload_bytes_buffered;
+ /*
+ * The following variables are used to handle the scenario where we are
+ * unable to offload the payload from a controlvm message due to memory
+ * requirements. In this scenario, we simply stash the controlvm
+ * message, then attempt to process it again the next time
+ * controlvm_periodic_work() runs.
+ */
+ struct controlvm_message controlvm_pending_msg;
+ bool controlvm_pending_msg_valid;
+ struct vmcall_controlvm_addr controlvm_addr;
+};
-/*
- * The following globals are used to handle the scenario where we are unable to
- * offload the payload from a controlvm message due to memory requirements. In
- * this scenario, we simply stash the controlvm message, then attempt to
- * process it again the next time controlvm_periodic_work() runs.
- */
-static struct controlvm_message controlvm_pending_msg;
-static bool controlvm_pending_msg_valid;
+static struct visorchipset_device *chipset_dev;
struct parahotplug_request {
struct list_head list;
@@ -109,19 +92,21 @@ struct parahotplug_request {
struct controlvm_message msg;
};
-/* info for /dev/visorchipset */
-static dev_t major_dev = -1; /*< indicates major num for device */
-
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u8 tool_action = 0;
+ int err;
+
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action),
+ &tool_action, sizeof(u8));
+ if (err)
+ return err;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- tool_action), &tool_action, sizeof(u8));
return sprintf(buf, "%u\n", tool_action);
}
@@ -130,19 +115,19 @@ static ssize_t toolaction_store(struct device *dev,
const char *buf, size_t count)
{
u8 tool_action;
- int ret;
+ int err;
if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
tool_action),
&tool_action, sizeof(u8));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(toolaction);
@@ -152,11 +137,16 @@ static ssize_t boottotool_show(struct device *dev,
char *buf)
{
struct efi_spar_indication efi_spar_indication;
+ int err;
+
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind),
+ &efi_spar_indication,
+ sizeof(struct efi_spar_indication));
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- efi_spar_ind), &efi_spar_indication,
- sizeof(struct efi_spar_indication));
+ if (err)
+ return err;
return sprintf(buf, "%u\n", efi_spar_indication.boot_to_tool);
}
@@ -164,21 +154,21 @@ static ssize_t boottotool_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int val, ret;
+ int val, err;
struct efi_spar_indication efi_spar_indication;
if (kstrtoint(buf, 10, &val))
return -EINVAL;
efi_spar_indication.boot_to_tool = val;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
efi_spar_ind), &(efi_spar_indication),
sizeof(struct efi_spar_indication));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(boottotool);
@@ -187,11 +177,14 @@ static ssize_t error_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u32 error = 0;
+ int err;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_error),
- &error, sizeof(u32));
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
+ &error, sizeof(u32));
+ if (err)
+ return err;
return sprintf(buf, "%i\n", error);
}
@@ -199,18 +192,18 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 error;
- int ret;
+ int err;
if (kstrtou32(buf, 10, &error))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_error),
&error, sizeof(u32));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(error);
@@ -219,12 +212,16 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u32 text_id = 0;
+ int err;
+
+ err = visorchannel_read
+ (chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
+ if (err)
+ return err;
- visorchannel_read
- (controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
return sprintf(buf, "%i\n", text_id);
}
@@ -232,18 +229,18 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 text_id;
- int ret;
+ int err;
if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_text_id),
&text_id, sizeof(u32));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(textid);
@@ -252,11 +249,15 @@ static ssize_t remaining_steps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u16 remaining_steps = 0;
+ int err;
+
+ err = visorchannel_read(chipset_dev->controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
+ &remaining_steps, sizeof(u16));
+ if (err)
+ return err;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
return sprintf(buf, "%hu\n", remaining_steps);
}
@@ -265,18 +266,18 @@ static ssize_t remaining_steps_store(struct device *dev,
const char *buf, size_t count)
{
u16 remaining_steps;
- int ret;
+ int err;
if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
- ret = visorchannel_write
- (controlvm_channel,
+ err = visorchannel_write
+ (chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
installation_remaining_steps),
&remaining_steps, sizeof(u16));
- if (ret)
- return ret;
+ if (err)
+ return err;
return count;
}
static DEVICE_ATTR_RW(remaining_steps);
@@ -292,7 +293,7 @@ parser_id_get(struct parser_context *ctx)
static void parser_done(struct parser_context *ctx)
{
- controlvm_payload_bytes_buffered -= ctx->param_bytes;
+ chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
kfree(ctx);
}
@@ -318,7 +319,7 @@ parser_string_get(struct parser_context *ctx)
}
if (value_length < 0) /* '\0' was not included in the length */
value_length = nscan;
- value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
+ value = kmalloc(value_length + 1, GFP_KERNEL);
if (!value)
return NULL;
if (value_length > 0)
@@ -405,7 +406,7 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.init_chipset.features = features;
- return visorchannel_signalinsert(controlvm_channel,
+ return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -417,14 +418,12 @@ chipset_init(struct controlvm_message *inmsg)
int rc = CONTROLVM_RESP_SUCCESS;
int res = 0;
- POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
if (chipset_inited) {
rc = -CONTROLVM_RESP_ALREADY_DONE;
res = -EIO;
goto out_respond;
}
chipset_inited = 1;
- POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
/*
* Set features to indicate we support parahotplug (if Command
@@ -447,7 +446,8 @@ out_respond:
}
static int
-controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
+controlvm_respond(struct controlvm_message_header *msg_hdr, int response,
+ struct spar_segment_state *state)
{
struct controlvm_message outmsg;
@@ -455,20 +455,12 @@ controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
if (outmsg.hdr.flags.test_message == 1)
return -EINVAL;
- return visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_REQUEST, &outmsg);
-}
-
-static int controlvm_respond_physdev_changestate(
- struct controlvm_message_header *msg_hdr, int response,
- struct spar_segment_state state)
-{
- struct controlvm_message outmsg;
+ if (state) {
+ outmsg.cmd.device_change_state.state = *state;
+ outmsg.cmd.device_change_state.flags.phys_device = 1;
+ }
- controlvm_init_response(&outmsg, msg_hdr, response);
- outmsg.cmd.device_change_state.state = state;
- outmsg.cmd.device_change_state.flags.phys_device = 1;
- return visorchannel_signalinsert(controlvm_channel,
+ return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -484,68 +476,68 @@ save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
u16 local_crash_msg_count;
int err;
- err = visorchannel_read(controlvm_channel,
+ err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16));
if (err) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read message count\n");
return err;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
- local_crash_msg_count,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "invalid number of messages\n");
return -EIO;
}
- err = visorchannel_read(controlvm_channel,
+ err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32));
if (err) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read offset\n");
return err;
}
switch (typ) {
case CRASH_DEV:
local_crash_msg_offset += sizeof(struct controlvm_message);
- err = visorchannel_write(controlvm_channel,
+ err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset,
msg,
sizeof(struct controlvm_message));
if (err) {
- POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to write dev msg\n");
return err;
}
break;
case CRASH_BUS:
- err = visorchannel_write(controlvm_channel,
+ err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset,
msg,
sizeof(struct controlvm_message));
if (err) {
- POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to write bus msg\n");
return err;
}
break;
default:
- pr_info("Invalid crash_obj_type\n");
+ dev_err(&chipset_dev->acpi_device->dev,
+ "Invalid crash_obj_type\n");
break;
}
return 0;
}
static int
-bus_responder(enum controlvm_id cmd_id,
- struct controlvm_message_header *pending_msg_hdr,
- int response)
+controlvm_responder(enum controlvm_id cmd_id,
+ struct controlvm_message_header *pending_msg_hdr,
+ int response)
{
if (!pending_msg_hdr)
return -EIO;
@@ -553,7 +545,7 @@ bus_responder(enum controlvm_id cmd_id,
if (pending_msg_hdr->id != (u32)cmd_id)
return -EINVAL;
- return controlvm_respond(pending_msg_hdr, response);
+ return controlvm_respond(pending_msg_hdr, response, NULL);
}
static int
@@ -576,25 +568,11 @@ device_changestate_responder(enum controlvm_id cmd_id,
outmsg.cmd.device_change_state.dev_no = dev_no;
outmsg.cmd.device_change_state.state = response_state;
- return visorchannel_signalinsert(controlvm_channel,
+ return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
static int
-device_responder(enum controlvm_id cmd_id,
- struct controlvm_message_header *pending_msg_hdr,
- int response)
-{
- if (!pending_msg_hdr)
- return -EIO;
-
- if (pending_msg_hdr->id != (u32)cmd_id)
- return -EINVAL;
-
- return controlvm_respond(pending_msg_hdr, response);
-}
-
-static int
bus_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
@@ -606,16 +584,14 @@ bus_create(struct controlvm_message *inmsg)
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (bus_info && (bus_info->state.created == 1)) {
- POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed bus_create: already exists\n");
err = -EEXIST;
goto err_respond;
}
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
- POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_respond;
}
@@ -624,8 +600,6 @@ bus_create(struct controlvm_message *inmsg)
bus_info->chipset_bus_no = bus_no;
bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
- POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
-
if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
err = save_crash_message(inmsg, CRASH_BUS);
if (err)
@@ -636,9 +610,6 @@ bus_create(struct controlvm_message *inmsg)
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
GFP_KERNEL);
if (!pmsg_hdr) {
- POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
- bus_info->chipset_bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_free_bus_info;
}
@@ -654,19 +625,22 @@ bus_create(struct controlvm_message *inmsg)
cmd->create_bus.bus_data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_free_pending_msg;
}
bus_info->visorchannel = visorchannel;
/* Response will be handled by chipset_bus_create */
- chipset_bus_create(bus_info);
+ err = chipset_bus_create(bus_info);
+ /* If error chipset_bus_create didn't respond, need to respond here */
+ if (err)
+ goto err_destroy_channel;
- POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
return 0;
+err_destroy_channel:
+ visorchannel_destroy(visorchannel);
+
err_free_pending_msg:
kfree(bus_info->pending_msg_hdr);
@@ -675,7 +649,7 @@ err_free_bus_info:
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -705,9 +679,6 @@ bus_destroy(struct controlvm_message *inmsg)
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
- POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
- bus_info->chipset_bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_respond;
}
@@ -723,7 +694,7 @@ bus_destroy(struct controlvm_message *inmsg)
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -737,23 +708,14 @@ bus_configure(struct controlvm_message *inmsg,
int err = 0;
bus_no = cmd->configure_bus.bus_no;
- POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
- DIAG_SEVERITY_PRINT);
-
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -EINVAL;
goto err_respond;
} else if (bus_info->state.created == 0) {
- POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -EINVAL;
goto err_respond;
} else if (bus_info->pending_msg_hdr) {
- POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
- DIAG_SEVERITY_ERR);
err = -EIO;
goto err_respond;
}
@@ -769,16 +731,15 @@ bus_configure(struct controlvm_message *inmsg,
bus_info->name = parser_name_get(parser_ctx);
}
- POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
- DIAG_SEVERITY_PRINT);
-
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return 0;
err_respond:
+ dev_err(&chipset_dev->acpi_device->dev,
+ "bus_configured exited with err: %d\n", err);
if (inmsg->hdr.flags.response_expected == 1)
- bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -796,31 +757,29 @@ my_device_create(struct controlvm_message *inmsg)
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bus_info) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to get bus by id: %d\n", bus_no);
err = -ENODEV;
goto err_respond;
}
if (bus_info->state.created == 0) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "bus not created, id: %d\n", bus_no);
err = -EINVAL;
goto err_respond;
}
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (dev_info && (dev_info->state.created == 1)) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to get bus by id: %d/%d\n", bus_no, dev_no);
err = -EEXIST;
goto err_respond;
}
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENOMEM;
goto err_respond;
}
@@ -832,9 +791,6 @@ my_device_create(struct controlvm_message *inmsg)
/* not sure where the best place to set the 'parent' */
dev_info->device.parent = &bus_info->device;
- POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
- DIAG_SEVERITY_PRINT);
-
visorchannel =
visorchannel_create_with_lock(cmd->create_device.channel_addr,
cmd->create_device.channel_bytes,
@@ -842,8 +798,9 @@ my_device_create(struct controlvm_message *inmsg)
cmd->create_device.data_type_uuid);
if (!visorchannel) {
- POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to create visorchannel: %d/%d\n",
+ bus_no, dev_no);
err = -ENOMEM;
goto err_free_dev_info;
}
@@ -853,14 +810,14 @@ my_device_create(struct controlvm_message *inmsg)
spar_vhba_channel_protocol_uuid) == 0) {
err = save_crash_message(inmsg, CRASH_DEV);
if (err)
- goto err_free_dev_info;
+ goto err_destroy_visorchannel;
}
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
- goto err_free_dev_info;
+ goto err_destroy_visorchannel;
}
memcpy(pmsg_hdr, &inmsg->hdr,
@@ -868,17 +825,21 @@ my_device_create(struct controlvm_message *inmsg)
dev_info->pending_msg_hdr = pmsg_hdr;
}
/* Chipset_device_create will send response */
- chipset_device_create(dev_info);
- POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
- DIAG_SEVERITY_PRINT);
+ err = chipset_device_create(dev_info);
+ if (err)
+ goto err_destroy_visorchannel;
+
return 0;
+err_destroy_visorchannel:
+ visorchannel_destroy(visorchannel);
+
err_free_dev_info:
kfree(dev_info);
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- device_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -891,18 +852,14 @@ my_device_changestate(struct controlvm_message *inmsg)
u32 dev_no = cmd->device_change_state.dev_no;
struct spar_segment_state state = cmd->device_change_state.state;
struct visor_device *dev_info;
- int err;
+ int err = 0;
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
if (!dev_info) {
- POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
err = -ENODEV;
goto err_respond;
}
if (dev_info->state.created == 0) {
- POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
- DIAG_SEVERITY_ERR);
err = -EINVAL;
goto err_respond;
}
@@ -926,7 +883,7 @@ my_device_changestate(struct controlvm_message *inmsg)
if (state.alive == segment_state_running.alive &&
state.operating == segment_state_running.operating)
/* Response will be sent from chipset_device_resume */
- chipset_device_resume(dev_info);
+ err = chipset_device_resume(dev_info);
/* ServerNotReady / ServerLost / SegmentStateStandby */
else if (state.alive == segment_state_standby.alive &&
state.operating == segment_state_standby.operating)
@@ -934,12 +891,16 @@ my_device_changestate(struct controlvm_message *inmsg)
* technically this is standby case where server is lost.
* Response will be sent from chipset_device_pause.
*/
- chipset_device_pause(dev_info);
+ err = chipset_device_pause(dev_info);
+ if (err)
+ goto err_respond;
+
return 0;
err_respond:
+ dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
if (inmsg->hdr.flags.response_expected == 1)
- device_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -985,7 +946,7 @@ my_device_destroy(struct controlvm_message *inmsg)
err_respond:
if (inmsg->hdr.flags.response_expected == 1)
- device_responder(inmsg->hdr.id, &inmsg->hdr, err);
+ controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return err;
}
@@ -1004,7 +965,7 @@ err_respond:
#define PARAHOTPLUG_TIMEOUT_MS 2000
-/**
+/*
* parahotplug_next_id() - generate unique int to match an outstanding
* CONTROLVM message with a udev script /sys
* response
@@ -1019,7 +980,7 @@ parahotplug_next_id(void)
return atomic_inc_return(&id);
}
-/**
+/*
* parahotplug_next_expiration() - returns the time (in jiffies) when a
* CONTROLVM message on the list should expire
* -- PARAHOTPLUG_TIMEOUT_MS in the future
@@ -1032,7 +993,7 @@ parahotplug_next_expiration(void)
return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
-/**
+/*
* parahotplug_request_create() - create a parahotplug_request, which is
* basically a wrapper for a CONTROLVM_MESSAGE
* that we can stick on a list
@@ -1045,7 +1006,7 @@ parahotplug_request_create(struct controlvm_message *msg)
{
struct parahotplug_request *req;
- req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
@@ -1056,7 +1017,7 @@ parahotplug_request_create(struct controlvm_message *msg)
return req;
}
-/**
+/*
* parahotplug_request_destroy() - free a parahotplug_request
* @req: the request to deallocate
*/
@@ -1069,7 +1030,7 @@ parahotplug_request_destroy(struct parahotplug_request *req)
static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
-/**
+/*
* parahotplug_request_complete() - mark request as complete
* @id: the id of the request
* @active: indicates whether the request is assigned to active partition
@@ -1101,9 +1062,9 @@ parahotplug_request_complete(int id, u16 active)
spin_unlock(&parahotplug_request_list_lock);
req->msg.cmd.device_change_state.state.active = active;
if (req->msg.hdr.flags.response_expected)
- controlvm_respond_physdev_changestate(
- &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
- req->msg.cmd.device_change_state.state);
+ controlvm_respond(
+ &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
+ &req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
return 0;
}
@@ -1113,7 +1074,7 @@ parahotplug_request_complete(int id, u16 active)
return -EINVAL;
}
-/**
+/*
* devicedisabled_store() - disables the hotplug device
* @dev: sysfs interface variable not utilized in this function
* @attr: sysfs interface variable not utilized in this function
@@ -1143,7 +1104,7 @@ static ssize_t devicedisabled_store(struct device *dev,
}
static DEVICE_ATTR_WO(devicedisabled);
-/**
+/*
* deviceenabled_store() - enables the hotplug device
* @dev: sysfs interface variable not utilized in this function
* @attr: sysfs interface variable not utilized in this function
@@ -1201,26 +1162,14 @@ static const struct attribute_group *visorchipset_dev_groups[] = {
NULL
};
-static void visorchipset_dev_release(struct device *dev)
-{
-}
-
-/* /sys/devices/platform/visorchipset */
-static struct platform_device visorchipset_platform_device = {
- .name = "visorchipset",
- .id = -1,
- .dev.groups = visorchipset_dev_groups,
- .dev.release = visorchipset_dev_release,
-};
-
-/**
+/*
* parahotplug_request_kickoff() - initiate parahotplug request
* @req: the request to initiate
*
* Cause uevent to run the user level script to do the disable/enable specified
* in the parahotplug_request.
*/
-static void
+static int
parahotplug_request_kickoff(struct parahotplug_request *req)
{
struct controlvm_message_packet *cmd = &req->msg.cmd;
@@ -1241,56 +1190,59 @@ parahotplug_request_kickoff(struct parahotplug_request *req)
sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
cmd->device_change_state.dev_no & 0x7);
- kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
- envp);
+ return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_CHANGE, envp);
}
-/**
+/*
* parahotplug_process_message() - enables or disables a PCI device by kicking
* off a udev script
* @inmsg: the message indicating whether to enable or disable
*/
-static void
+static int
parahotplug_process_message(struct controlvm_message *inmsg)
{
struct parahotplug_request *req;
+ int err;
req = parahotplug_request_create(inmsg);
if (!req)
- return;
+ return -ENOMEM;
+ /*
+ * For enable messages, just respond with success right away, we don't
+ * need to wait to see if the enable was successful.
+ */
if (inmsg->cmd.device_change_state.state.active) {
- /*
- * For enable messages, just respond with success
- * right away. This is a bit of a hack, but there are
- * issues with the early enable messages we get (with
- * either the udev script not detecting that the device
- * is up, or not getting called at all). Fortunately
- * the messages that get lost don't matter anyway, as
- *
- * devices are automatically enabled at
- * initialization.
- */
- parahotplug_request_kickoff(req);
- controlvm_respond_physdev_changestate
- (&inmsg->hdr,
- CONTROLVM_RESP_SUCCESS,
- inmsg->cmd.device_change_state.state);
+ err = parahotplug_request_kickoff(req);
+ if (err)
+ goto err_respond;
+ controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
+ &inmsg->cmd.device_change_state.state);
parahotplug_request_destroy(req);
- } else {
- /*
- * For disable messages, add the request to the
- * request list before kicking off the udev script. It
- * won't get responded to until the script has
- * indicated it's done.
- */
- spin_lock(&parahotplug_request_list_lock);
- list_add_tail(&req->list, &parahotplug_request_list);
- spin_unlock(&parahotplug_request_list_lock);
-
- parahotplug_request_kickoff(req);
+ return 0;
}
+
+ /*
+ * For disable messages, add the request to the
+ * request list before kicking off the udev script. It
+ * won't get responded to until the script has
+ * indicated it's done.
+ */
+ spin_lock(&parahotplug_request_list_lock);
+ list_add_tail(&req->list, &parahotplug_request_list);
+ spin_unlock(&parahotplug_request_list_lock);
+
+ err = parahotplug_request_kickoff(req);
+ if (err)
+ goto err_respond;
+ return 0;
+
+err_respond:
+ controlvm_respond(&inmsg->hdr, err,
+ &inmsg->cmd.device_change_state.state);
+ return err;
}
/*
@@ -1303,12 +1255,15 @@ parahotplug_process_message(struct controlvm_message *inmsg)
static int
chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
- kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
+ int res;
+
+ res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_ONLINE);
if (msg_hdr->flags.response_expected)
- return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(msg_hdr, res, NULL);
- return 0;
+ return res;
}
/*
@@ -1323,15 +1278,16 @@ chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
{
char env_selftest[20];
char *envp[] = { env_selftest, NULL };
+ int res;
sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
- kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
- envp);
+ res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_CHANGE, envp);
if (msg_hdr->flags.response_expected)
- return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(msg_hdr, res, NULL);
- return 0;
+ return res;
}
/*
@@ -1344,28 +1300,62 @@ chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
static int
chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
- kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
+ int res;
+ res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
+ KOBJ_OFFLINE);
if (msg_hdr->flags.response_expected)
- return controlvm_respond(msg_hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(msg_hdr, res, NULL);
- return 0;
+ return res;
}
-static inline unsigned int
-issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
+static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
- struct vmcall_io_controlvm_addr_params params;
- int result = VMCALL_SUCCESS;
- u64 physaddr;
+ int result = 0;
+ unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
+ unsigned long reg_ebx;
+ unsigned long reg_ecx;
+
+ reg_ebx = param & 0xFFFFFFFF;
+ reg_ecx = param >> 32;
+
+ cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
+ if (!(cpuid_ecx & 0x80000000))
+ return -EPERM;
+
+ __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
+
+ if (result)
+ goto error;
- physaddr = virt_to_phys(&params);
- ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
- if (VMCALL_SUCCESSFUL(result)) {
- *control_addr = params.address;
- *control_bytes = params.channel_bytes;
+ return 0;
+
+error: /* Need to convert from VMCALL error codes to Linux */
+ switch (result) {
+ case VMCALL_RESULT_INVALID_PARAM:
+ return -EINVAL;
+ case VMCALL_RESULT_DATA_UNAVAILABLE:
+ return -ENODEV;
+ default:
+ return -EFAULT;
}
- return result;
+}
+static unsigned int
+issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
+{
+ chipset_dev->controlvm_addr.physaddr = virt_to_phys(
+ &chipset_dev->controlvm_addr.params);
+ chipset_dev->controlvm_addr.err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
+ chipset_dev->controlvm_addr.physaddr);
+ if (chipset_dev->controlvm_addr.err)
+ return chipset_dev->controlvm_addr.err;
+
+ *control_addr = chipset_dev->controlvm_addr.params.address;
+ *control_bytes = chipset_dev->controlvm_addr.params.channel_bytes;
+
+ return 0;
}
static u64 controlvm_get_channel_address(void)
@@ -1373,7 +1363,7 @@ static u64 controlvm_get_channel_address(void)
u64 addr = 0;
u32 size = 0;
- if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
+ if (issue_vmcall_io_controlvm_addr(&addr, &size))
return 0;
return addr;
@@ -1388,8 +1378,6 @@ setup_crash_devices_work_queue(struct work_struct *work)
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
- POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
-
/* send init chipset msg */
msg.hdr.id = CONTROLVM_CHIPSET_INIT;
msg.cmd.init_chipset.bus_count = 23;
@@ -1398,71 +1386,67 @@ setup_crash_devices_work_queue(struct work_struct *work)
chipset_init(&msg);
/* get saved message count */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
- local_crash_msg_count,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "invalid count\n");
return;
}
/* get saved crash message offset */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
/* read create device message for storage bus offset */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset,
&local_crash_bus_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
/* read create device message for storage device */
- if (visorchannel_read(controlvm_channel,
+ if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset +
sizeof(struct controlvm_message),
&local_crash_dev_msg,
sizeof(struct controlvm_message)) < 0) {
- POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ dev_err(&chipset_dev->acpi_device->dev,
+ "failed to read channel\n");
return;
}
/* reuse IOVM create bus message */
- if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
- bus_create(&local_crash_bus_msg);
- } else {
- POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
+ dev_err(&chipset_dev->acpi_device->dev,
+ "no valid create_bus message\n");
return;
}
+ bus_create(&local_crash_bus_msg);
/* reuse create device message for storage device */
- if (local_crash_dev_msg.cmd.create_device.channel_addr) {
- my_device_create(&local_crash_dev_msg);
- } else {
- POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
+ if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
+ dev_err(&chipset_dev->acpi_device->dev,
+ "no valid create_device message\n");
return;
}
- POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
+ my_device_create(&local_crash_dev_msg);
}
void
@@ -1471,8 +1455,8 @@ bus_create_response(struct visor_device *bus_info, int response)
if (response >= 0)
bus_info->state.created = 1;
- bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
+ response);
kfree(bus_info->pending_msg_hdr);
bus_info->pending_msg_hdr = NULL;
@@ -1481,8 +1465,8 @@ bus_create_response(struct visor_device *bus_info, int response)
void
bus_destroy_response(struct visor_device *bus_info, int response)
{
- bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
+ response);
kfree(bus_info->pending_msg_hdr);
bus_info->pending_msg_hdr = NULL;
@@ -1494,8 +1478,8 @@ device_create_response(struct visor_device *dev_info, int response)
if (response >= 0)
dev_info->state.created = 1;
- device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
+ response);
kfree(dev_info->pending_msg_hdr);
dev_info->pending_msg_hdr = NULL;
@@ -1504,8 +1488,8 @@ device_create_response(struct visor_device *dev_info, int response)
void
device_destroy_response(struct visor_device *dev_info, int response)
{
- device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
- response);
+ controlvm_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
+ response);
kfree(dev_info->pending_msg_hdr);
dev_info->pending_msg_hdr = NULL;
@@ -1534,136 +1518,6 @@ device_resume_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-static int
-visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
-{
- unsigned long physaddr = 0;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- u64 addr = 0;
-
- /* sv_enable_dfp(); */
- if (offset & (PAGE_SIZE - 1))
- return -ENXIO; /* need aligned offsets */
-
- switch (offset) {
- case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
- vma->vm_flags |= VM_IO;
- if (!*file_controlvm_channel)
- return -ENXIO;
-
- visorchannel_read
- (*file_controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- gp_control_channel),
- &addr, sizeof(addr));
- if (!addr)
- return -ENXIO;
-
- physaddr = (unsigned long)addr;
- if (remap_pfn_range(vma, vma->vm_start,
- physaddr >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- /*pgprot_noncached */
- (vma->vm_page_prot))) {
- return -EAGAIN;
- }
- break;
- default:
- return -ENXIO;
- }
- return 0;
-}
-
-static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
-{
- u64 result = VMCALL_SUCCESS;
- u64 physaddr = 0;
-
- ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
- result);
- return result;
-}
-
-static inline int issue_vmcall_update_physical_time(u64 adjustment)
-{
- int result = VMCALL_SUCCESS;
-
- ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
- return result;
-}
-
-static long visorchipset_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- u64 adjustment;
- s64 vrtc_offset;
-
- switch (cmd) {
- case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
- /* get the physical rtc offset */
- vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
- if (copy_to_user((void __user *)arg, &vrtc_offset,
- sizeof(vrtc_offset))) {
- return -EFAULT;
- }
- return 0;
- case VMCALL_UPDATE_PHYSICAL_TIME:
- if (copy_from_user(&adjustment, (void __user *)arg,
- sizeof(adjustment))) {
- return -EFAULT;
- }
- return issue_vmcall_update_physical_time(adjustment);
- default:
- return -EFAULT;
- }
-}
-
-static const struct file_operations visorchipset_fops = {
- .owner = THIS_MODULE,
- .open = visorchipset_open,
- .read = NULL,
- .write = NULL,
- .unlocked_ioctl = visorchipset_ioctl,
- .release = visorchipset_release,
- .mmap = visorchipset_mmap,
-};
-
-static int
-visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
-{
- int rc = 0;
-
- file_controlvm_channel = controlvm_channel;
- cdev_init(&file_cdev, &visorchipset_fops);
- file_cdev.owner = THIS_MODULE;
- if (MAJOR(major_dev) == 0) {
- rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
- /* dynamic major device number registration required */
- if (rc < 0)
- return rc;
- } else {
- /* static major device number registration required */
- rc = register_chrdev_region(major_dev, 1, "visorchipset");
- if (rc < 0)
- return rc;
- }
- rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
- if (rc < 0) {
- unregister_chrdev_region(major_dev, 1);
- return rc;
- }
- return 0;
-}
-
-static void
-visorchipset_file_cleanup(dev_t major_dev)
-{
- if (file_cdev.ops)
- cdev_del(&file_cdev);
- file_cdev.ops = NULL;
- unregister_chrdev_region(major_dev, 1);
-}
-
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
@@ -1677,12 +1531,12 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
* '\0'-terminated
*/
allocbytes++;
- if ((controlvm_payload_bytes_buffered + bytes)
+ if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
> MAX_CONTROLVM_PAYLOAD_BYTES) {
*retry = true;
return NULL;
}
- ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
+ ctx = kzalloc(allocbytes, GFP_KERNEL);
if (!ctx) {
*retry = true;
return NULL;
@@ -1710,7 +1564,7 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
}
ctx->byte_stream = true;
- controlvm_payload_bytes_buffered += ctx->param_bytes;
+ chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
return ctx;
@@ -1719,22 +1573,20 @@ err_finish_ctx:
return NULL;
}
-/**
+/*
* handle_command() - process a controlvm message
* @inmsg: the message to process
* @channel_addr: address of the controlvm channel
*
* Return:
- * false - this function will return false only in the case where the
- * controlvm message was NOT processed, but processing must be
- * retried before reading the next controlvm message; a
- * scenario where this can occur is when we need to throttle
- * the allocation of memory in which to copy out controlvm
- * payload data
- * true - processing of the controlvm message completed,
- * either successfully or with an error
+ * 0 - Successfully processed the message
+ * -EAGAIN - ControlVM message was not processed and should be retried
+ * reading the next controlvm message; a scenario where this can
+ * occur is when we need to throttle the allocation of memory in
+ * which to copy out controlvm payload data.
+ * < 0 - error: ControlVM message was processed but an error occurred.
*/
-static bool
+static int
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
struct controlvm_message_packet *cmd = &inmsg.cmd;
@@ -1743,11 +1595,13 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
struct parser_context *parser_ctx = NULL;
bool local_addr;
struct controlvm_message ackmsg;
+ int err = 0;
/* create parsing context if necessary */
local_addr = (inmsg.hdr.flags.test_message == 1);
if (channel_addr == 0)
- return true;
+ return -EINVAL;
+
parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
parm_bytes = inmsg.hdr.payload_bytes;
@@ -1763,66 +1617,69 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
parser_init_byte_stream(parm_addr, parm_bytes,
local_addr, &retry);
if (!parser_ctx && retry)
- return false;
+ return -EAGAIN;
}
if (!local_addr) {
controlvm_init_response(&ackmsg, &inmsg.hdr,
CONTROLVM_RESP_SUCCESS);
- if (controlvm_channel)
- visorchannel_signalinsert(controlvm_channel,
- CONTROLVM_QUEUE_ACK,
- &ackmsg);
+ err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
+ CONTROLVM_QUEUE_ACK,
+ &ackmsg);
+ if (err)
+ return err;
}
switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
- chipset_init(&inmsg);
+ err = chipset_init(&inmsg);
break;
case CONTROLVM_BUS_CREATE:
- bus_create(&inmsg);
+ err = bus_create(&inmsg);
break;
case CONTROLVM_BUS_DESTROY:
- bus_destroy(&inmsg);
+ err = bus_destroy(&inmsg);
break;
case CONTROLVM_BUS_CONFIGURE:
- bus_configure(&inmsg, parser_ctx);
+ err = bus_configure(&inmsg, parser_ctx);
break;
case CONTROLVM_DEVICE_CREATE:
- my_device_create(&inmsg);
+ err = my_device_create(&inmsg);
break;
case CONTROLVM_DEVICE_CHANGESTATE:
if (cmd->device_change_state.flags.phys_device) {
- parahotplug_process_message(&inmsg);
+ err = parahotplug_process_message(&inmsg);
} else {
/*
* save the hdr and cmd structures for later use
* when sending back the response to Command
*/
- my_device_changestate(&inmsg);
+ err = my_device_changestate(&inmsg);
break;
}
break;
case CONTROLVM_DEVICE_DESTROY:
- my_device_destroy(&inmsg);
+ err = my_device_destroy(&inmsg);
break;
case CONTROLVM_DEVICE_CONFIGURE:
- /* no op for now, just send a respond that we passed */
+ /* no op just send a respond that we passed */
if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
+ controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
+ NULL);
break;
case CONTROLVM_CHIPSET_READY:
- chipset_ready_uevent(&inmsg.hdr);
+ err = chipset_ready_uevent(&inmsg.hdr);
break;
case CONTROLVM_CHIPSET_SELFTEST:
- chipset_selftest_uevent(&inmsg.hdr);
+ err = chipset_selftest_uevent(&inmsg.hdr);
break;
case CONTROLVM_CHIPSET_STOP:
- chipset_notready_uevent(&inmsg.hdr);
+ err = chipset_notready_uevent(&inmsg.hdr);
break;
default:
+ err = -ENOMSG;
if (inmsg.hdr.flags.response_expected)
- controlvm_respond
- (&inmsg.hdr, -CONTROLVM_RESP_ID_UNKNOWN);
+ controlvm_respond(&inmsg.hdr,
+ -CONTROLVM_RESP_ID_UNKNOWN, NULL);
break;
}
@@ -1830,31 +1687,35 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
parser_done(parser_ctx);
parser_ctx = NULL;
}
- return true;
+ return err;
}
-/**
+/*
* read_controlvm_event() - retreives the next message from the
* CONTROLVM_QUEUE_EVENT queue in the controlvm
* channel
* @msg: pointer to the retrieved message
*
- * Return: true if a valid message was retrieved or false otherwise
+ * Return: 0 if valid message was retrieved or -error
*/
-static bool
+static int
read_controlvm_event(struct controlvm_message *msg)
{
- if (!visorchannel_signalremove(controlvm_channel,
- CONTROLVM_QUEUE_EVENT, msg)) {
- /* got a message */
- if (msg->hdr.flags.test_message == 1)
- return false;
- return true;
- }
- return false;
+ int err;
+
+ err = visorchannel_signalremove(chipset_dev->controlvm_channel,
+ CONTROLVM_QUEUE_EVENT, msg);
+ if (err)
+ return err;
+
+ /* got a message */
+ if (msg->hdr.flags.test_message == 1)
+ return -EINVAL;
+
+ return 0;
}
-/**
+/*
* parahotplug_process_list() - remove any request from the list that's been on
* there too long and respond with an error
*/
@@ -1875,10 +1736,10 @@ parahotplug_process_list(void)
list_del(pos);
if (req->msg.hdr.flags.response_expected)
- controlvm_respond_physdev_changestate(
+ controlvm_respond(
&req->msg.hdr,
CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
- req->msg.cmd.device_change_state.state);
+ &req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
}
@@ -1889,67 +1750,70 @@ static void
controlvm_periodic_work(struct work_struct *work)
{
struct controlvm_message inmsg;
- bool got_command = false;
- bool handle_command_failed = false;
-
- while (!visorchannel_signalremove(controlvm_channel,
- CONTROLVM_QUEUE_RESPONSE,
- &inmsg))
- ;
- if (!got_command) {
- if (controlvm_pending_msg_valid) {
- /*
- * we throttled processing of a prior
- * msg, so try to process it again
- * rather than reading a new one
- */
- inmsg = controlvm_pending_msg;
- controlvm_pending_msg_valid = false;
- got_command = true;
- } else {
- got_command = read_controlvm_event(&inmsg);
- }
+ int count = 0;
+ int err;
+
+ /* Drain the RESPONSE queue make it empty */
+ do {
+ err = visorchannel_signalremove(chipset_dev->controlvm_channel,
+ CONTROLVM_QUEUE_RESPONSE,
+ &inmsg);
+ } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
+
+ if (err != -EAGAIN)
+ goto schedule_out;
+
+ if (chipset_dev->controlvm_pending_msg_valid) {
+ /*
+ * we throttled processing of a prior
+ * msg, so try to process it again
+ * rather than reading a new one
+ */
+ inmsg = chipset_dev->controlvm_pending_msg;
+ chipset_dev->controlvm_pending_msg_valid = false;
+ err = 0;
+ } else {
+ err = read_controlvm_event(&inmsg);
}
- handle_command_failed = false;
- while (got_command && (!handle_command_failed)) {
- most_recent_message_jiffies = jiffies;
- if (handle_command(inmsg,
- visorchannel_get_physaddr
- (controlvm_channel)))
- got_command = read_controlvm_event(&inmsg);
- else {
- /*
- * this is a scenario where throttling
- * is required, but probably NOT an
- * error...; we stash the current
- * controlvm msg so we will attempt to
- * reprocess it on our next loop
- */
- handle_command_failed = true;
- controlvm_pending_msg = inmsg;
- controlvm_pending_msg_valid = true;
+ while (!err) {
+ chipset_dev->most_recent_message_jiffies = jiffies;
+ err = handle_command(inmsg,
+ visorchannel_get_physaddr
+ (chipset_dev->controlvm_channel));
+ if (err == -EAGAIN) {
+ chipset_dev->controlvm_pending_msg = inmsg;
+ chipset_dev->controlvm_pending_msg_valid = true;
+ break;
}
+
+ err = read_controlvm_event(&inmsg);
}
/* parahotplug_worker */
parahotplug_process_list();
- if (time_after(jiffies,
- most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
+schedule_out:
+ if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
+ (HZ * MIN_IDLE_SECONDS))) {
/*
* it's been longer than MIN_IDLE_SECONDS since we
* processed our last controlvm message; slow down the
* polling
*/
- if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ if (chipset_dev->poll_jiffies !=
+ POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
+ chipset_dev->poll_jiffies =
+ POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
} else {
- if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ if (chipset_dev->poll_jiffies !=
+ POLLJIFFIES_CONTROLVMCHANNEL_FAST)
+ chipset_dev->poll_jiffies =
+ POLLJIFFIES_CONTROLVMCHANNEL_FAST;
}
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+ schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+ chipset_dev->poll_jiffies);
}
static int
@@ -1958,81 +1822,84 @@ visorchipset_init(struct acpi_device *acpi_device)
int err = -ENODEV;
u64 addr;
uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
+ struct visorchannel *controlvm_channel;
+
+ chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
+ if (!chipset_dev)
+ goto error;
addr = controlvm_get_channel_address();
if (!addr)
goto error;
- controlvm_channel = visorchannel_create_with_lock(addr, 0,
- GFP_KERNEL, uuid);
+ acpi_device->driver_data = chipset_dev;
+
+ chipset_dev->acpi_device = acpi_device;
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ controlvm_channel = visorchannel_create_with_lock(addr,
+ 0, GFP_KERNEL, uuid);
+
if (!controlvm_channel)
- goto error;
+ goto error_free_chipset_dev;
- if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
- visorchannel_get_header(controlvm_channel)))
- goto error_destroy_channel;
+ chipset_dev->controlvm_channel = controlvm_channel;
- major_dev = MKDEV(visorchipset_major, 0);
- err = visorchipset_file_init(major_dev, &controlvm_channel);
+ err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
+ visorchipset_dev_groups);
if (err < 0)
goto error_destroy_channel;
+ if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
+ visorchannel_get_header(controlvm_channel)))
+ goto error_delete_groups;
+
/* if booting in a crash kernel */
if (is_kdump_kernel())
- INIT_DELAYED_WORK(&periodic_controlvm_work,
+ INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
setup_crash_devices_work_queue);
else
- INIT_DELAYED_WORK(&periodic_controlvm_work,
+ INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
controlvm_periodic_work);
- most_recent_message_jiffies = jiffies;
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
-
- visorchipset_platform_device.dev.devt = major_dev;
- if (platform_device_register(&visorchipset_platform_device) < 0) {
- POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
- DIAG_SEVERITY_ERR);
- err = -ENODEV;
- goto error_cancel_work;
- }
- POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
+ chipset_dev->most_recent_message_jiffies = jiffies;
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
+ chipset_dev->poll_jiffies);
err = visorbus_init();
if (err < 0)
- goto error_unregister;
+ goto error_cancel_work;
return 0;
-error_unregister:
- platform_device_unregister(&visorchipset_platform_device);
-
error_cancel_work:
- cancel_delayed_work_sync(&periodic_controlvm_work);
- visorchipset_file_cleanup(major_dev);
+ cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
+
+error_delete_groups:
+ sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
+ visorchipset_dev_groups);
error_destroy_channel:
- visorchannel_destroy(controlvm_channel);
+ visorchannel_destroy(chipset_dev->controlvm_channel);
+
+error_free_chipset_dev:
+ kfree(chipset_dev);
error:
- POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
+ dev_err(&acpi_device->dev, "failed with error %d\n", err);
return err;
}
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
- POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
-
visorbus_exit();
+ cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
+ sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
+ visorchipset_dev_groups);
- cancel_delayed_work_sync(&periodic_controlvm_work);
-
- visorchannel_destroy(controlvm_channel);
-
- visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
- platform_device_unregister(&visorchipset_platform_device);
- POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
+ visorchannel_destroy(chipset_dev->controlvm_channel);
+ kfree(chipset_dev);
return 0;
}
@@ -2050,12 +1917,12 @@ static struct acpi_driver unisys_acpi_driver = {
.ops = {
.add = visorchipset_init,
.remove = visorchipset_exit,
- },
+ },
};
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
-static __init uint32_t visorutil_spar_detect(void)
+static __init int visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -2090,10 +1957,6 @@ static void exit_unisys(void)
acpi_bus_unregister_driver(&unisys_acpi_driver);
}
-module_param_named(major, visorchipset_major, int, 0444);
-MODULE_PARM_DESC(visorchipset_major,
- "major device number to use for the device node");
-
module_init(init_unisys);
module_exit(exit_unisys);