Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/agp/generic.c            |   4
-rw-r--r--  drivers/char/agp/intel-agp.c          |   3
-rw-r--r--  drivers/char/hpet.c                   |   2
-rw-r--r--  drivers/char/hw_random/virtio-rng.c   |   4
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c   | 211
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c      | 150
-rw-r--r--  drivers/char/mem.c                    |  27
-rw-r--r--  drivers/char/sysrq.c                  |   1
-rw-r--r--  drivers/char/vt.c                     |   2
9 files changed, 313 insertions(+), 91 deletions(-)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 10d6cbd..2224b76 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m
int i, ret = -ENOMEM;
for (i = 0; i < num_pages; i++) {
- page = alloc_page(GFP_KERNEL | GFP_DMA32);
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
/* agp_free_memory() needs gart address */
if (page == NULL)
goto out;
@@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
struct page * page;
- page = alloc_page(GFP_KERNEL | GFP_DMA32);
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return NULL;
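
For illustration, the allocation pattern both hunks above converge on, as a stand-alone sketch (not part of the patch): __GFP_ZERO makes the page allocator hand back an already-cleared page, so whatever the memory previously held cannot be observed through a GART mapping later exposed to user space.

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Allocate one zeroed page suitable for the GART (32-bit DMA zone). */
    static struct page *agp_alloc_zeroed_dma32_page(void)
    {
            struct page *page;

            page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
            if (!page)
                    return NULL;    /* callers treat this as -ENOMEM */
            return page;
    }
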
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 9d9490e..3686912 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2131,6 +2131,8 @@ static const struct intel_driver_description {
{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M",
&intel_845_driver, &intel_830_driver },
{ PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL },
+ { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854",
+ &intel_845_driver, &intel_830_driver },
{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL },
{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM",
&intel_845_driver, &intel_830_driver },
@@ -2355,6 +2357,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82845_HB),
ID(PCI_DEVICE_ID_INTEL_82845G_HB),
ID(PCI_DEVICE_ID_INTEL_82850_HB),
+ ID(PCI_DEVICE_ID_INTEL_82854_HB),
ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
ID(PCI_DEVICE_ID_INTEL_82860_HB),
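
The new 82854 lines reuse the existing 845/830 driver hooks, so wiring the chipset up only takes the two table additions above. For reference, a sketch of the ID() helper used by agp_intel_pci_table, reproduced from memory of the unpatched driver (treat the exact field list as an assumption): it matches the Intel host-bridge function with the given device ID, which is all the PCI core needs to bind agp_intel to the 82854.

    #define ID(x)                                           \
            {                                               \
                    .class      = (PCI_CLASS_BRIDGE_HOST << 8), \
                    .class_mask = ~0,                       \
                    .vendor     = PCI_VENDOR_ID_INTEL,      \
                    .device     = x,                        \
                    .subvendor  = PCI_ANY_ID,               \
                    .subdevice  = PCI_ANY_ID,               \
            }
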
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 50dfa3b..340ba4f 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -72,7 +72,7 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
{
return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index d0e563e..86e83f8 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -37,9 +37,9 @@ static void random_recv_done(struct virtqueue *vq)
{
int len;
- /* We never get spurious callbacks. */
+ /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
if (!vq->vq_ops->get_buf(vq, &len))
- BUG();
+ return;
data_left = len / sizeof(random_data[0]);
complete(&have_data);
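
A stand-alone sketch of the tolerant callback pattern the hunk switches to, using the same pre-virtqueue-rework vq_ops interface as the driver above (names other than get_buf are illustrative): with a shared interrupt line, virtio_pci can invoke the callback even though no buffer has completed, so a NULL return from get_buf() just means there is nothing to consume yet.

    #include <linux/virtio.h>

    static void example_recv_done(struct virtqueue *vq)
    {
            unsigned int len;
            void *buf;

            buf = vq->vq_ops->get_buf(vq, &len);    /* NULL on a spurious callback */
            if (!buf)
                    return;

            /* ... hand 'len' bytes from 'buf' to whoever is waiting ... */
    }
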
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e93fc8d..aa83a08 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -285,6 +285,11 @@ enum ipmi_stat_indexes {
/* Events that were received with the proper format. */
IPMI_STAT_events,
+ /* Retransmissions on IPMB that failed. */
+ IPMI_STAT_dropped_rexmit_ipmb_commands,
+
+ /* Retransmissions on LAN that failed. */
+ IPMI_STAT_dropped_rexmit_lan_commands,
/* This *must* remain last, add new values above this. */
IPMI_NUM_STATS
@@ -445,6 +450,20 @@ static DEFINE_MUTEX(smi_watchers_mutex);
#define ipmi_get_stat(intf, stat) \
((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+static int is_lan_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_LAN_ADDR_TYPE;
+}
+
+static int is_ipmb_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
+}
+
+static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
+}
static void free_recv_msg_list(struct list_head *q)
{
@@ -601,8 +620,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
return (smi_addr1->lun == smi_addr2->lun);
}
- if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+ if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
struct ipmi_ipmb_addr *ipmb_addr1
= (struct ipmi_ipmb_addr *) addr1;
struct ipmi_ipmb_addr *ipmb_addr2
@@ -612,7 +630,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
&& (ipmb_addr1->lun == ipmb_addr2->lun));
}
- if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
+ if (is_lan_addr(addr1)) {
struct ipmi_lan_addr *lan_addr1
= (struct ipmi_lan_addr *) addr1;
struct ipmi_lan_addr *lan_addr2
@@ -644,14 +662,13 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
|| (addr->channel < 0))
return -EINVAL;
- if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+ if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
if (len < sizeof(struct ipmi_ipmb_addr))
return -EINVAL;
return 0;
}
- if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
+ if (is_lan_addr(addr)) {
if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
return 0;
@@ -1503,8 +1520,7 @@ static int i_ipmi_request(ipmi_user_t user,
memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
ipmi_inc_stat(intf, sent_local_commands);
- } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
- || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+ } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
struct ipmi_ipmb_addr *ipmb_addr;
unsigned char ipmb_seq;
long seqid;
@@ -1583,8 +1599,6 @@ static int i_ipmi_request(ipmi_user_t user,
spin_lock_irqsave(&(intf->seq_lock), flags);
- ipmi_inc_stat(intf, sent_ipmb_commands);
-
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
@@ -1606,6 +1620,8 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
+ ipmi_inc_stat(intf, sent_ipmb_commands);
+
/*
* Store the sequence number in the message,
* so that when the send message response
@@ -1635,7 +1651,7 @@ static int i_ipmi_request(ipmi_user_t user,
*/
spin_unlock_irqrestore(&(intf->seq_lock), flags);
}
- } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
+ } else if (is_lan_addr(addr)) {
struct ipmi_lan_addr *lan_addr;
unsigned char ipmb_seq;
long seqid;
@@ -1696,8 +1712,6 @@ static int i_ipmi_request(ipmi_user_t user,
spin_lock_irqsave(&(intf->seq_lock), flags);
- ipmi_inc_stat(intf, sent_lan_commands);
-
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
@@ -1719,6 +1733,8 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
+ ipmi_inc_stat(intf, sent_lan_commands);
+
/*
* Store the sequence number in the message,
* so that when the send message response
@@ -1937,6 +1953,10 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
ipmi_get_stat(intf, invalid_events));
out += sprintf(out, "events: %u\n",
ipmi_get_stat(intf, events));
+ out += sprintf(out, "failed rexmit LAN msgs: %u\n",
+ ipmi_get_stat(intf, dropped_rexmit_lan_commands));
+ out += sprintf(out, "failed rexmit IPMB msgs: %u\n",
+ ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
return (out - ((char *) page));
}
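
Given the format strings above, the per-interface statistics proc file would simply gain two more lines, e.g. (values illustrative):

    failed rexmit LAN msgs: 0
    failed rexmit IPMB msgs: 0
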
@@ -3264,6 +3284,114 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
return rv;
}
+/*
+ * This routine will handle "Get Message" command responses with
+ * channels that use an OEM Medium. The message format belongs to
+ * the OEM. See IPMI 2.0 specification, Chapter 6 and
+ * Chapter 22, sections 22.6 and 22.24 for more details.
+ */
+static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ unsigned char netfn;
+ unsigned char cmd;
+ unsigned char chan;
+ ipmi_user_t user = NULL;
+ struct ipmi_system_interface_addr *smi_addr;
+ struct ipmi_recv_msg *recv_msg;
+
+ /*
+ * We expect the OEM SW to perform error checking
+ * so we just do some basic sanity checks
+ */
+ if (msg->rsp_size < 4) {
+ /* Message not big enough, just ignore it. */
+ ipmi_inc_stat(intf, invalid_commands);
+ return 0;
+ }
+
+ if (msg->rsp[2] != 0) {
+ /* An error getting the response, just ignore it. */
+ return 0;
+ }
+
+ /*
+ * This is an OEM Message so the OEM needs to know how
+ * to handle the message. We do no interpretation.
+ */
+ netfn = msg->rsp[0] >> 2;
+ cmd = msg->rsp[1];
+ chan = msg->rsp[3] & 0xf;
+
+ rcu_read_lock();
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, just give up. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ /*
+ * Don't do anything with these messages, just allow
+ * them to be freed.
+ */
+
+ rv = 0;
+ } else {
+ /* Deliver the message to the user. */
+ ipmi_inc_stat(intf, handled_commands);
+
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to visit this again depending on OEM
+ * requirements
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &(recv_msg->addr));
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user = user;
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data,
+ &(msg->rsp[4]),
+ msg->rsp_size - 4);
+ deliver_response(recv_msg);
+ }
+ }
+
+ return rv;
+}
+
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
struct ipmi_smi_msg *msg)
{
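
For clarity, the byte layout handle_oem_get_msg_cmd() relies on, written out as a stand-alone sketch; the indices mirror the rsp[] accesses above and nothing here adds protocol beyond what the function already assumes.

    #include <linux/kernel.h>

    /* Response bytes of a "Get Message" on an OEM-medium channel. */
    static void example_parse_oem_get_msg_rsp(const unsigned char *rsp,
                                              int rsp_size)
    {
            unsigned char netfn = rsp[0] >> 2;      /* network function */
            unsigned char lun   = rsp[0] & 3;       /* responder LUN */
            unsigned char cmd   = rsp[1];
            unsigned char cc    = rsp[2];           /* completion code, 0 = success */
            unsigned char chan  = rsp[3] & 0xf;     /* OEM channel number */
            int data_len        = rsp_size - 4;     /* OEM payload begins at rsp[4] */

            /* delivery goes to whoever registered a receiver for
             * (netfn, cmd, chan); the payload is passed through untouched */
            pr_debug("OEM rsp: netfn %x cmd %x chan %u lun %u cc %u len %d\n",
                     netfn, cmd, chan, lun, cc, data_len);
    }
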
@@ -3519,6 +3647,17 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
goto out;
}
+ /*
+ ** We need to make sure the channels have been initialized.
+ ** The channel_handler routine will set the "curr_channel"
+ ** equal to or greater than IPMI_MAX_CHANNELS when all the
+ ** channels for this interface have been initialized.
+ */
+ if (intf->curr_channel < IPMI_MAX_CHANNELS) {
+ requeue = 1; /* Just put the message back for now */
+ goto out;
+ }
+
switch (intf->channels[chan].medium) {
case IPMI_CHANNEL_MEDIUM_IPMB:
if (msg->rsp[4] & 0x04) {
@@ -3554,11 +3693,20 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
break;
default:
- /*
- * We don't handle the channel type, so just
- * free the message.
- */
- requeue = 0;
+ /* Check for OEM Channels. Clients had better
+ register for these commands. */
+ if ((intf->channels[chan].medium
+ >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+ && (intf->channels[chan].medium
+ <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
+ requeue = handle_oem_get_msg_cmd(intf, msg);
+ } else {
+ /*
+ * We don't handle the channel type, so just
+ * free the message.
+ */
+ requeue = 0;
+ }
}
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
@@ -3730,7 +3878,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
list_add_tail(&msg->link, timeouts);
if (ent->broadcast)
ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
- else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
+ else if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf, timed_out_lan_commands);
else
ipmi_inc_stat(intf, timed_out_ipmb_commands);
@@ -3744,15 +3892,17 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
*/
ent->timeout = MAX_MSG_TIMEOUT;
ent->retries_left--;
- if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
- ipmi_inc_stat(intf, retransmitted_lan_commands);
- else
- ipmi_inc_stat(intf, retransmitted_ipmb_commands);
-
smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
- if (!smi_msg)
+ if (!smi_msg) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ dropped_rexmit_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ dropped_rexmit_ipmb_commands);
return;
+ }
spin_unlock_irqrestore(&intf->seq_lock, *flags);
@@ -3764,10 +3914,17 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
* resent.
*/
handlers = intf->handlers;
- if (handlers)
+ if (handlers) {
+ if (is_lan_addr(&ent->recv_msg->addr))
+ ipmi_inc_stat(intf,
+ retransmitted_lan_commands);
+ else
+ ipmi_inc_stat(intf,
+ retransmitted_ipmb_commands);
+
intf->handlers->sender(intf->send_info,
smi_msg, 0);
- else
+ } else
ipmi_free_smi_msg(smi_msg);
spin_lock_irqsave(&intf->seq_lock, *flags);
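
Summarizing the check_msg_timeout() rework above: a retry is only counted as retransmitted once the message has actually been rebuilt and handed back to the low-level driver; if the rebuild fails it now lands in the new dropped_rexmit_* counters instead, and the no-handlers path stays uncounted. A condensed stand-alone model (plain ints stand in for the intf->stats atomics):

    struct rexmit_stats {
            unsigned int retransmitted;     /* really resent to the BMC */
            unsigned int dropped_rexmit;    /* retry could not be rebuilt */
    };

    /* 'msg' may be NULL (rebuild failed); 'can_send' models handlers != NULL. */
    static void account_retry(struct rexmit_stats *st, void *msg, int can_send)
    {
            if (!msg) {
                    st->dropped_rexmit++;   /* new dropped_rexmit_* counters */
                    return;
            }
            if (can_send)
                    st->retransmitted++;    /* counted only when actually resent */
            /* when sending is impossible the message is freed, uncounted */
    }
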
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e58ea4c..2596446 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -82,12 +82,6 @@
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
short timeout */
-/* Bit for BMC global enables. */
-#define IPMI_BMC_RCV_MSG_INTR 0x01
-#define IPMI_BMC_EVT_MSG_INTR 0x02
-#define IPMI_BMC_EVT_MSG_BUFF 0x04
-#define IPMI_BMC_SYS_LOG 0x08
-
enum si_intf_state {
SI_NORMAL,
SI_GETTING_FLAGS,
@@ -220,6 +214,9 @@ struct smi_info {
OEM2_DATA_AVAIL)
unsigned char msg_flags;
+ /* Does the BMC have an event buffer? */
+ char has_event_buffer;
+
/*
* If set to true, this will request events the next time the
* state machine is idle.
@@ -968,7 +965,8 @@ static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
- if (atomic_read(&smi_info->stop_operation))
+ if (atomic_read(&smi_info->stop_operation) ||
+ !smi_info->has_event_buffer)
return;
atomic_set(&smi_info->req_events, 1);
@@ -2407,26 +2405,9 @@ static struct of_platform_driver ipmi_of_platform_driver = {
};
#endif /* CONFIG_PPC_OF */
-
-static int try_get_dev_id(struct smi_info *smi_info)
+static int wait_for_msg_done(struct smi_info *smi_info)
{
- unsigned char msg[2];
- unsigned char *resp;
- unsigned long resp_len;
enum si_sm_result smi_result;
- int rv = 0;
-
- resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
- if (!resp)
- return -ENOMEM;
-
- /*
- * Do a Get Device ID command, since it comes back with some
- * useful info.
- */
- msg[0] = IPMI_NETFN_APP_REQUEST << 2;
- msg[1] = IPMI_GET_DEVICE_ID_CMD;
- smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
for (;;) {
@@ -2441,16 +2422,39 @@ static int try_get_dev_id(struct smi_info *smi_info)
} else
break;
}
- if (smi_result == SI_SM_HOSED) {
+ if (smi_result == SI_SM_HOSED)
/*
* We couldn't get the state machine to run, so whatever's at
* the port is probably not an IPMI SMI interface.
*/
- rv = -ENODEV;
+ return -ENODEV;
+
+ return 0;
+}
+
+static int try_get_dev_id(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ /*
+ * Do a Get Device ID command, since it comes back with some
+ * useful info.
+ */
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_DEVICE_ID_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv)
goto out;
- }
- /* Otherwise, we got some data. */
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
@@ -2462,6 +2466,88 @@ static int try_get_dev_id(struct smi_info *smi_info)
return rv;
}
+static int try_enable_event_buffer(struct smi_info *smi_info)
+{
+ unsigned char msg[3];
+ unsigned char *resp;
+ unsigned long resp_len;
+ int rv = 0;
+
+ resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ printk(KERN_WARNING
+ "ipmi_si: Error getting response from get global,"
+ " enables command, the event buffer is not"
+ " enabled.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 4 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
+ resp[2] != 0) {
+ printk(KERN_WARNING
+ "ipmi_si: Invalid return from get global"
+ " enables command, cannot enable the event"
+ " buffer.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
+ /* buffer is already enabled, nothing to do. */
+ goto out;
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+
+ rv = wait_for_msg_done(smi_info);
+ if (rv) {
+ printk(KERN_WARNING
+ "ipmi_si: Error getting response from set global,"
+ " enables command, the event buffer is not"
+ " enabled.\n");
+ goto out;
+ }
+
+ resp_len = smi_info->handlers->get_result(smi_info->si_sm,
+ resp, IPMI_MAX_MSG_LENGTH);
+
+ if (resp_len < 3 ||
+ resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
+ resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
+ printk(KERN_WARNING
+ "ipmi_si: Invalid return from get global,"
+ "enables command, not enable the event"
+ " buffer.\n");
+ rv = -EINVAL;
+ goto out;
+ }
+
+ if (resp[2] != 0)
+ /*
+ * An error when setting the event buffer bit means
+ * that the event buffer is not supported.
+ */
+ rv = -ENOENT;
+ out:
+ kfree(resp);
+ return rv;
+}
+
static int type_file_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
@@ -2847,6 +2933,10 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->intf_num = smi_num;
smi_num++;
+ rv = try_enable_event_buffer(new_smi);
+ if (rv == 0)
+ new_smi->has_event_buffer = 1;
+
/*
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
@@ -2863,7 +2953,7 @@ static int try_smi_init(struct smi_info *new_smi)
*/
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
- if (rv) {
+ if (!new_smi->pdev) {
printk(KERN_ERR
"ipmi_si_intf:"
" Unable to allocate platform device\n");
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 3586b3b..8f05c38 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -301,33 +301,7 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
}
#endif
-void __attribute__((weak))
-map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
-{
- /* nothing. architectures can override. */
-}
-
-void __attribute__((weak))
-unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
-{
- /* nothing. architectures can override. */
-}
-
-static void mmap_mem_open(struct vm_area_struct *vma)
-{
- map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-
-static void mmap_mem_close(struct vm_area_struct *vma)
-{
- unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-
static struct vm_operations_struct mmap_mem_ops = {
- .open = mmap_mem_open,
- .close = mmap_mem_close,
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys
#endif
@@ -362,7 +336,6 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
vma->vm_pgoff,
size,
vma->vm_page_prot)) {
- unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
return -EAGAIN;
}
return 0;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 0540d5d..aed2b29 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -36,7 +36,6 @@
#include <linux/vt_kern.h>
#include <linux/workqueue.h>
#include <linux/kexec.h>
-#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/oom.h>
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 2c1d133..08151d4 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2274,7 +2274,7 @@ rescan_last_byte:
continue; /* nothing to display */
}
/* Glyph not found */
- if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
+ if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
/* In legacy mode use the glyph we get by a 1:1 mapping.
This would make absolutely no sense with Unicode in mind,
but do this for ASCII characters since a font may lack