@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2015-2018, Intel Corporation.
+/*
+ * Copyright (c) 2015-2018, Intel Corporation.
+ */
 
 #define pr_fmt(fmt) "kcs-bmc: " fmt
 
@@ -242,14 +244,14 @@ out_unlock:
 }
 EXPORT_SYMBOL(kcs_bmc_handle_event);
 
-static inline struct kcs_bmc *file_to_kcs_bmc(struct file *filp)
+static inline struct kcs_bmc *to_kcs_bmc(struct file *filp)
 {
 	return container_of(filp->private_data, struct kcs_bmc, miscdev);
 }
 
 static int kcs_bmc_open(struct inode *inode, struct file *filp)
 {
-	struct kcs_bmc *kcs_bmc = file_to_kcs_bmc(filp);
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
 	int ret = 0;
 
 	spin_lock_irq(&kcs_bmc->lock);
@@ -262,25 +264,25 @@ static int kcs_bmc_open(struct inode *inode, struct file *filp)
 	return ret;
 }
 
-static unsigned int kcs_bmc_poll(struct file *filp, poll_table *wait)
+static __poll_t kcs_bmc_poll(struct file *filp, poll_table *wait)
 {
-	struct kcs_bmc *kcs_bmc = file_to_kcs_bmc(filp);
-	unsigned int mask = 0;
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
+	__poll_t mask = 0;
 
 	poll_wait(filp, &kcs_bmc->queue, wait);
 
 	spin_lock_irq(&kcs_bmc->lock);
 	if (kcs_bmc->data_in_avail)
-		mask |= POLLIN;
+		mask |= EPOLLIN;
 	spin_unlock_irq(&kcs_bmc->lock);
 
 	return mask;
 }
 
-static ssize_t kcs_bmc_read(struct file *filp, char *buf,
-			    size_t count, loff_t *offset)
+static ssize_t kcs_bmc_read(struct file *filp, char __user *buf,
+			    size_t count, loff_t *ppos)
 {
-	struct kcs_bmc *kcs_bmc = file_to_kcs_bmc(filp);
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
 	bool data_avail;
 	size_t data_len;
 	ssize_t ret;
@@ -339,10 +341,10 @@ out_unlock:
 	return ret;
 }
 
-static ssize_t kcs_bmc_write(struct file *filp, const char *buf,
-			     size_t count, loff_t *offset)
+static ssize_t kcs_bmc_write(struct file *filp, const char __user *buf,
+			     size_t count, loff_t *ppos)
 {
-	struct kcs_bmc *kcs_bmc = file_to_kcs_bmc(filp);
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
 	ssize_t ret;
 
 	/* a minimum response size '3' : netfn + cmd + ccode */
@@ -378,7 +380,7 @@ out_unlock:
 static long kcs_bmc_ioctl(struct file *filp, unsigned int cmd,
 			  unsigned long arg)
 {
-	struct kcs_bmc *kcs_bmc = file_to_kcs_bmc(filp);
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
 	long ret = 0;
 
 	spin_lock_irq(&kcs_bmc->lock);
@@ -410,7 +412,7 @@ static long kcs_bmc_ioctl(struct file *filp, unsigned int cmd,
 
 static int kcs_bmc_release(struct inode *inode, struct file *filp)
 {
-	struct kcs_bmc *kcs_bmc = file_to_kcs_bmc(filp);
+	struct kcs_bmc *kcs_bmc = to_kcs_bmc(filp);
 
 	spin_lock_irq(&kcs_bmc->lock);
 	kcs_bmc->running = 0;