@@ -151,6 +151,14 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 						     req);
 		}
 		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		ret = pm_qos_update_target(&qos->latency_tolerance,
+					   &req->data.pnode, action, value);
+		if (ret) {
+			value = pm_qos_read_value(&qos->latency_tolerance);
+			req->dev->power.set_latency_tolerance(req->dev, value);
+		}
+		break;
 	case DEV_PM_QOS_FLAGS:
 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
 					  action, value);
@@ -194,6 +202,13 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	c->type = PM_QOS_MIN;
 	c->notifiers = n;
 
+	c = &qos->latency_tolerance;
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+	c->type = PM_QOS_MIN;
+
 	INIT_LIST_HEAD(&qos->flags.list);
 
 	spin_lock_irq(&dev->power.lock);
@@ -247,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 		memset(req, 0, sizeof(*req));
 	}
+	c = &qos->latency_tolerance;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
 	f = &qos->flags;
 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -266,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
+static bool dev_pm_qos_invalid_request(struct device *dev,
+				       struct dev_pm_qos_request *req)
+{
+	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+			&& !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
+{
+	int ret = 0;
+
+	if (!dev || dev_pm_qos_invalid_request(dev, req))
+		return -EINVAL;
+
+	if (WARN(dev_pm_qos_request_active(req),
+		 "%s() called for already added request\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
+
+	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+	if (!ret) {
+		req->dev = dev;
+		req->type = type;
+		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+	}
+	return ret;
+}
+
 /**
  * dev_pm_qos_add_request - inserts new qos request into the list
  * @dev: target device for the constraint
@@ -291,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   enum dev_pm_qos_req_type type, s32 value)
 {
-	int ret = 0;
-
-	if (!dev || !req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(dev_pm_qos_request_active(req),
-		 "%s() called for already added request\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (IS_ERR(dev->power.qos))
-		ret = -ENODEV;
-	else if (!dev->power.qos)
-		ret = dev_pm_qos_constraints_allocate(dev);
-
-	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
-	if (!ret) {
-		req->dev = dev;
-		req->type = type;
-		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-	}
-
+	ret = __dev_pm_qos_add_request(dev, req, type, value);
 	mutex_unlock(&dev_pm_qos_mtx);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
@@ -343,6 +377,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 
 	switch(req->type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		curr_value = req->data.pnode.prio;
 		break;
 	case DEV_PM_QOS_FLAGS:
@@ -563,6 +598,10 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
 		req = dev->power.qos->resume_latency_req;
 		dev->power.qos->resume_latency_req = NULL;
 		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		req = dev->power.qos->latency_tolerance_req;
+		dev->power.qos->latency_tolerance_req = NULL;
+		break;
 	case DEV_PM_QOS_FLAGS:
 		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
@@ -768,6 +807,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	pm_runtime_put(dev);
 	return ret;
 }
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+	s32 ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = IS_ERR_OR_NULL(dev->power.qos)
+		|| !dev->power.qos->latency_tolerance_req ?
+			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+			dev->power.qos->latency_tolerance_req->data.pnode.prio;
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos)
+	    || !dev->power.qos->latency_tolerance_req) {
+		struct dev_pm_qos_request *req;
+
+		if (val < 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+		if (ret < 0) {
+			kfree(req);
+			goto out;
+		}
+		dev->power.qos->latency_tolerance_req = req;
+	} else {
+		if (val < 0) {
+			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+			ret = 0;
+		} else {
+			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+		}
+	}
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
 #else /* !CONFIG_PM_RUNTIME */
 static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
 static void __dev_pm_qos_hide_flags(struct device *dev) {}
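
For context, a minimal sketch (not part of the patch) of how a driver would opt into the new DEV_PM_QOS_LATENCY_TOLERANCE type; the foo_* names are hypothetical. The only behavior assumed is what the diff above shows: dev->power.set_latency_tolerance must be non-NULL before a latency tolerance request can be added (dev_pm_qos_invalid_request() makes __dev_pm_qos_add_request() fail with -EINVAL otherwise), and apply_constraint() invokes that callback whenever the aggregate target value changes.

#include <linux/device.h>
#include <linux/pm_qos.h>

/*
 * Hypothetical callback: invoked by apply_constraint() above whenever the
 * aggregate DEV_PM_QOS_LATENCY_TOLERANCE value for the device changes.
 * A negative value (PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) means no
 * constraint is in effect.
 */
static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	/* program the device's latency tolerance hardware from val here */
}

static int foo_probe(struct device *dev)
{
	/*
	 * Must be set before any DEV_PM_QOS_LATENCY_TOLERANCE request is
	 * added, or __dev_pm_qos_add_request() will reject it with -EINVAL.
	 */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
	return 0;
}

After that, a first call to dev_pm_qos_update_user_latency_tolerance(dev, val) with val >= 0 allocates and adds the user space request, and a later call with a negative val drops it again through __dev_pm_qos_drop_user_request().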