@@ -136,7 +136,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
 
 	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
 		/* Core internal bug */
-		if (WARN_ONCE(dev->state != CLOCK_EVT_STATE_ONESHOT,
+		if (WARN_ONCE(!clockevent_state_oneshot(dev),
 			      "Current state: %d\n", dev->state))
 			return -EINVAL;
 
@@ -170,7 +170,7 @@ void clockevents_set_state(struct clock_event_device *dev,
 		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
 		 * on it, so fix it up and emit a warning:
 		 */
-		if (state == CLOCK_EVT_STATE_ONESHOT) {
+		if (clockevent_state_oneshot(dev)) {
 			if (unlikely(!dev->mult)) {
 				dev->mult = 1;
 				WARN_ON(1);
@@ -259,7 +259,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 		delta = dev->min_delta_ns;
 		dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+		if (clockevent_state_shutdown(dev))
 			return 0;
 
 		dev->retries++;
@@ -296,7 +296,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
 		delta = dev->min_delta_ns;
 		dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+		if (clockevent_state_shutdown(dev))
 			return 0;
 
 		dev->retries++;
@@ -328,11 +328,11 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 
 	dev->next_event = expires;
 
-	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+	if (clockevent_state_shutdown(dev))
 		return 0;
 
 	/* We must be in ONESHOT state here */
-	WARN_ONCE(dev->state != CLOCK_EVT_STATE_ONESHOT, "Current state: %d\n",
+	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
 		  dev->state);
 
 	/* Shortcut for clockevent devices that can deal with ktime. */
@@ -377,7 +377,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 	struct clock_event_device *dev, *newdev = NULL;
 
 	list_for_each_entry(dev, &clockevent_devices, list) {
-		if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
+		if (dev == ced || !clockevent_state_detached(dev))
 			continue;
 
 		if (!tick_check_replacement(newdev, dev))
@@ -403,7 +403,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
 {
 	/* Fast track. Device is unused */
-	if (ced->state == CLOCK_EVT_STATE_DETACHED) {
+	if (clockevent_state_detached(ced)) {
 		list_del_init(&ced->list);
 		return 0;
 	}
@@ -561,10 +561,10 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
 	clockevents_config(dev, freq);
 
-	if (dev->state == CLOCK_EVT_STATE_ONESHOT)
+	if (clockevent_state_oneshot(dev))
 		return clockevents_program_event(dev, dev->next_event, false);
 
-	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
+	if (clockevent_state_periodic(dev))
 		return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
 
 	return 0;
@@ -625,7 +625,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
 	}
 
 	if (new) {
-		BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
+		BUG_ON(!clockevent_state_detached(new));
 		clockevents_shutdown(new);
 	}
 }
@@ -681,7 +681,7 @@ void tick_cleanup_dead_cpu(int cpu)
 		if (cpumask_test_cpu(cpu, dev->cpumask) &&
 		    cpumask_weight(dev->cpumask) == 1 &&
 		    !tick_is_broadcast_device(dev)) {
-			BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
+			BUG_ON(!clockevent_state_detached(dev));
 			list_del(&dev->list);
 		}
 	}
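
For reference, the clockevent_state_*() helpers this patch switches to are thin inline predicates over dev->state. A minimal sketch of their shape, assuming they follow the usual pattern of the definitions in include/linux/clockchips.h:

/* Sketch of the state-check helpers; one predicate per clock_event_state. */
static inline bool clockevent_state_detached(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_DETACHED;
}

static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_SHUTDOWN;
}

static inline bool clockevent_state_periodic(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_PERIODIC;
}

static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
{
	return dev->state == CLOCK_EVT_STATE_ONESHOT;
}

With these, call sites read as a predicate (e.g. clockevent_state_shutdown(dev)) rather than an open-coded comparison, which keeps knowledge of the state field in one place should its representation ever change.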