|
@@ -31,9 +31,18 @@
|
|
|
|
|
|
#include "cpts.h"
|
|
|
|
|
|
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
|
|
|
+
|
|
|
+struct cpts_skb_cb_data {
|
|
|
+ unsigned long tmo;
|
|
|
+};
|
|
|
+
|
|
|
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
|
|
|
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
|
|
|
|
|
|
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
|
|
|
+ u16 ts_seqid, u8 ts_msgtype);
|
|
|
+
|
|
|
static int event_expired(struct cpts_event *event)
|
|
|
{
|
|
|
return time_after(jiffies, event->tmo);
|
|
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
|
|
|
return removed ? 0 : -1;
|
|
|
}
|
|
|
|
|
|
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
|
|
|
+{
|
|
|
+ struct sk_buff *skb, *tmp;
|
|
|
+ u16 seqid;
|
|
|
+ u8 mtype;
|
|
|
+ bool found = false;
|
|
|
+
|
|
|
+ mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
|
|
|
+ seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
|
|
|
+
|
|
|
+ /* no need to grab txq.lock as access is always done under cpts->lock */
|
|
|
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
|
|
|
+ struct skb_shared_hwtstamps ssh;
|
|
|
+ unsigned int class = ptp_classify_raw(skb);
|
|
|
+ struct cpts_skb_cb_data *skb_cb =
|
|
|
+ (struct cpts_skb_cb_data *)skb->cb;
|
|
|
+
|
|
|
+ if (cpts_match(skb, class, seqid, mtype)) {
|
|
|
+ u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
|
|
|
+
|
|
|
+ memset(&ssh, 0, sizeof(ssh));
|
|
|
+ ssh.hwtstamp = ns_to_ktime(ns);
|
|
|
+ skb_tstamp_tx(skb, &ssh);
|
|
|
+ found = true;
|
|
|
+ __skb_unlink(skb, &cpts->txq);
|
|
|
+ dev_consume_skb_any(skb);
|
|
|
+ dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
|
|
|
+ mtype, seqid);
|
|
|
+ } else if (time_after(jiffies, skb_cb->tmo)) {
|
|
|
+ /* timeout any skbs whose ~100ms tmo (set in cpts_find_ts) expired */
|
|
|
+ dev_dbg(cpts->dev,
|
|
|
+ "expiring tx timestamp mtype %u seqid %04x\n",
|
|
|
+ mtype, seqid);
|
|
|
+ __skb_unlink(skb, &cpts->txq);
|
|
|
+ dev_consume_skb_any(skb);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return found;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Returns zero if matching event type was found.
|
|
|
*/
|
|
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
|
|
|
event->low = lo;
|
|
|
type = event_type(event);
|
|
|
switch (type) {
|
|
|
+ case CPTS_EV_TX:
|
|
|
+ if (cpts_match_tx_ts(cpts, event)) {
|
|
|
+ /* if the new event matches an existing skb,
|
|
|
+ * then don't queue it
|
|
|
+ */
|
|
|
+ break;
|
|
|
+ }
|
|
|
case CPTS_EV_PUSH:
|
|
|
case CPTS_EV_RX:
|
|
|
- case CPTS_EV_TX:
|
|
|
list_del_init(&event->list);
|
|
|
list_add_tail(&event->list, &cpts->events);
|
|
|
break;
|
|
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
|
|
|
return -EOPNOTSUPP;
|
|
|
}
|
|
|
|
|
|
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
|
|
|
+{
|
|
|
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
|
|
|
+ unsigned long delay = cpts->ov_check_period;
|
|
|
+ struct timespec64 ts;
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&cpts->lock, flags);
|
|
|
+ ts = ns_to_timespec64(timecounter_read(&cpts->tc));
|
|
|
+
|
|
|
+ if (!skb_queue_empty(&cpts->txq))
|
|
|
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
|
|
|
+ spin_unlock_irqrestore(&cpts->lock, flags);
|
|
|
+
|
|
|
+ pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
|
|
|
+ return (long)delay;
|
|
|
+}
|
|
|
+
|
|
|
static struct ptp_clock_info cpts_info = {
|
|
|
.owner = THIS_MODULE,
|
|
|
.name = "CTPS timer",
|
|
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
|
|
|
.gettime64 = cpts_ptp_gettime,
|
|
|
.settime64 = cpts_ptp_settime,
|
|
|
.enable = cpts_ptp_enable,
|
|
|
+ .do_aux_work = cpts_overflow_check,
|
|
|
};
|
|
|
|
|
|
-static void cpts_overflow_check(struct work_struct *work)
|
|
|
-{
|
|
|
- struct timespec64 ts;
|
|
|
- struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
|
|
|
-
|
|
|
- cpts_ptp_gettime(&cpts->info, &ts);
|
|
|
- pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
|
|
|
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
|
|
|
-}
|
|
|
-
|
|
|
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
|
|
|
u16 ts_seqid, u8 ts_msgtype)
|
|
|
{
|
|
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
|
|
|
return 0;
|
|
|
|
|
|
spin_lock_irqsave(&cpts->lock, flags);
|
|
|
- cpts_fifo_read(cpts, CPTS_EV_PUSH);
|
|
|
+ cpts_fifo_read(cpts, -1);
|
|
|
list_for_each_safe(this, next, &cpts->events) {
|
|
|
event = list_entry(this, struct cpts_event, list);
|
|
|
if (event_expired(event)) {
|
|
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
|
|
|
break;
|
|
|
}
|
|
|
}
|
|
|
+
|
|
|
+ if (ev_type == CPTS_EV_TX && !ns) {
|
|
|
+ struct cpts_skb_cb_data *skb_cb =
|
|
|
+ (struct cpts_skb_cb_data *)skb->cb;
|
|
|
+ /* Not found, add frame to queue for processing later.
|
|
|
+ * The periodic FIFO check will handle this.
|
|
|
+ */
|
|
|
+ skb_get(skb);
|
|
|
+ /* get the timestamp for timeouts */
|
|
|
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
|
|
|
+ __skb_queue_tail(&cpts->txq, skb);
|
|
|
+ ptp_schedule_worker(cpts->clock, 0);
|
|
|
+ }
|
|
|
spin_unlock_irqrestore(&cpts->lock, flags);
|
|
|
|
|
|
return ns;
|
|
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
|
|
|
{
|
|
|
int err, i;
|
|
|
|
|
|
+ skb_queue_head_init(&cpts->txq);
|
|
|
INIT_LIST_HEAD(&cpts->events);
|
|
|
INIT_LIST_HEAD(&cpts->pool);
|
|
|
for (i = 0; i < CPTS_MAX_EVENTS; i++)
|
|
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
|
|
|
}
|
|
|
cpts->phc_index = ptp_clock_index(cpts->clock);
|
|
|
|
|
|
- schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
|
|
|
+ ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
|
|
|
return 0;
|
|
|
|
|
|
err_ptp:
|
|
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
|
|
|
if (WARN_ON(!cpts->clock))
|
|
|
return;
|
|
|
|
|
|
- cancel_delayed_work_sync(&cpts->overflow_work);
|
|
|
-
|
|
|
ptp_clock_unregister(cpts->clock);
|
|
|
cpts->clock = NULL;
|
|
|
|
|
|
cpts_write32(cpts, 0, int_enable);
|
|
|
cpts_write32(cpts, 0, control);
|
|
|
|
|
|
+ /* Drop all queued tx timestamp packets */
|
|
|
+ skb_queue_purge(&cpts->txq);
|
|
|
+
|
|
|
clk_disable(cpts->refclk);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(cpts_unregister);
|
|
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
|
|
|
cpts->dev = dev;
|
|
|
cpts->reg = (struct cpsw_cpts __iomem *)regs;
|
|
|
spin_lock_init(&cpts->lock);
|
|
|
- INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
|
|
|
|
|
|
ret = cpts_of_parse(cpts, node);
|
|
|
if (ret)
|