|
@@ -31,9 +31,18 @@
|
|
|
|
|
|
#include "cpts.h"
|
|
|
|
|
|
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
|
|
|
+
|
|
|
+struct cpts_skb_cb_data {
|
|
|
+ unsigned long tmo;
|
|
|
+};
|
|
|
+
|
|
|
#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
|
|
|
#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
|
|
|
|
|
|
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
|
|
|
+ u16 ts_seqid, u8 ts_msgtype);
|
|
|
+
|
|
|
static int event_expired(struct cpts_event *event)
|
|
|
{
|
|
|
return time_after(jiffies, event->tmo);
|
|
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
|
|
|
return removed ? 0 : -1;
|
|
|
}
|
|
|
|
|
|
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
|
|
|
+{
|
|
|
+ struct sk_buff *skb, *tmp;
|
|
|
+ u16 seqid;
|
|
|
+ u8 mtype;
|
|
|
+ bool found = false;
|
|
|
+
|
|
|
+ mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
|
|
|
+ seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
|
|
|
+
|
|
|
+ /* no need to grab txq.lock as access is always done under cpts->lock */
|
|
|
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
|
|
|
+ struct skb_shared_hwtstamps ssh;
|
|
|
+ unsigned int class = ptp_classify_raw(skb);
|
|
|
+ struct cpts_skb_cb_data *skb_cb =
|
|
|
+ (struct cpts_skb_cb_data *)skb->cb;
|
|
|
+
|
|
|
+ if (cpts_match(skb, class, seqid, mtype)) {
|
|
|
+ u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
|
|
|
+
|
|
|
+ memset(&ssh, 0, sizeof(ssh));
|
|
|
+ ssh.hwtstamp = ns_to_ktime(ns);
|
|
|
+ skb_tstamp_tx(skb, &ssh);
|
|
|
+ found = true;
|
|
|
+ __skb_unlink(skb, &cpts->txq);
|
|
|
+ dev_consume_skb_any(skb);
|
|
|
+ dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
|
|
|
+ mtype, seqid);
|
|
|
+ } else if (time_after(jiffies, skb_cb->tmo)) {
|
|
|
+ /* timeout any expired skbs */
|
|
|
+ dev_dbg(cpts->dev,
|
|
|
+ "expiring tx timestamp mtype %u seqid %04x\n",
|
|
|
+ mtype, seqid);
|
|
|
+ __skb_unlink(skb, &cpts->txq);
|
|
|
+ dev_consume_skb_any(skb);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return found;
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* Returns zero if matching event type was found.
|
|
|
*/
|
|
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
|
|
|
event->low = lo;
|
|
|
type = event_type(event);
|
|
|
switch (type) {
|
|
|
+ case CPTS_EV_TX:
|
|
|
+ if (cpts_match_tx_ts(cpts, event)) {
|
|
|
+ /* if the new event matches an existing skb,
|
|
|
+ * then don't queue it
|
|
|
+ */
|
|
|
+ break;
|
|
|
+ }
|
|
|
case CPTS_EV_PUSH:
|
|
|
case CPTS_EV_RX:
|
|
|
- case CPTS_EV_TX:
|
|
|
list_del_init(&event->list);
|
|
|
list_add_tail(&event->list, &cpts->events);
|
|
|
break;
|
|
@@ -229,8 +285,15 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
|
|
|
struct cpts *cpts = container_of(ptp, struct cpts, info);
|
|
|
unsigned long delay = cpts->ov_check_period;
|
|
|
struct timespec64 ts;
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&cpts->lock, flags);
|
|
|
+ ts = ns_to_timespec64(timecounter_read(&cpts->tc));
|
|
|
+
|
|
|
+ if (!skb_queue_empty(&cpts->txq))
|
|
|
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
|
|
|
+ spin_unlock_irqrestore(&cpts->lock, flags);
|
|
|
|
|
|
- cpts_ptp_gettime(&cpts->info, &ts);
|
|
|
pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
|
|
|
return (long)delay;
|
|
|
}
|
|
@@ -319,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
|
|
|
break;
|
|
|
}
|
|
|
}
|
|
|
+
|
|
|
+ if (ev_type == CPTS_EV_TX && !ns) {
|
|
|
+ struct cpts_skb_cb_data *skb_cb =
|
|
|
+ (struct cpts_skb_cb_data *)skb->cb;
|
|
|
+ /* Not found, add frame to queue for processing later.
|
|
|
+ * The periodic FIFO check will handle this.
|
|
|
+ */
|
|
|
+ skb_get(skb);
|
|
|
+ /* set the expiry deadline used for timeouts */
|
|
|
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
|
|
|
+ __skb_queue_tail(&cpts->txq, skb);
|
|
|
+ ptp_schedule_worker(cpts->clock, 0);
|
|
|
+ }
|
|
|
spin_unlock_irqrestore(&cpts->lock, flags);
|
|
|
|
|
|
return ns;
|
|
@@ -360,6 +436,7 @@ int cpts_register(struct cpts *cpts)
|
|
|
{
|
|
|
int err, i;
|
|
|
|
|
|
+ skb_queue_head_init(&cpts->txq);
|
|
|
INIT_LIST_HEAD(&cpts->events);
|
|
|
INIT_LIST_HEAD(&cpts->pool);
|
|
|
for (i = 0; i < CPTS_MAX_EVENTS; i++)
|
|
@@ -400,6 +477,9 @@ void cpts_unregister(struct cpts *cpts)
|
|
|
cpts_write32(cpts, 0, int_enable);
|
|
|
cpts_write32(cpts, 0, control);
|
|
|
|
|
|
+ /* Drop all packets */
|
|
|
+ skb_queue_purge(&cpts->txq);
|
|
|
+
|
|
|
clk_disable(cpts->refclk);
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(cpts_unregister);
|