cpts.c

/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#ifdef CONFIG_TI_CPTS

#define cpts_read32(c, r)      __raw_readl(&c->reg->r)
#define cpts_write32(c, v, r)  __raw_writel(v, &c->reg->r)

static int event_expired(struct cpts_event *event)
{
        return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
        return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}
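
/*
 * Pop one event from the hardware event FIFO.  Returns 0 and fills in
 * the raw high/low event words when an event is pending, or -1 when
 * the FIFO is empty.
 */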
static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
        u32 r = cpts_read32(cpts, intstat_raw);

        if (r & TS_PEND_RAW) {
                *high = cpts_read32(cpts, event_high);
                *low = cpts_read32(cpts, event_low);
                cpts_write32(cpts, EVENT_POP, event_pop);
                return 0;
        }
        return -1;
}

/*
 * Returns zero if matching event type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
        int i, type = -1;
        u32 hi, lo;
        struct cpts_event *event;

        for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
                if (cpts_fifo_pop(cpts, &hi, &lo))
                        break;
                if (list_empty(&cpts->pool)) {
                        pr_err("cpts: event pool is empty\n");
                        return -1;
                }
                event = list_first_entry(&cpts->pool, struct cpts_event, list);
                event->tmo = jiffies + 2;
                event->high = hi;
                event->low = lo;
                type = event_type(event);
                switch (type) {
                case CPTS_EV_PUSH:
                case CPTS_EV_RX:
                case CPTS_EV_TX:
                        list_del_init(&event->list);
                        list_add_tail(&event->list, &cpts->events);
                        break;
                case CPTS_EV_ROLL:
                case CPTS_EV_HALF:
                case CPTS_EV_HW:
                        break;
                default:
                        pr_err("cpts: unknown event type\n");
                        break;
                }
                if (type == match)
                        break;
        }
        return type == match ? 0 : -1;
}
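
/*
 * Cyclecounter read callback: trigger a TS_PUSH so the hardware latches
 * the current counter value into the event FIFO, then pull the resulting
 * CPTS_EV_PUSH event off the event list and return its 32-bit timestamp
 * (zero if no push event was found).
 */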
static cycle_t cpts_systim_read(const struct cyclecounter *cc)
{
        u64 val = 0;
        struct cpts_event *event;
        struct list_head *this, *next;
        struct cpts *cpts = container_of(cc, struct cpts, cc);

        cpts_write32(cpts, TS_PUSH, ts_push);
        if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
                pr_err("cpts: unable to obtain a time stamp\n");

        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_type(event) == CPTS_EV_PUSH) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        val = event->low;
                        break;
                }
        }

        return val;
}

/* PTP clock operations */
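
/*
 * Frequency adjustment: scale the nominal cyclecounter multiplier by
 * ppb parts per billion (diff = cc_mult * |ppb| / 1e9) and apply the
 * new multiplier under the lock, after folding the elapsed time into
 * the timecounter with timecounter_read().
 */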
static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        u64 adj;
        u32 diff, mult;
        int neg_adj = 0;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        if (ppb < 0) {
                neg_adj = 1;
                ppb = -ppb;
        }
        mult = cpts->cc_mult;
        adj = mult;
        adj *= ppb;
        diff = div_u64(adj, 1000000000ULL);

        spin_lock_irqsave(&cpts->lock, flags);

        timecounter_read(&cpts->tc);

        cpts->cc.mult = neg_adj ? mult - diff : mult + diff;

        spin_unlock_irqrestore(&cpts->lock, flags);

        return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        spin_lock_irqsave(&cpts->lock, flags);
        timecounter_adjtime(&cpts->tc, delta);
        spin_unlock_irqrestore(&cpts->lock, flags);

        return 0;
}

static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
        u64 ns;
        u32 remainder;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        spin_lock_irqsave(&cpts->lock, flags);
        ns = timecounter_read(&cpts->tc);
        spin_unlock_irqrestore(&cpts->lock, flags);

        ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
        ts->tv_nsec = remainder;

        return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
                            const struct timespec *ts)
{
        u64 ns;
        unsigned long flags;
        struct cpts *cpts = container_of(ptp, struct cpts, info);

        ns = ts->tv_sec * 1000000000ULL;
        ns += ts->tv_nsec;

        spin_lock_irqsave(&cpts->lock, flags);
        timecounter_init(&cpts->tc, &cpts->cc, ns);
        spin_unlock_irqrestore(&cpts->lock, flags);

        return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
                           struct ptp_clock_request *rq, int on)
{
        return -EOPNOTSUPP;
}

static struct ptp_clock_info cpts_info = {
        .owner          = THIS_MODULE,
        .name           = "CTPS timer",
        .max_adj        = 1000000,
        .n_ext_ts       = 0,
        .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = cpts_ptp_adjfreq,
        .adjtime        = cpts_ptp_adjtime,
        .gettime        = cpts_ptp_gettime,
        .settime        = cpts_ptp_settime,
        .enable         = cpts_ptp_enable,
};
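
/*
 * Periodic worker: rewrite the CPTS enable and interrupt-enable bits
 * and read the clock via cpts_ptp_gettime(), which updates the
 * timecounter often enough that the 32-bit hardware counter cannot
 * wrap unnoticed between reads.
 */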
static void cpts_overflow_check(struct work_struct *work)
{
        struct timespec ts;
        struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);

        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);
        cpts_ptp_gettime(&cpts->info, &ts);
        pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
        schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
}

static void cpts_clk_init(struct device *dev, struct cpts *cpts)
{
        cpts->refclk = devm_clk_get(dev, "cpts");
        if (IS_ERR(cpts->refclk)) {
                dev_err(dev, "Failed to get cpts refclk\n");
                cpts->refclk = NULL;
                return;
        }
        clk_prepare_enable(cpts->refclk);
}
static void cpts_clk_release(struct cpts *cpts)
{
        /* Balance the clk_prepare_enable() done in cpts_clk_init() */
        clk_disable_unprepare(cpts->refclk);
}
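
/*
 * Compare the PTP sequence id and message type carried in a hardware
 * event against the fields of the given packet, skipping over VLAN,
 * IPv4/IPv6 UDP or plain L2 headers as indicated by ptp_class.
 * Returns non-zero on a match.
 */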
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
                      u16 ts_seqid, u8 ts_msgtype)
{
        u16 *seqid;
        unsigned int offset = 0;
        u8 *msgtype, *data = skb->data;

        if (ptp_class & PTP_CLASS_VLAN)
                offset += VLAN_HLEN;

        switch (ptp_class & PTP_CLASS_PMASK) {
        case PTP_CLASS_IPV4:
                offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
                break;
        case PTP_CLASS_IPV6:
                offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
        case PTP_CLASS_L2:
                offset += ETH_HLEN;
                break;
        default:
                return 0;
        }

        if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
                return 0;

        if (unlikely(ptp_class & PTP_CLASS_V1))
                msgtype = data + offset + OFF_PTP_CONTROL;
        else
                msgtype = data + offset;

        seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

        return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
}
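
/*
 * Look up the hardware timestamp for an skb: drain the event FIFO,
 * return expired events to the pool, and convert the counter value of
 * the matching RX/TX event to nanoseconds.  Returns 0 if no event
 * matches the packet.
 */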
static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
{
        u64 ns = 0;
        struct cpts_event *event;
        struct list_head *this, *next;
        unsigned int class = ptp_classify_raw(skb);
        unsigned long flags;
        u16 seqid;
        u8 mtype;

        if (class == PTP_CLASS_NONE)
                return 0;

        spin_lock_irqsave(&cpts->lock, flags);
        cpts_fifo_read(cpts, CPTS_EV_PUSH);
        list_for_each_safe(this, next, &cpts->events) {
                event = list_entry(this, struct cpts_event, list);
                if (event_expired(event)) {
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        continue;
                }
                mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
                seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
                if (ev_type == event_type(event) &&
                    cpts_match(skb, class, seqid, mtype)) {
                        ns = timecounter_cyc2time(&cpts->tc, event->low);
                        list_del_init(&event->list);
                        list_add(&event->list, &cpts->pool);
                        break;
                }
        }
        spin_unlock_irqrestore(&cpts->lock, flags);

        return ns;
}
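
/*
 * Attach a hardware receive timestamp to the skb, if receive
 * timestamping is enabled and a matching CPTS_EV_RX event is found.
 */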
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
        u64 ns;
        struct skb_shared_hwtstamps *ssh;

        if (!cpts->rx_enable)
                return;
        ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
        if (!ns)
                return;
        ssh = skb_hwtstamps(skb);
        memset(ssh, 0, sizeof(*ssh));
        ssh->hwtstamp = ns_to_ktime(ns);
}
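
/*
 * Report a hardware transmit timestamp via skb_tstamp_tx() for packets
 * flagged with SKBTX_IN_PROGRESS that have a matching CPTS_EV_TX event.
 */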
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
        u64 ns;
        struct skb_shared_hwtstamps ssh;

        if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                return;
        ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
        if (!ns)
                return;
        memset(&ssh, 0, sizeof(ssh));
        ssh.hwtstamp = ns_to_ktime(ns);
        skb_tstamp_tx(skb, &ssh);
}

#endif /*CONFIG_TI_CPTS*/
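
/*
 * Register the CPTS instance: set up the PTP clock, the cyclecounter
 * (mult/shift supplied by the caller), the event pool, the reference
 * clock and the hardware enable bits, then start the periodic overflow
 * worker.  Compiles to a stub returning 0 when CONFIG_TI_CPTS is off.
 */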
int cpts_register(struct device *dev, struct cpts *cpts,
                  u32 mult, u32 shift)
{
#ifdef CONFIG_TI_CPTS
        int err, i;
        unsigned long flags;

        cpts->info = cpts_info;
        cpts->clock = ptp_clock_register(&cpts->info, dev);
        if (IS_ERR(cpts->clock)) {
                err = PTR_ERR(cpts->clock);
                cpts->clock = NULL;
                return err;
        }
        spin_lock_init(&cpts->lock);

        cpts->cc.read = cpts_systim_read;
        cpts->cc.mask = CLOCKSOURCE_MASK(32);
        cpts->cc_mult = mult;
        cpts->cc.mult = mult;
        cpts->cc.shift = shift;

        INIT_LIST_HEAD(&cpts->events);
        INIT_LIST_HEAD(&cpts->pool);
        for (i = 0; i < CPTS_MAX_EVENTS; i++)
                list_add(&cpts->pool_data[i].list, &cpts->pool);

        cpts_clk_init(dev, cpts);
        cpts_write32(cpts, CPTS_EN, control);
        cpts_write32(cpts, TS_PEND_EN, int_enable);

        spin_lock_irqsave(&cpts->lock, flags);
        timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
        spin_unlock_irqrestore(&cpts->lock, flags);

        INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
        schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);

        cpts->phc_index = ptp_clock_index(cpts->clock);
#endif
        return 0;
}
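
/*
 * Undo cpts_register(): unregister the PTP clock, stop the overflow
 * worker and release the reference clock.
 */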
void cpts_unregister(struct cpts *cpts)
{
#ifdef CONFIG_TI_CPTS
        if (cpts->clock) {
                ptp_clock_unregister(cpts->clock);
                cancel_delayed_work_sync(&cpts->overflow_work);
        }
        if (cpts->refclk)
                cpts_clk_release(cpts);
#endif
}
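
/*
 * Typical usage from an Ethernet MAC driver, as an illustrative sketch
 * only (not part of this file; the priv/pdev names and error label are
 * hypothetical, and mult/shift are derived from the CPTS reference
 * clock rate by the caller):
 *
 *      // at probe time
 *      err = cpts_register(&pdev->dev, priv->cpts, mult, shift);
 *      if (err)
 *              goto err_out;
 *
 *      // in the RX and TX completion paths
 *      cpts_rx_timestamp(priv->cpts, skb);
 *      cpts_tx_timestamp(priv->cpts, skb);
 *
 *      // at remove time
 *      cpts_unregister(priv->cpts);
 */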