/*
 * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
 *  - BMC150
 *  - BMI055
 *  - BMA255
 *  - BMA250E
 *  - BMA222E
 *  - BMA280
 *
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>

#include "bmc150-accel.h"

#define BMC150_ACCEL_DRV_NAME "bmc150_accel"
#define BMC150_ACCEL_IRQ_NAME "bmc150_accel_event"

#define BMC150_ACCEL_REG_CHIP_ID 0x00

#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B
#define BMC150_ACCEL_ANY_MOTION_MASK 0x07
#define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0)
#define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1)
#define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2)
#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3)

#define BMC150_ACCEL_REG_PMU_LPW 0x11
#define BMC150_ACCEL_PMU_MODE_MASK 0xE0
#define BMC150_ACCEL_PMU_MODE_SHIFT 5
#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_MASK 0x17
#define BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT 1

#define BMC150_ACCEL_REG_PMU_RANGE 0x0F
#define BMC150_ACCEL_DEF_RANGE_2G 0x03
#define BMC150_ACCEL_DEF_RANGE_4G 0x05
#define BMC150_ACCEL_DEF_RANGE_8G 0x08
#define BMC150_ACCEL_DEF_RANGE_16G 0x0C

/* Default BW: 125Hz */
#define BMC150_ACCEL_REG_PMU_BW 0x10
#define BMC150_ACCEL_DEF_BW 125

#define BMC150_ACCEL_REG_RESET 0x14
#define BMC150_ACCEL_RESET_VAL 0xB6

#define BMC150_ACCEL_REG_INT_MAP_0 0x19
#define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE BIT(2)

#define BMC150_ACCEL_REG_INT_MAP_1 0x1A
#define BMC150_ACCEL_INT_MAP_1_BIT_DATA BIT(0)
#define BMC150_ACCEL_INT_MAP_1_BIT_FWM BIT(1)
#define BMC150_ACCEL_INT_MAP_1_BIT_FFULL BIT(2)

#define BMC150_ACCEL_REG_INT_RST_LATCH 0x21
#define BMC150_ACCEL_INT_MODE_LATCH_RESET 0x80
#define BMC150_ACCEL_INT_MODE_LATCH_INT 0x0F
#define BMC150_ACCEL_INT_MODE_NON_LATCH_INT 0x00

#define BMC150_ACCEL_REG_INT_EN_0 0x16
#define BMC150_ACCEL_INT_EN_BIT_SLP_X BIT(0)
#define BMC150_ACCEL_INT_EN_BIT_SLP_Y BIT(1)
#define BMC150_ACCEL_INT_EN_BIT_SLP_Z BIT(2)

#define BMC150_ACCEL_REG_INT_EN_1 0x17
#define BMC150_ACCEL_INT_EN_BIT_DATA_EN BIT(4)
#define BMC150_ACCEL_INT_EN_BIT_FFULL_EN BIT(5)
#define BMC150_ACCEL_INT_EN_BIT_FWM_EN BIT(6)

#define BMC150_ACCEL_REG_INT_OUT_CTRL 0x20
#define BMC150_ACCEL_INT_OUT_CTRL_INT1_LVL BIT(0)

#define BMC150_ACCEL_REG_INT_5 0x27
#define BMC150_ACCEL_SLOPE_DUR_MASK 0x03

#define BMC150_ACCEL_REG_INT_6 0x28
#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF

/* Slope duration in terms of number of samples */
#define BMC150_ACCEL_DEF_SLOPE_DURATION 1
/* in terms of multiples of g's/LSB, based on range */
#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1

#define BMC150_ACCEL_REG_XOUT_L 0x02

#define BMC150_ACCEL_MAX_STARTUP_TIME_MS 100

/* Sleep Duration values */
#define BMC150_ACCEL_SLEEP_500_MICRO 0x05
#define BMC150_ACCEL_SLEEP_1_MS 0x06
#define BMC150_ACCEL_SLEEP_2_MS 0x07
#define BMC150_ACCEL_SLEEP_4_MS 0x08
#define BMC150_ACCEL_SLEEP_6_MS 0x09
#define BMC150_ACCEL_SLEEP_10_MS 0x0A
#define BMC150_ACCEL_SLEEP_25_MS 0x0B
#define BMC150_ACCEL_SLEEP_50_MS 0x0C
#define BMC150_ACCEL_SLEEP_100_MS 0x0D
#define BMC150_ACCEL_SLEEP_500_MS 0x0E
#define BMC150_ACCEL_SLEEP_1_SEC 0x0F

#define BMC150_ACCEL_REG_TEMP 0x08
#define BMC150_ACCEL_TEMP_CENTER_VAL 24

#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS 2000

#define BMC150_ACCEL_REG_FIFO_STATUS 0x0E
#define BMC150_ACCEL_REG_FIFO_CONFIG0 0x30
#define BMC150_ACCEL_REG_FIFO_CONFIG1 0x3E
#define BMC150_ACCEL_REG_FIFO_DATA 0x3F
#define BMC150_ACCEL_FIFO_LENGTH 32
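/* One FIFO frame is one sample per axis: 3 x 16-bit words = 6 bytes. */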
enum bmc150_accel_axis {
	AXIS_X,
	AXIS_Y,
	AXIS_Z,
	AXIS_MAX,
};

enum bmc150_power_modes {
	BMC150_ACCEL_SLEEP_MODE_NORMAL,
	BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND,
	BMC150_ACCEL_SLEEP_MODE_LPM,
	BMC150_ACCEL_SLEEP_MODE_SUSPEND = 0x04,
};

struct bmc150_scale_info {
	int scale;
	u8 reg_range;
};

struct bmc150_accel_chip_info {
	const char *name;
	u8 chip_id;
	const struct iio_chan_spec *channels;
	int num_channels;
	const struct bmc150_scale_info scale_table[4];
};

struct bmc150_accel_interrupt {
	const struct bmc150_accel_interrupt_info *info;
	atomic_t users;
};

struct bmc150_accel_trigger {
	struct bmc150_accel_data *data;
	struct iio_trigger *indio_trig;
	int (*setup)(struct bmc150_accel_trigger *t, bool state);
	int intr;
	bool enabled;
};

enum bmc150_accel_interrupt_id {
	BMC150_ACCEL_INT_DATA_READY,
	BMC150_ACCEL_INT_ANY_MOTION,
	BMC150_ACCEL_INT_WATERMARK,
	BMC150_ACCEL_INTERRUPTS,
};

enum bmc150_accel_trigger_id {
	BMC150_ACCEL_TRIGGER_DATA_READY,
	BMC150_ACCEL_TRIGGER_ANY_MOTION,
	BMC150_ACCEL_TRIGGERS,
};

struct bmc150_accel_data {
	struct regmap *regmap;
	int irq;
	struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
	struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
	struct mutex mutex;
	u8 fifo_mode, watermark;
	s16 buffer[8];
	u8 bw_bits;
	u32 slope_dur;
	u32 slope_thres;
	u32 range;
	int ev_enable_state;
	int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
	const struct bmc150_accel_chip_info *chip_info;
};

static const struct {
	int val;
	int val2;
	u8 bw_bits;
} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
				     {31, 260000, 0x09},
				     {62, 500000, 0x0A},
				     {125, 0, 0x0B},
				     {250, 0, 0x0C},
				     {500, 0, 0x0D},
				     {1000, 0, 0x0E},
				     {2000, 0, 0x0F} };
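/*
 * Start-up/settling time (in ms) for each bandwidth setting; used to decide
 * how long to wait after the device is switched back to normal mode.
 */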
static const struct {
	int bw_bits;
	int msec;
} bmc150_accel_sample_upd_time[] = { {0x08, 64},
				     {0x09, 32},
				     {0x0A, 16},
				     {0x0B, 8},
				     {0x0C, 4},
				     {0x0D, 2},
				     {0x0E, 1},
				     {0x0F, 1} };

static const struct {
	int sleep_dur;
	u8 reg_value;
} bmc150_accel_sleep_value_table[] = { {0, 0},
				       {500, BMC150_ACCEL_SLEEP_500_MICRO},
				       {1000, BMC150_ACCEL_SLEEP_1_MS},
				       {2000, BMC150_ACCEL_SLEEP_2_MS},
				       {4000, BMC150_ACCEL_SLEEP_4_MS},
				       {6000, BMC150_ACCEL_SLEEP_6_MS},
				       {10000, BMC150_ACCEL_SLEEP_10_MS},
				       {25000, BMC150_ACCEL_SLEEP_25_MS},
				       {50000, BMC150_ACCEL_SLEEP_50_MS},
				       {100000, BMC150_ACCEL_SLEEP_100_MS},
				       {500000, BMC150_ACCEL_SLEEP_500_MS},
				       {1000000, BMC150_ACCEL_SLEEP_1_SEC} };

const struct regmap_config bmc150_regmap_conf = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x3f,
};
EXPORT_SYMBOL_GPL(bmc150_regmap_conf);

static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
				 enum bmc150_power_modes mode,
				 int dur_us)
{
	struct device *dev = regmap_get_device(data->regmap);
	int i;
	int ret;
	u8 lpw_bits;
	int dur_val = -1;

	if (dur_us > 0) {
		for (i = 0; i < ARRAY_SIZE(bmc150_accel_sleep_value_table);
		     ++i) {
			if (bmc150_accel_sleep_value_table[i].sleep_dur ==
			    dur_us)
				dur_val =
				bmc150_accel_sleep_value_table[i].reg_value;
		}
	} else {
		dur_val = 0;
	}

	if (dur_val < 0)
		return -EINVAL;

	lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
	lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);

	dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_pmu_lpw\n");
		return ret;
	}

	return 0;
}

static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
			       int val2)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
		if (bmc150_accel_samp_freq_table[i].val == val &&
		    bmc150_accel_samp_freq_table[i].val2 == val2) {
			ret = regmap_write(data->regmap,
				BMC150_ACCEL_REG_PMU_BW,
				bmc150_accel_samp_freq_table[i].bw_bits);
			if (ret < 0)
				return ret;

			data->bw_bits =
				bmc150_accel_samp_freq_table[i].bw_bits;
			return 0;
		}
	}

	return -EINVAL;
}
static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
			   data->slope_thres);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_int_6\n");
		return ret;
	}

	ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
				 BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_5\n");
		return ret;
	}

	dev_dbg(dev, "%x %x\n", data->slope_thres, data->slope_dur);

	return ret;
}

static int bmc150_accel_any_motion_setup(struct bmc150_accel_trigger *t,
					 bool state)
{
	if (state)
		return bmc150_accel_update_slope(t->data);

	return 0;
}

static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
			       int *val2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
		if (bmc150_accel_samp_freq_table[i].bw_bits == data->bw_bits) {
			*val = bmc150_accel_samp_freq_table[i].val;
			*val2 = bmc150_accel_samp_freq_table[i].val2;
			return IIO_VAL_INT_PLUS_MICRO;
		}
	}

	return -EINVAL;
}

#ifdef CONFIG_PM
static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bmc150_accel_sample_upd_time); ++i) {
		if (bmc150_accel_sample_upd_time[i].bw_bits == data->bw_bits)
			return bmc150_accel_sample_upd_time[i].msec;
	}

	return BMC150_ACCEL_MAX_STARTUP_TIME_MS;
}

static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	if (on) {
		ret = pm_runtime_get_sync(dev);
	} else {
		pm_runtime_mark_last_busy(dev);
		ret = pm_runtime_put_autosuspend(dev);
	}

	if (ret < 0) {
		dev_err(dev,
			"Failed: bmc150_accel_set_power_state for %d\n", on);
		if (on)
			pm_runtime_put_noidle(dev);

		return ret;
	}

	return 0;
}
#else
static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
	return 0;
}
#endif
static const struct bmc150_accel_interrupt_info {
	u8 map_reg;
	u8 map_bitmask;
	u8 en_reg;
	u8 en_bitmask;
} bmc150_accel_interrupts[BMC150_ACCEL_INTERRUPTS] = {
	{ /* data ready interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_DATA,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_DATA_EN,
	},
	{ /* motion interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_0,
		.map_bitmask = BMC150_ACCEL_INT_MAP_0_BIT_SLOPE,
		.en_reg = BMC150_ACCEL_REG_INT_EN_0,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_SLP_X |
			      BMC150_ACCEL_INT_EN_BIT_SLP_Y |
			      BMC150_ACCEL_INT_EN_BIT_SLP_Z
	},
	{ /* fifo watermark interrupt */
		.map_reg = BMC150_ACCEL_REG_INT_MAP_1,
		.map_bitmask = BMC150_ACCEL_INT_MAP_1_BIT_FWM,
		.en_reg = BMC150_ACCEL_REG_INT_EN_1,
		.en_bitmask = BMC150_ACCEL_INT_EN_BIT_FWM_EN,
	},
};

static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
					  struct bmc150_accel_data *data)
{
	int i;

	for (i = 0; i < BMC150_ACCEL_INTERRUPTS; i++)
		data->interrupts[i].info = &bmc150_accel_interrupts[i];
}
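/*
 * The same physical interrupt can have more than one user (e.g. the
 * any-motion trigger and the event interface); the per-interrupt "users"
 * counter ensures the hardware map/enable bits are only touched on the
 * first enable and the last disable.
 */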
static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
				      bool state)
{
	struct device *dev = regmap_get_device(data->regmap);
	struct bmc150_accel_interrupt *intr = &data->interrupts[i];
	const struct bmc150_accel_interrupt_info *info = intr->info;
	int ret;

	if (state) {
		if (atomic_inc_return(&intr->users) > 1)
			return 0;
	} else {
		if (atomic_dec_return(&intr->users) > 0)
			return 0;
	}

	/*
	 * We expect enable and disable to be called in reverse order. This
	 * happens here anyway: the resume operation uses synchronous runtime
	 * pm calls, while the suspend operation is delayed by the autosuspend
	 * delay, so the disable still runs in reverse order of the enable.
	 * When runtime pm is disabled the device is always kept in normal
	 * mode, so the sequence doesn't matter.
	 */
	ret = bmc150_accel_set_power_state(data, state);
	if (ret < 0)
		return ret;

	/* map the interrupt to the appropriate pins */
	ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
				 (state ? info->map_bitmask : 0));
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_map\n");
		goto out_fix_power_state;
	}

	/* enable/disable the interrupt */
	ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
				 (state ? info->en_bitmask : 0));
	if (ret < 0) {
		dev_err(dev, "Error updating reg_int_en\n");
		goto out_fix_power_state;
	}

	return 0;

out_fix_power_state:
	bmc150_accel_set_power_state(data, false);
	return ret;
}
static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
		if (data->chip_info->scale_table[i].scale == val) {
			ret = regmap_write(data->regmap,
				     BMC150_ACCEL_REG_PMU_RANGE,
				     data->chip_info->scale_table[i].reg_range);
			if (ret < 0) {
				dev_err(dev, "Error writing pmu_range\n");
				return ret;
			}

			data->range = data->chip_info->scale_table[i].reg_range;
			return 0;
		}
	}

	return -EINVAL;
}

static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;
	unsigned int value;

	mutex_lock(&data->mutex);

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_temp\n");
		mutex_unlock(&data->mutex);
		return ret;
	}
	*val = sign_extend32(value, 7);

	mutex_unlock(&data->mutex);

	return IIO_VAL_INT;
}

static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
				 struct iio_chan_spec const *chan,
				 int *val)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret;
	int axis = chan->scan_index;
	__le16 raw_val;

	mutex_lock(&data->mutex);
	ret = bmc150_accel_set_power_state(data, true);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
			       &raw_val, sizeof(raw_val));
	if (ret < 0) {
		dev_err(dev, "Error reading axis %d\n", axis);
		bmc150_accel_set_power_state(data, false);
		mutex_unlock(&data->mutex);
		return ret;
	}
	*val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
			     chan->scan_type.realbits - 1);
	ret = bmc150_accel_set_power_state(data, false);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		return ret;

	return IIO_VAL_INT;
}
static int bmc150_accel_read_raw(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 int *val, int *val2, long mask)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		switch (chan->type) {
		case IIO_TEMP:
			return bmc150_accel_get_temp(data, val);
		case IIO_ACCEL:
			if (iio_buffer_enabled(indio_dev))
				return -EBUSY;
			else
				return bmc150_accel_get_axis(data, chan, val);
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		if (chan->type == IIO_TEMP) {
			*val = BMC150_ACCEL_TEMP_CENTER_VAL;
			return IIO_VAL_INT;
		} else {
			return -EINVAL;
		}
	case IIO_CHAN_INFO_SCALE:
		*val = 0;
		switch (chan->type) {
		case IIO_TEMP:
			*val2 = 500000;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_ACCEL:
		{
			int i;
			const struct bmc150_scale_info *si;
			int st_size = ARRAY_SIZE(data->chip_info->scale_table);

			for (i = 0; i < st_size; ++i) {
				si = &data->chip_info->scale_table[i];
				if (si->reg_range == data->range) {
					*val2 = si->scale;
					return IIO_VAL_INT_PLUS_MICRO;
				}
			}
			return -EINVAL;
		}
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = bmc150_accel_get_bw(data, val, val2);
		mutex_unlock(&data->mutex);
		return ret;
	default:
		return -EINVAL;
	}
}

static int bmc150_accel_write_raw(struct iio_dev *indio_dev,
				  struct iio_chan_spec const *chan,
				  int val, int val2, long mask)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		mutex_lock(&data->mutex);
		ret = bmc150_accel_set_bw(data, val, val2);
		mutex_unlock(&data->mutex);
		break;
	case IIO_CHAN_INFO_SCALE:
		if (val)
			return -EINVAL;

		mutex_lock(&data->mutex);
		ret = bmc150_accel_set_scale(data, val2);
		mutex_unlock(&data->mutex);
		return ret;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int bmc150_accel_read_event(struct iio_dev *indio_dev,
				   const struct iio_chan_spec *chan,
				   enum iio_event_type type,
				   enum iio_event_direction dir,
				   enum iio_event_info info,
				   int *val, int *val2)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	*val2 = 0;
	switch (info) {
	case IIO_EV_INFO_VALUE:
		*val = data->slope_thres;
		break;
	case IIO_EV_INFO_PERIOD:
		*val = data->slope_dur;
		break;
	default:
		return -EINVAL;
	}

	return IIO_VAL_INT;
}

static int bmc150_accel_write_event(struct iio_dev *indio_dev,
				    const struct iio_chan_spec *chan,
				    enum iio_event_type type,
				    enum iio_event_direction dir,
				    enum iio_event_info info,
				    int val, int val2)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (data->ev_enable_state)
		return -EBUSY;

	switch (info) {
	case IIO_EV_INFO_VALUE:
		data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
		break;
	case IIO_EV_INFO_PERIOD:
		data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int bmc150_accel_read_event_config(struct iio_dev *indio_dev,
					  const struct iio_chan_spec *chan,
					  enum iio_event_type type,
					  enum iio_event_direction dir)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return data->ev_enable_state;
}

static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
					   const struct iio_chan_spec *chan,
					   enum iio_event_type type,
					   enum iio_event_direction dir,
					   int state)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	if (state == data->ev_enable_state)
		return 0;

	mutex_lock(&data->mutex);
	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_ANY_MOTION,
					 state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	data->ev_enable_state = state;
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
					 struct iio_trigger *trig)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int i;

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		if (data->triggers[i].indio_trig == trig)
			return 0;
	}

	return -EINVAL;
}
static ssize_t bmc150_accel_get_fifo_watermark(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int wm;

	mutex_lock(&data->mutex);
	wm = data->watermark;
	mutex_unlock(&data->mutex);

	return sprintf(buf, "%d\n", wm);
}

static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	bool state;

	mutex_lock(&data->mutex);
	state = data->fifo_mode;
	mutex_unlock(&data->mutex);

	return sprintf(buf, "%d\n", state);
}

static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
static IIO_CONST_ATTR(hwfifo_watermark_max,
		      __stringify(BMC150_ACCEL_FIFO_LENGTH));
static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
		       bmc150_accel_get_fifo_state, NULL, 0);
static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
		       bmc150_accel_get_fifo_watermark, NULL, 0);

static const struct attribute *bmc150_accel_fifo_attributes[] = {
	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
	NULL,
};

static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (val > BMC150_ACCEL_FIFO_LENGTH)
		val = BMC150_ACCEL_FIFO_LENGTH;

	mutex_lock(&data->mutex);
	data->watermark = val;
	mutex_unlock(&data->mutex);

	return 0;
}
/*
 * We must read at least one full frame in one burst, otherwise the rest of the
 * frame data is discarded.
 */
static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
				      char *buffer, int samples)
{
	struct device *dev = regmap_get_device(data->regmap);
	int sample_length = 3 * 2;
	int ret;
	int total_length = samples * sample_length;
	int i;
	size_t step = regmap_get_raw_read_max(data->regmap);

	if (!step || step > total_length)
		step = total_length;
	else if (step < total_length)
		step = sample_length;

	/*
	 * Seems we have a bus with size limitation so we have to execute
	 * multiple reads
	 */
	for (i = 0; i < total_length; i += step) {
		ret = regmap_raw_read(data->regmap, BMC150_ACCEL_REG_FIFO_DATA,
				      &buffer[i], step);
		if (ret)
			break;
	}

	if (ret)
		dev_err(dev,
			"Error transferring data from fifo in single steps of %zu\n",
			step);

	return ret;
}
static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
				     unsigned samples, bool irq)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;
	u8 count;
	u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
	int64_t tstamp;
	uint64_t sample_period;
	unsigned int val;

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_fifo_status\n");
		return ret;
	}

	count = val & 0x7F;

	if (!count)
		return 0;

	/*
	 * If we are called from the IRQ handler we know the stored timestamp
	 * is fairly accurate for the last stored sample. Otherwise, if we are
	 * called as a result of a read operation from userspace and hence
	 * before the watermark interrupt was triggered, take a timestamp
	 * now. We can fall anywhere in between two samples so the error in
	 * this case is at most one sample period.
	 */
	if (!irq) {
		data->old_timestamp = data->timestamp;
		data->timestamp = iio_get_time_ns(indio_dev);
	}

	/*
	 * Approximate timestamps for each of the samples based on the
	 * sampling frequency, the timestamp of the last sample and the number
	 * of samples.
	 *
	 * Note that we can't use the current bandwidth settings to compute
	 * the sample period because the sample rate varies with the device
	 * (e.g. between 31.70ms and 32.20ms for a bandwidth of 15.63Hz). That
	 * small variation adds up when we store a large number of samples and
	 * creates significant jitter between the last and first samples in
	 * different batches (e.g. 32ms vs 21ms).
	 *
	 * To avoid this issue we compute the actual sample period ourselves
	 * based on the timestamp delta between the last two flush operations.
	 */
	sample_period = (data->timestamp - data->old_timestamp);
	do_div(sample_period, count);
	tstamp = data->timestamp - (count - 1) * sample_period;

	if (samples && count > samples)
		count = samples;

	ret = bmc150_accel_fifo_transfer(data, (u8 *)buffer, count);
	if (ret)
		return ret;

	/*
	 * Ideally we want the IIO core to handle the demux when running in
	 * fifo mode but not when running in triggered buffer mode.
	 * Unfortunately this does not seem to be possible, so stick with
	 * driver demux for now.
	 */
	for (i = 0; i < count; i++) {
		u16 sample[8];
		int j, bit;

		j = 0;
		for_each_set_bit(bit, indio_dev->active_scan_mask,
				 indio_dev->masklength)
			memcpy(&sample[j++], &buffer[i * 3 + bit], 2);

		iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp);

		tstamp += sample_period;
	}

	return count;
}

static int bmc150_accel_fifo_flush(struct iio_dev *indio_dev, unsigned samples)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = __bmc150_accel_fifo_flush(indio_dev, samples, false);
	mutex_unlock(&data->mutex);

	return ret;
}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
		"15.620000 31.260000 62.50000 125 250 500 1000 2000");

static struct attribute *bmc150_accel_attributes[] = {
	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group bmc150_accel_attrs_group = {
	.attrs = bmc150_accel_attributes,
};

static const struct iio_event_spec bmc150_accel_event = {
	.type = IIO_EV_TYPE_ROC,
	.dir = IIO_EV_DIR_EITHER,
	.mask_separate = BIT(IIO_EV_INFO_VALUE) |
			 BIT(IIO_EV_INFO_ENABLE) |
			 BIT(IIO_EV_INFO_PERIOD)
};

#define BMC150_ACCEL_CHANNEL(_axis, bits) {			\
	.type = IIO_ACCEL,					\
	.modified = 1,						\
	.channel2 = IIO_MOD_##_axis,				\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |	\
				BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
	.scan_index = AXIS_##_axis,				\
	.scan_type = {						\
		.sign = 's',					\
		.realbits = (bits),				\
		.storagebits = 16,				\
		.shift = 16 - (bits),				\
		.endianness = IIO_LE,				\
	},							\
	.event_spec = &bmc150_accel_event,			\
	.num_event_specs = 1					\
}

#define BMC150_ACCEL_CHANNELS(bits) {				\
	{							\
		.type = IIO_TEMP,				\
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |	\
				BIT(IIO_CHAN_INFO_SCALE) |	\
				BIT(IIO_CHAN_INFO_OFFSET),	\
		.scan_index = -1,				\
	},							\
	BMC150_ACCEL_CHANNEL(X, bits),				\
	BMC150_ACCEL_CHANNEL(Y, bits),				\
	BMC150_ACCEL_CHANNEL(Z, bits),				\
	IIO_CHAN_SOFT_TIMESTAMP(3),				\
}
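/*
 * The supported chip variants report 8, 10, 12 or 14 bit samples,
 * left-justified in the 16-bit output registers; hence the
 * scan_type.shift of 16 - realbits in the macro above.
 */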
static const struct iio_chan_spec bma222e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(8);
static const struct iio_chan_spec bma250e_accel_channels[] =
	BMC150_ACCEL_CHANNELS(10);
static const struct iio_chan_spec bmc150_accel_channels[] =
	BMC150_ACCEL_CHANNELS(12);
static const struct iio_chan_spec bma280_accel_channels[] =
	BMC150_ACCEL_CHANNELS(14);
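/*
 * scale_table entries are in micro m/s^2 per LSB for the given range
 * register value; read_raw() reports them via IIO_VAL_INT_PLUS_MICRO with
 * an integer part of 0.
 */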
static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
	[bmc150] = {
		.name = "BMC150A",
		.chip_id = 0xFA,
		.channels = bmc150_accel_channels,
		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
		.scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
				 {19122, BMC150_ACCEL_DEF_RANGE_4G},
				 {38344, BMC150_ACCEL_DEF_RANGE_8G},
				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	[bmi055] = {
		.name = "BMI055A",
		.chip_id = 0xFA,
		.channels = bmc150_accel_channels,
		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
		.scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
				 {19122, BMC150_ACCEL_DEF_RANGE_4G},
				 {38344, BMC150_ACCEL_DEF_RANGE_8G},
				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	[bma255] = {
		.name = "BMA0255",
		.chip_id = 0xFA,
		.channels = bmc150_accel_channels,
		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
		.scale_table = { {9610, BMC150_ACCEL_DEF_RANGE_2G},
				 {19122, BMC150_ACCEL_DEF_RANGE_4G},
				 {38344, BMC150_ACCEL_DEF_RANGE_8G},
				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	[bma250e] = {
		.name = "BMA250E",
		.chip_id = 0xF9,
		.channels = bma250e_accel_channels,
		.num_channels = ARRAY_SIZE(bma250e_accel_channels),
		.scale_table = { {38344, BMC150_ACCEL_DEF_RANGE_2G},
				 {76590, BMC150_ACCEL_DEF_RANGE_4G},
				 {153277, BMC150_ACCEL_DEF_RANGE_8G},
				 {306457, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	[bma222e] = {
		.name = "BMA222E",
		.chip_id = 0xF8,
		.channels = bma222e_accel_channels,
		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
		.scale_table = { {153277, BMC150_ACCEL_DEF_RANGE_2G},
				 {306457, BMC150_ACCEL_DEF_RANGE_4G},
				 {612915, BMC150_ACCEL_DEF_RANGE_8G},
				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
	},
	[bma280] = {
		.name = "BMA0280",
		.chip_id = 0xFB,
		.channels = bma280_accel_channels,
		.num_channels = ARRAY_SIZE(bma280_accel_channels),
		.scale_table = { {2392, BMC150_ACCEL_DEF_RANGE_2G},
				 {4785, BMC150_ACCEL_DEF_RANGE_4G},
				 {9581, BMC150_ACCEL_DEF_RANGE_8G},
				 {19152, BMC150_ACCEL_DEF_RANGE_16G} },
	},
};

static const struct iio_info bmc150_accel_info = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
};

static const struct iio_info bmc150_accel_info_fifo = {
	.attrs = &bmc150_accel_attrs_group,
	.read_raw = bmc150_accel_read_raw,
	.write_raw = bmc150_accel_write_raw,
	.read_event_value = bmc150_accel_read_event,
	.write_event_value = bmc150_accel_write_event,
	.write_event_config = bmc150_accel_write_event_config,
	.read_event_config = bmc150_accel_read_event_config,
	.validate_trigger = bmc150_accel_validate_trigger,
	.hwfifo_set_watermark = bmc150_accel_set_watermark,
	.hwfifo_flush_to_buffer = bmc150_accel_fifo_flush,
};

static const unsigned long bmc150_accel_scan_masks[] = {
	BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
	0};
static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	mutex_lock(&data->mutex);
	ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
			       data->buffer, AXIS_MAX * 2);
	mutex_unlock(&data->mutex);
	if (ret < 0)
		goto err_read;

	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
					   pf->timestamp);
err_read:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
{
	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
	struct bmc150_accel_data *data = t->data;
	struct device *dev = regmap_get_device(data->regmap);
	int ret;

	/* new data interrupts don't need ack */
	if (t == &t->data->triggers[BMC150_ACCEL_TRIGGER_DATA_READY])
		return 0;

	mutex_lock(&data->mutex);
	/* clear any latched interrupt */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
			   BMC150_ACCEL_INT_MODE_LATCH_INT |
			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
	mutex_unlock(&data->mutex);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_int_rst_latch\n");
		return ret;
	}

	return 0;
}

static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
					  bool state)
{
	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
	struct bmc150_accel_data *data = t->data;
	int ret;

	mutex_lock(&data->mutex);

	if (t->enabled == state) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	if (t->setup) {
		ret = t->setup(t, state);
		if (ret < 0) {
			mutex_unlock(&data->mutex);
			return ret;
		}
	}

	ret = bmc150_accel_set_interrupt(data, t->intr, state);
	if (ret < 0) {
		mutex_unlock(&data->mutex);
		return ret;
	}

	t->enabled = state;

	mutex_unlock(&data->mutex);

	return ret;
}

static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
	.set_trigger_state = bmc150_accel_trigger_set_state,
	.try_reenable = bmc150_accel_trig_try_reen,
};
static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	int dir;
	int ret;
	unsigned int val;

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
	if (ret < 0) {
		dev_err(dev, "Error reading reg_int_status_2\n");
		return ret;
	}

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_SIGN)
		dir = IIO_EV_DIR_FALLING;
	else
		dir = IIO_EV_DIR_RISING;

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_X)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_X,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Y)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_Y,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	if (val & BMC150_ACCEL_ANY_MOTION_BIT_Z)
		iio_push_event(indio_dev,
			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
						  0,
						  IIO_MOD_Z,
						  IIO_EV_TYPE_ROC,
						  dir),
			       data->timestamp);

	return ret;
}
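/*
 * IRQ handling is split in two: the hard handler only timestamps and kicks
 * any enabled trigger, while the threaded handler below drains the FIFO and
 * pushes any-motion events, since both need (possibly sleeping) bus access.
 */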
static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	struct device *dev = regmap_get_device(data->regmap);
	bool ack = false;
	int ret;

	mutex_lock(&data->mutex);

	if (data->fifo_mode) {
		ret = __bmc150_accel_fifo_flush(indio_dev,
						BMC150_ACCEL_FIFO_LENGTH, true);
		if (ret > 0)
			ack = true;
	}

	if (data->ev_enable_state) {
		ret = bmc150_accel_handle_roc_event(indio_dev);
		if (ret > 0)
			ack = true;
	}

	if (ack) {
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_INT |
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret)
			dev_err(dev, "Error writing reg_int_rst_latch\n");

		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	mutex_unlock(&data->mutex);

	return ret;
}

static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	bool ack = false;
	int i;

	data->old_timestamp = data->timestamp;
	data->timestamp = iio_get_time_ns(indio_dev);

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		if (data->triggers[i].enabled) {
			iio_trigger_poll(data->triggers[i].indio_trig);
			ack = true;
			break;
		}
	}

	if (data->ev_enable_state || data->fifo_mode)
		return IRQ_WAKE_THREAD;

	if (ack)
		return IRQ_HANDLED;

	return IRQ_NONE;
}
static const struct {
	int intr;
	const char *name;
	int (*setup)(struct bmc150_accel_trigger *t, bool state);
} bmc150_accel_triggers[BMC150_ACCEL_TRIGGERS] = {
	{
		.intr = 0,
		.name = "%s-dev%d",
	},
	{
		.intr = 1,
		.name = "%s-any-motion-dev%d",
		.setup = bmc150_accel_any_motion_setup,
	},
};

static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
					     int from)
{
	int i;

	for (i = from; i >= 0; i--) {
		if (data->triggers[i].indio_trig) {
			iio_trigger_unregister(data->triggers[i].indio_trig);
			data->triggers[i].indio_trig = NULL;
		}
	}
}

static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
				       struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int i, ret;

	for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
		struct bmc150_accel_trigger *t = &data->triggers[i];

		t->indio_trig = devm_iio_trigger_alloc(dev,
					bmc150_accel_triggers[i].name,
					indio_dev->name,
					indio_dev->id);
		if (!t->indio_trig) {
			ret = -ENOMEM;
			break;
		}

		t->indio_trig->dev.parent = dev;
		t->indio_trig->ops = &bmc150_accel_trigger_ops;
		t->intr = bmc150_accel_triggers[i].intr;
		t->data = data;
		t->setup = bmc150_accel_triggers[i].setup;
		iio_trigger_set_drvdata(t->indio_trig, t);

		ret = iio_trigger_register(t->indio_trig);
		if (ret)
			break;
	}

	if (ret)
		bmc150_accel_unregister_triggers(data, i - 1);

	return ret;
}
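/*
 * FIFO_CONFIG1 mode field values; this driver only uses BYPASS (FIFO off)
 * and FIFO mode, the STREAM value is currently unused.
 */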
#define BMC150_ACCEL_FIFO_MODE_STREAM 0x80
#define BMC150_ACCEL_FIFO_MODE_FIFO 0x40
#define BMC150_ACCEL_FIFO_MODE_BYPASS 0x00

static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
	int ret;

	ret = regmap_write(data->regmap, reg, data->fifo_mode);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_fifo_config1\n");
		return ret;
	}

	if (!data->fifo_mode)
		return 0;

	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
			   data->watermark);
	if (ret < 0)
		dev_err(dev, "Error writing reg_fifo_config0\n");

	return ret;
}
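/*
 * Buffer setup ops: preenable/postdisable handle runtime PM, while
 * postenable/predisable arm or tear down the hardware FIFO and watermark
 * interrupt when the buffer is used in software (non-triggered) mode.
 */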
static int bmc150_accel_buffer_preenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return bmc150_accel_set_power_state(data, true);
}

static int bmc150_accel_buffer_postenable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret = 0;

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
		return iio_triggered_buffer_postenable(indio_dev);

	mutex_lock(&data->mutex);

	if (!data->watermark)
		goto out;

	ret = bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					 true);
	if (ret)
		goto out;

	data->fifo_mode = BMC150_ACCEL_FIFO_MODE_FIFO;

	ret = bmc150_accel_fifo_set_mode(data);
	if (ret) {
		data->fifo_mode = 0;
		bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK,
					   false);
	}

out:
	mutex_unlock(&data->mutex);

	return ret;
}

static int bmc150_accel_buffer_predisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
		return iio_triggered_buffer_predisable(indio_dev);

	mutex_lock(&data->mutex);

	if (!data->fifo_mode)
		goto out;

	bmc150_accel_set_interrupt(data, BMC150_ACCEL_INT_WATERMARK, false);
	__bmc150_accel_fifo_flush(indio_dev, BMC150_ACCEL_FIFO_LENGTH, false);
	data->fifo_mode = 0;
	bmc150_accel_fifo_set_mode(data);

out:
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_buffer_postdisable(struct iio_dev *indio_dev)
{
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	return bmc150_accel_set_power_state(data, false);
}

static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
	.preenable = bmc150_accel_buffer_preenable,
	.postenable = bmc150_accel_buffer_postenable,
	.predisable = bmc150_accel_buffer_predisable,
	.postdisable = bmc150_accel_buffer_postdisable,
};
static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
{
	struct device *dev = regmap_get_device(data->regmap);
	int ret, i;
	unsigned int val;

	/*
	 * Reset chip to get it in a known good state. A delay of 1.8ms after
	 * reset is required according to the data sheets of supported chips.
	 */
	regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
		     BMC150_ACCEL_RESET_VAL);
	usleep_range(1800, 2500);

	ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
	if (ret < 0) {
		dev_err(dev, "Error: Reading chip id\n");
		return ret;
	}

	dev_dbg(dev, "Chip Id %x\n", val);
	for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
		if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
			data->chip_info = &bmc150_accel_chip_info_tbl[i];
			break;
		}
	}

	if (!data->chip_info) {
		dev_err(dev, "Invalid chip %x\n", val);
		return -ENODEV;
	}

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	if (ret < 0)
		return ret;

	/* Set Bandwidth */
	ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
	if (ret < 0)
		return ret;

	/* Set Default Range */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
			   BMC150_ACCEL_DEF_RANGE_4G);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_pmu_range\n");
		return ret;
	}

	data->range = BMC150_ACCEL_DEF_RANGE_4G;

	/* Set default slope duration and thresholds */
	data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
	data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
	ret = bmc150_accel_update_slope(data);
	if (ret < 0)
		return ret;

	/* Set default as latched interrupts */
	ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
			   BMC150_ACCEL_INT_MODE_LATCH_INT |
			   BMC150_ACCEL_INT_MODE_LATCH_RESET);
	if (ret < 0) {
		dev_err(dev, "Error writing reg_int_rst_latch\n");
		return ret;
	}

	return 0;
}
int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
			    const char *name, bool block_supported)
{
	struct bmc150_accel_data *data;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	dev_set_drvdata(dev, indio_dev);
	data->irq = irq;
	data->regmap = regmap;

	ret = bmc150_accel_chip_init(data);
	if (ret < 0)
		return ret;

	mutex_init(&data->mutex);

	indio_dev->dev.parent = dev;
	indio_dev->channels = data->chip_info->channels;
	indio_dev->num_channels = data->chip_info->num_channels;
	indio_dev->name = name ? name : data->chip_info->name;
	indio_dev->available_scan_masks = bmc150_accel_scan_masks;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &bmc150_accel_info;

	ret = iio_triggered_buffer_setup(indio_dev,
					 &iio_pollfunc_store_time,
					 bmc150_accel_trigger_handler,
					 &bmc150_accel_buffer_ops);
	if (ret < 0) {
		dev_err(dev, "Failed: iio triggered buffer setup\n");
		return ret;
	}

	if (data->irq > 0) {
		ret = devm_request_threaded_irq(dev, data->irq,
						bmc150_accel_irq_handler,
						bmc150_accel_irq_thread_handler,
						IRQF_TRIGGER_RISING,
						BMC150_ACCEL_IRQ_NAME,
						indio_dev);
		if (ret)
			goto err_buffer_cleanup;

		/*
		 * Set latched mode interrupt. While certain interrupts are
		 * non-latched regardless of this setting (e.g. new data), we
		 * want to use latched mode when we can to prevent interrupt
		 * flooding.
		 */
		ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
				   BMC150_ACCEL_INT_MODE_LATCH_RESET);
		if (ret < 0) {
			dev_err(dev, "Error writing reg_int_rst_latch\n");
			goto err_buffer_cleanup;
		}

		bmc150_accel_interrupts_setup(indio_dev, data);

		ret = bmc150_accel_triggers_setup(indio_dev, data);
		if (ret)
			goto err_buffer_cleanup;

		if (block_supported) {
			indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
			indio_dev->info = &bmc150_accel_info_fifo;
			iio_buffer_set_attrs(indio_dev->buffer,
					     bmc150_accel_fifo_attributes);
		}
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto err_trigger_unregister;

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, BMC150_AUTO_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);

	ret = iio_device_register(indio_dev);
	if (ret < 0) {
		dev_err(dev, "Unable to register iio device\n");
		goto err_trigger_unregister;
	}

	return 0;

err_trigger_unregister:
	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
err_buffer_cleanup:
	iio_triggered_buffer_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_probe);
int bmc150_accel_core_remove(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);

	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);

	bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);

	iio_triggered_buffer_cleanup(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_DEEP_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(bmc150_accel_core_remove);

#ifdef CONFIG_PM_SLEEP
static int bmc150_accel_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	mutex_unlock(&data->mutex);

	return 0;
}

static int bmc150_accel_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);

	mutex_lock(&data->mutex);
	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	bmc150_accel_fifo_set_mode(data);
	mutex_unlock(&data->mutex);

	return 0;
}
#endif
#ifdef CONFIG_PM
static int bmc150_accel_runtime_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
	if (ret < 0)
		return -EAGAIN;

	return 0;
}

static int bmc150_accel_runtime_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct bmc150_accel_data *data = iio_priv(indio_dev);
	int ret;
	int sleep_val;

	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
	if (ret < 0)
		return ret;

	ret = bmc150_accel_fifo_set_mode(data);
	if (ret < 0)
		return ret;

	sleep_val = bmc150_accel_get_startup_times(data);
	if (sleep_val < 20)
		usleep_range(sleep_val * 1000, 20000);
	else
		msleep_interruptible(sleep_val);

	return 0;
}
#endif

const struct dev_pm_ops bmc150_accel_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(bmc150_accel_suspend, bmc150_accel_resume)
	SET_RUNTIME_PM_OPS(bmc150_accel_runtime_suspend,
			   bmc150_accel_runtime_resume, NULL)
};
EXPORT_SYMBOL_GPL(bmc150_accel_pm_ops);

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BMC150 accelerometer driver");