mvsw61xx.c

/*
 * Marvell 88E61xx switch driver
 *
 * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
 * Copyright (c) 2014 Nikita Nazarenko <nnazarenko@radiofid.com>
 *
 * Based on code (c) 2008 Felix Fietkau <nbd@nbd.name>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License v2 as published by the
 * Free Software Foundation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/switch.h>
#include <linux/device.h>
#include <linux/platform_device.h>

#include "mvsw61xx.h"

MODULE_DESCRIPTION("Marvell 88E61xx Switch driver");
MODULE_AUTHOR("Claudio Leite <leitec@staticky.com>");
MODULE_AUTHOR("Nikita Nazarenko <nnazarenko@radiofid.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mvsw61xx");

/*
 * Register access is done through direct or indirect addressing,
 * depending on how the switch is physically connected.
 *
 * Direct addressing: all port and global registers are directly
 * accessible via an address/register pair.
 *
 * Indirect addressing: the switch is mapped at a single address;
 * port and global registers are accessed via a single command/data
 * register pair.
 */
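/*
 * Poll a register until the bits in 'mask' read back as 'val';
 * gives up after 100 reads and returns -ETIMEDOUT.
 */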
static int
mvsw61xx_wait_mask_raw(struct mii_bus *bus, int addr,
		int reg, u16 mask, u16 val)
{
	int i = 100;
	u16 r;

	do {
		r = bus->read(bus, addr, reg);
		if ((r & mask) == val)
			return 0;
	} while (--i > 0);

	return -ETIMEDOUT;
}

static u16
r16(struct mii_bus *bus, bool indirect, int base_addr, int addr, int reg)
{
	u16 ind_addr;

	if (!indirect)
		return bus->read(bus, addr, reg);

	/* Indirect read: first, make sure the switch is free */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Load address and request read */
	ind_addr = MV_INDIRECT_READ | (addr << MV_INDIRECT_ADDR_S) | reg;
	bus->write(bus, base_addr, MV_INDIRECT_REG_CMD,
			ind_addr);

	/* Wait until it's ready */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Read the requested data */
	return bus->read(bus, base_addr, MV_INDIRECT_REG_DATA);
}

static void
w16(struct mii_bus *bus, bool indirect, int base_addr, int addr,
		int reg, u16 val)
{
	u16 ind_addr;

	if (!indirect) {
		bus->write(bus, addr, reg, val);
		return;
	}

	/* Indirect write: first, make sure the switch is free */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Load the data to be written */
	bus->write(bus, base_addr, MV_INDIRECT_REG_DATA, val);

	/* Wait again for the switch to be free */
	mvsw61xx_wait_mask_raw(bus, base_addr, MV_INDIRECT_REG_CMD,
			MV_INDIRECT_INPROGRESS, 0);

	/* Load address and issue the write command */
	ind_addr = MV_INDIRECT_WRITE | (addr << MV_INDIRECT_ADDR_S) | reg;
	bus->write(bus, base_addr, MV_INDIRECT_REG_CMD,
			ind_addr);
}

/* swconfig support */

static inline u16
sr16(struct switch_dev *dev, int addr, int reg)
{
	struct mvsw61xx_state *state = get_state(dev);

	return r16(state->bus, state->is_indirect, state->base_addr, addr, reg);
}

static inline void
sw16(struct switch_dev *dev, int addr, int reg, u16 val)
{
	struct mvsw61xx_state *state = get_state(dev);

	w16(state->bus, state->is_indirect, state->base_addr, addr, reg, val);
}

static int
mvsw61xx_wait_mask_s(struct switch_dev *dev, int addr,
		int reg, u16 mask, u16 val)
{
	int i = 100;
	u16 r;

	do {
		r = sr16(dev, addr, reg) & mask;
		if (r == val)
			return 0;
	} while (--i > 0);

	return -ETIMEDOUT;
}

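/*
 * Report a port's port-based VLAN map as a string: the raw VLANMAP
 * value, then the member port numbers, with the port itself shown
 * in parentheses when it is not part of its own map.
 */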
static int
mvsw61xx_get_port_mask(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	char *buf = state->buf;
	int port, len, i;
	u16 reg;

	port = val->port_vlan;
	reg = sr16(dev, MV_PORTREG(VLANMAP, port)) & MV_PORTS_MASK;

	len = sprintf(buf, "0x%04x: ", reg);

	for (i = 0; i < MV_PORTS; i++) {
		if (reg & (1 << i))
			len += sprintf(buf + len, "%d ", i);
		else if (i == port)
			len += sprintf(buf + len, "(%d) ", i);
	}

	val->value.s = buf;

	return 0;
}

static int
mvsw61xx_get_port_qmode(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->ports[val->port_vlan].qmode;

	return 0;
}

static int
mvsw61xx_set_port_qmode(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->ports[val->port_vlan].qmode = val->value.i;

	return 0;
}

static int
mvsw61xx_get_port_pvid(struct switch_dev *dev, int port, int *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	*val = state->ports[port].pvid;

	return 0;
}

static int
mvsw61xx_set_port_pvid(struct switch_dev *dev, int port, int val)
{
	struct mvsw61xx_state *state = get_state(dev);

	if (val < 0 || val >= MV_VLANS)
		return -EINVAL;

	state->ports[port].pvid = (u16)val;

	return 0;
}

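/* Translate the hardware port status register into swconfig link state */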
static int
mvsw61xx_get_port_link(struct switch_dev *dev, int port,
		struct switch_port_link *link)
{
	u16 status, speed;

	status = sr16(dev, MV_PORTREG(STATUS, port));

	link->link = status & MV_PORT_STATUS_LINK;
	if (!link->link)
		return 0;

	link->duplex = status & MV_PORT_STATUS_FDX;

	speed = (status & MV_PORT_STATUS_SPEED_MASK) >>
			MV_PORT_STATUS_SPEED_SHIFT;

	switch (speed) {
	case MV_PORT_STATUS_SPEED_10:
		link->speed = SWITCH_PORT_SPEED_10;
		break;
	case MV_PORT_STATUS_SPEED_100:
		link->speed = SWITCH_PORT_SPEED_100;
		break;
	case MV_PORT_STATUS_SPEED_1000:
		link->speed = SWITCH_PORT_SPEED_1000;
		break;
	}

	return 0;
}

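/*
 * Report the member ports of a VLAN from driver state; ports whose
 * per-VLAN egress mode is "tagged" get the swconfig TAGGED flag.
 */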
static int mvsw61xx_get_vlan_ports(struct switch_dev *dev,
		struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i, j, mode, vno;

	vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	for (i = 0, j = 0; i < dev->ports; i++) {
		if (state->vlans[vno].mask & (1 << i)) {
			val->value.ports[j].id = i;

			mode = (state->vlans[vno].port_mode >> (i * 4)) & 0xf;
			if (mode == MV_VTUCTL_EGRESS_TAGGED)
				val->value.ports[j].flags =
					(1 << SWITCH_PORT_FLAG_TAGGED);
			else
				val->value.ports[j].flags = 0;

			j++;
		}
	}

	val->len = j;

	return 0;
}

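/*
 * Rebuild a VLAN's membership mask, per-port egress modes and STU
 * port states from the requested port list; the result is written
 * to hardware later by mvsw61xx_update_state().
 */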
static int mvsw61xx_set_vlan_ports(struct switch_dev *dev,
		struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i, mode, pno, vno;

	vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	state->vlans[vno].mask = 0;
	state->vlans[vno].port_mode = 0;
	state->vlans[vno].port_sstate = 0;

	if (state->vlans[vno].vid == 0)
		state->vlans[vno].vid = vno;

	for (i = 0; i < val->len; i++) {
		pno = val->value.ports[i].id;

		state->vlans[vno].mask |= (1 << pno);
		if (val->value.ports[i].flags &
				(1 << SWITCH_PORT_FLAG_TAGGED))
			mode = MV_VTUCTL_EGRESS_TAGGED;
		else
			mode = MV_VTUCTL_EGRESS_UNTAGGED;

		state->vlans[vno].port_mode |= mode << (pno * 4);
		state->vlans[vno].port_sstate |=
			MV_STUCTL_STATE_FORWARDING << (pno * 4 + 2);
	}

	/*
	 * DISCARD is nonzero, so it must be explicitly
	 * set on ports not in the VLAN.
	 */
	for (i = 0; i < dev->ports; i++)
		if (!(state->vlans[vno].mask & (1 << i)))
			state->vlans[vno].port_mode |=
				MV_VTUCTL_DISCARD << (i * 4);

	return 0;
}

static int mvsw61xx_get_vlan_port_based(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	if (state->vlans[vno].port_based)
		val->value.i = 1;
	else
		val->value.i = 0;

	return 0;
}

static int mvsw61xx_set_vlan_port_based(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	if (val->value.i == 1)
		state->vlans[vno].port_based = true;
	else
		state->vlans[vno].port_based = false;

	return 0;
}

static int mvsw61xx_get_vid(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	val->value.i = state->vlans[vno].vid;

	return 0;
}

static int mvsw61xx_set_vid(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);
	int vno = val->port_vlan;

	if (vno <= 0 || vno >= dev->vlans)
		return -EINVAL;

	state->vlans[vno].vid = val->value.i;

	return 0;
}

static int mvsw61xx_get_enable_vlan(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	val->value.i = state->vlan_enabled;

	return 0;
}

static int mvsw61xx_set_enable_vlan(struct switch_dev *dev,
		const struct switch_attr *attr, struct switch_val *val)
{
	struct mvsw61xx_state *state = get_state(dev);

	state->vlan_enabled = val->value.i;

	return 0;
}

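/*
 * Program the hardware VTU: flush the table, then for every 802.1q
 * VLAN in driver state load its per-port STU states followed by its
 * VTU entry (VID, FID and per-port egress modes).
 */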
static int mvsw61xx_vtu_program(struct switch_dev *dev)
{
	struct mvsw61xx_state *state = get_state(dev);
	u16 v1, v2, s1, s2;
	int i;

	/* Flush */
	mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
			MV_VTUOP_INPROGRESS, 0);
	sw16(dev, MV_GLOBALREG(VTU_OP),
			MV_VTUOP_INPROGRESS | MV_VTUOP_PURGE);

	/* Write VLAN table */
	for (i = 1; i < dev->vlans; i++) {
		if (state->vlans[i].mask == 0 ||
				state->vlans[i].vid == 0 ||
				state->vlans[i].port_based == true)
			continue;

		mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS, 0);

		/* Write per-VLAN port state into STU */
		s1 = (u16) (state->vlans[i].port_sstate & 0xffff);
		s2 = (u16) ((state->vlans[i].port_sstate >> 16) & 0xffff);

		sw16(dev, MV_GLOBALREG(VTU_VID), MV_VTU_VID_VALID);
		sw16(dev, MV_GLOBALREG(VTU_SID), i);
		sw16(dev, MV_GLOBALREG(VTU_DATA1), s1);
		sw16(dev, MV_GLOBALREG(VTU_DATA2), s2);
		sw16(dev, MV_GLOBALREG(VTU_DATA3), 0);
		sw16(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS | MV_VTUOP_STULOAD);
		mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS, 0);

		/* Write VLAN information into VTU */
		v1 = (u16) (state->vlans[i].port_mode & 0xffff);
		v2 = (u16) ((state->vlans[i].port_mode >> 16) & 0xffff);

		sw16(dev, MV_GLOBALREG(VTU_VID),
				MV_VTU_VID_VALID | state->vlans[i].vid);
		sw16(dev, MV_GLOBALREG(VTU_SID), i);
		sw16(dev, MV_GLOBALREG(VTU_FID), i);
		sw16(dev, MV_GLOBALREG(VTU_DATA1), v1);
		sw16(dev, MV_GLOBALREG(VTU_DATA2), v2);
		sw16(dev, MV_GLOBALREG(VTU_DATA3), 0);
		sw16(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS | MV_VTUOP_LOAD);
		mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(VTU_OP),
				MV_VTUOP_INPROGRESS, 0);
	}

	return 0;
}

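/*
 * Fold one VLAN's configuration into per-port state: set the PVID on
 * untagged member ports, and either extend the port-based mask/FDB
 * (port-based VLANs) or switch the port to 802.1q secure mode.
 */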
static void mvsw61xx_vlan_port_config(struct switch_dev *dev, int vno)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i, mode;

	for (i = 0; i < dev->ports; i++) {
		if (!(state->vlans[vno].mask & (1 << i)))
			continue;

		mode = (state->vlans[vno].port_mode >> (i * 4)) & 0xf;

		if (mode != MV_VTUCTL_EGRESS_TAGGED)
			state->ports[i].pvid = state->vlans[vno].vid;

		if (state->vlans[vno].port_based) {
			state->ports[i].mask |= state->vlans[vno].mask;
			state->ports[i].fdb = vno;
		} else
			state->ports[i].qmode = MV_8021Q_MODE_SECURE;
	}
}

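/*
 * Push the complete driver state (global 802.1q mode, per-port PVID,
 * FDB, port-based masks and 802.1q mode, and the VTU) to the switch.
 */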
static int mvsw61xx_update_state(struct switch_dev *dev)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i;
	u16 reg;

	if (!state->registered)
		return -EINVAL;

	/*
	 * Set 802.1q-only mode if vlan_enabled is true.
	 *
	 * Without this, even if 802.1q is enabled for
	 * a port/VLAN, it still depends on the port-based
	 * VLAN mask being set.
	 *
	 * With this setting, port-based VLANs are still
	 * functional, provided the VID is not in the VTU.
	 */
	reg = sr16(dev, MV_GLOBAL2REG(SDET_POLARITY));

	if (state->vlan_enabled)
		reg |= MV_8021Q_VLAN_ONLY;
	else
		reg &= ~MV_8021Q_VLAN_ONLY;

	sw16(dev, MV_GLOBAL2REG(SDET_POLARITY), reg);

	/*
	 * Set port-based VLAN masks on each port
	 * based only on VLAN definitions known to
	 * the driver (i.e. in state).
	 *
	 * This means any pre-existing port mapping is
	 * wiped out once our driver is initialized.
	 */
	for (i = 0; i < dev->ports; i++) {
		state->ports[i].mask = 0;
		state->ports[i].qmode = MV_8021Q_MODE_DISABLE;
	}

	for (i = 0; i < dev->vlans; i++)
		mvsw61xx_vlan_port_config(dev, i);

	for (i = 0; i < dev->ports; i++) {
		reg = sr16(dev, MV_PORTREG(VLANID, i)) & ~MV_PVID_MASK;
		reg |= state->ports[i].pvid;
		sw16(dev, MV_PORTREG(VLANID, i), reg);

		state->ports[i].mask &= ~(1 << i);

		/* set default forwarding DB number and port mask */
		reg = sr16(dev, MV_PORTREG(CONTROL1, i)) & ~MV_FDB_HI_MASK;
		reg |= (state->ports[i].fdb >> MV_FDB_HI_SHIFT) &
				MV_FDB_HI_MASK;
		sw16(dev, MV_PORTREG(CONTROL1, i), reg);

		reg = ((state->ports[i].fdb & 0xf) << MV_FDB_LO_SHIFT) |
				state->ports[i].mask;
		sw16(dev, MV_PORTREG(VLANMAP, i), reg);

		reg = sr16(dev, MV_PORTREG(CONTROL2, i)) &
				~MV_8021Q_MODE_MASK;
		reg |= state->ports[i].qmode << MV_8021Q_MODE_SHIFT;
		sw16(dev, MV_PORTREG(CONTROL2, i), reg);
	}

	mvsw61xx_vtu_program(dev);

	return 0;
}

static int mvsw61xx_apply(struct switch_dev *dev)
{
	return mvsw61xx_update_state(dev);
}

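/*
 * Soft-reset the switch: stop forwarding on all ports, issue a global
 * reset, clear all driver VLAN/port state, reapply it, then re-enable
 * forwarding.
 */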
static int mvsw61xx_reset(struct switch_dev *dev)
{
	struct mvsw61xx_state *state = get_state(dev);
	int i;
	u16 reg;

	/* Disable all ports before reset */
	for (i = 0; i < dev->ports; i++) {
		reg = sr16(dev, MV_PORTREG(CONTROL, i)) &
			~MV_PORTCTRL_FORWARDING;
		sw16(dev, MV_PORTREG(CONTROL, i), reg);
	}

	reg = sr16(dev, MV_GLOBALREG(CONTROL)) | MV_CONTROL_RESET;

	sw16(dev, MV_GLOBALREG(CONTROL), reg);
	if (mvsw61xx_wait_mask_s(dev, MV_GLOBALREG(CONTROL),
			MV_CONTROL_RESET, 0) < 0)
		return -ETIMEDOUT;

	for (i = 0; i < dev->ports; i++) {
		state->ports[i].fdb = 0;
		state->ports[i].qmode = 0;
		state->ports[i].mask = 0;
		state->ports[i].pvid = 0;

		/* Force flow control off */
		reg = sr16(dev, MV_PORTREG(PHYCTL, i)) & ~MV_PHYCTL_FC_MASK;
		reg |= MV_PHYCTL_FC_DISABLE;
		sw16(dev, MV_PORTREG(PHYCTL, i), reg);

		/* Set port association vector */
		sw16(dev, MV_PORTREG(ASSOC, i), (1 << i));
	}

	for (i = 0; i < dev->vlans; i++) {
		state->vlans[i].port_based = false;
		state->vlans[i].mask = 0;
		state->vlans[i].vid = 0;
		state->vlans[i].port_mode = 0;
		state->vlans[i].port_sstate = 0;
	}

	state->vlan_enabled = 0;

	mvsw61xx_update_state(dev);

	/* Re-enable ports */
	for (i = 0; i < dev->ports; i++) {
		reg = sr16(dev, MV_PORTREG(CONTROL, i)) |
			MV_PORTCTRL_FORWARDING;
		sw16(dev, MV_PORTREG(CONTROL, i), reg);
	}

	return 0;
}

enum {
	MVSW61XX_ENABLE_VLAN,
};

enum {
	MVSW61XX_VLAN_PORT_BASED,
	MVSW61XX_VLAN_ID,
};

enum {
	MVSW61XX_PORT_MASK,
	MVSW61XX_PORT_QMODE,
};

static const struct switch_attr mvsw61xx_global[] = {
	[MVSW61XX_ENABLE_VLAN] = {
		.id = MVSW61XX_ENABLE_VLAN,
		.type = SWITCH_TYPE_INT,
		.name = "enable_vlan",
		.description = "Enable 802.1q VLAN support",
		.get = mvsw61xx_get_enable_vlan,
		.set = mvsw61xx_set_enable_vlan,
	},
};

static const struct switch_attr mvsw61xx_vlan[] = {
	[MVSW61XX_VLAN_PORT_BASED] = {
		.id = MVSW61XX_VLAN_PORT_BASED,
		.type = SWITCH_TYPE_INT,
		.name = "port_based",
		.description = "Use port-based (non-802.1q) VLAN only",
		.get = mvsw61xx_get_vlan_port_based,
		.set = mvsw61xx_set_vlan_port_based,
	},
	[MVSW61XX_VLAN_ID] = {
		.id = MVSW61XX_VLAN_ID,
		.type = SWITCH_TYPE_INT,
		.name = "vid",
		.description = "Get/set VLAN ID",
		.get = mvsw61xx_get_vid,
		.set = mvsw61xx_set_vid,
	},
};

static const struct switch_attr mvsw61xx_port[] = {
	[MVSW61XX_PORT_MASK] = {
		.id = MVSW61XX_PORT_MASK,
		.type = SWITCH_TYPE_STRING,
		.description = "Port-based VLAN mask",
		.name = "mask",
		.get = mvsw61xx_get_port_mask,
		.set = NULL,
	},
	[MVSW61XX_PORT_QMODE] = {
		.id = MVSW61XX_PORT_QMODE,
		.type = SWITCH_TYPE_INT,
		.description = "802.1q mode: 0=off/1=fallback/2=check/3=secure",
		.name = "qmode",
		.get = mvsw61xx_get_port_qmode,
		.set = mvsw61xx_set_port_qmode,
	},
};

static const struct switch_dev_ops mvsw61xx_ops = {
	.attr_global = {
		.attr = mvsw61xx_global,
		.n_attr = ARRAY_SIZE(mvsw61xx_global),
	},
	.attr_vlan = {
		.attr = mvsw61xx_vlan,
		.n_attr = ARRAY_SIZE(mvsw61xx_vlan),
	},
	.attr_port = {
		.attr = mvsw61xx_port,
		.n_attr = ARRAY_SIZE(mvsw61xx_port),
	},
	.get_port_link = mvsw61xx_get_port_link,
	.get_port_pvid = mvsw61xx_get_port_pvid,
	.set_port_pvid = mvsw61xx_set_port_pvid,
	.get_vlan_ports = mvsw61xx_get_vlan_ports,
	.set_vlan_ports = mvsw61xx_set_vlan_ports,
	.apply_config = mvsw61xx_apply,
	.reset_switch = mvsw61xx_reset,
};

/* end swconfig stuff */

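/*
 * Probe: look up the MII bus from the "mii-bus" phandle, detect the
 * switch model via its identification register, read the CPU port(s)
 * from the device tree and register with swconfig.
 */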
static int mvsw61xx_probe(struct platform_device *pdev)
{
	struct mvsw61xx_state *state;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *mdio;
	char *model_str;
	u32 val;
	int err;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	mdio = of_parse_phandle(np, "mii-bus", 0);
	if (!mdio) {
		dev_err(&pdev->dev, "Couldn't get MII bus handle\n");
		err = -ENODEV;
		goto out_err;
	}

	state->bus = of_mdio_find_bus(mdio);
	if (!state->bus) {
		dev_err(&pdev->dev, "Couldn't find MII bus from handle\n");
		err = -ENODEV;
		goto out_err;
	}

	state->is_indirect = of_property_read_bool(np, "is-indirect");

	if (state->is_indirect) {
		if (of_property_read_u32(np, "reg", &val)) {
			dev_err(&pdev->dev, "Switch address not specified\n");
			err = -ENODEV;
			goto out_err;
		}

		state->base_addr = val;
	} else {
		state->base_addr = MV_BASE;
	}

	state->model = r16(state->bus, state->is_indirect, state->base_addr,
				MV_PORTREG(IDENT, 0)) & MV_IDENT_MASK;

	switch (state->model) {
	case MV_IDENT_VALUE_6171:
		model_str = MV_IDENT_STR_6171;
		break;
	case MV_IDENT_VALUE_6172:
		model_str = MV_IDENT_STR_6172;
		break;
	case MV_IDENT_VALUE_6176:
		model_str = MV_IDENT_STR_6176;
		break;
	default:
		dev_err(&pdev->dev, "No compatible switch found at 0x%02x\n",
				state->base_addr);
		err = -ENODEV;
		goto out_err;
	}

	platform_set_drvdata(pdev, state);
	dev_info(&pdev->dev, "Found %s at %s:%02x\n", model_str,
			state->bus->id, state->base_addr);

	dev_info(&pdev->dev, "Using %sdirect addressing\n",
			(state->is_indirect ? "in" : ""));

	if (of_property_read_u32(np, "cpu-port-0", &val)) {
		dev_err(&pdev->dev, "CPU port not set\n");
		err = -ENODEV;
		goto out_err;
	}

	state->cpu_port0 = val;

	if (!of_property_read_u32(np, "cpu-port-1", &val))
		state->cpu_port1 = val;
	else
		state->cpu_port1 = -1;

	state->dev.vlans = MV_VLANS;
	state->dev.cpu_port = state->cpu_port0;
	state->dev.ports = MV_PORTS;
	state->dev.name = model_str;
	state->dev.ops = &mvsw61xx_ops;
	state->dev.alias = dev_name(&pdev->dev);

	err = register_switch(&state->dev, NULL);
	if (err < 0)
		goto out_err;

	state->registered = true;

	return 0;

out_err:
	kfree(state);
	return err;
}

static int
mvsw61xx_remove(struct platform_device *pdev)
{
	struct mvsw61xx_state *state = platform_get_drvdata(pdev);

	if (state->registered)
		unregister_switch(&state->dev);

	kfree(state);

	return 0;
}

static const struct of_device_id mvsw61xx_match[] = {
	{ .compatible = "marvell,88e6171" },
	{ .compatible = "marvell,88e6172" },
	{ .compatible = "marvell,88e6176" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvsw61xx_match);

static struct platform_driver mvsw61xx_driver = {
	.probe = mvsw61xx_probe,
	.remove = mvsw61xx_remove,
	.driver = {
		.name = "mvsw61xx",
		.of_match_table = of_match_ptr(mvsw61xx_match),
		.owner = THIS_MODULE,
	},
};

static int __init mvsw61xx_module_init(void)
{
	return platform_driver_register(&mvsw61xx_driver);
}
late_initcall(mvsw61xx_module_init);

static void __exit mvsw61xx_module_exit(void)
{
	platform_driver_unregister(&mvsw61xx_driver);
}
module_exit(mvsw61xx_module_exit);