0028-NET-lantiq-various-etop-fixes.patch (26 KB)
  1. From 870ed9cae083ff8a60a739ef7e74c5a1800533be Mon Sep 17 00:00:00 2001
  2. From: John Crispin <blogic@openwrt.org>
  3. Date: Tue, 9 Sep 2014 22:45:34 +0200
  4. Subject: [PATCH 28/36] NET: lantiq: various etop fixes
  5. Signed-off-by: John Crispin <blogic@openwrt.org>
  6. ---
  7. drivers/net/ethernet/lantiq_etop.c | 555 +++++++++++++++++++++++++-----------
  8. 1 file changed, 389 insertions(+), 166 deletions(-)
  9. --- a/drivers/net/ethernet/lantiq_etop.c
  10. +++ b/drivers/net/ethernet/lantiq_etop.c
  11. @@ -11,7 +11,7 @@
  12. * You should have received a copy of the GNU General Public License
  13. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  14. *
  15. - * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
  16. + * Copyright (C) 2011-12 John Crispin <blogic@openwrt.org>
  17. */
  18. #include <linux/kernel.h>
  19. @@ -30,11 +30,16 @@
  20. #include <linux/mm.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/ethtool.h>
  23. +#include <linux/if_vlan.h>
  24. #include <linux/init.h>
  25. #include <linux/delay.h>
  26. #include <linux/io.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/module.h>
  29. +#include <linux/clk.h>
  30. +#include <linux/of_net.h>
  31. +#include <linux/of_irq.h>
  32. +#include <linux/of_platform.h>
  33. #include <asm/checksum.h>
  34. @@ -42,7 +47,7 @@
  35. #include <xway_dma.h>
  36. #include <lantiq_platform.h>
  37. -#define LTQ_ETOP_MDIO 0x11804
  38. +#define LTQ_ETOP_MDIO_ACC 0x11804
  39. #define MDIO_REQUEST 0x80000000
  40. #define MDIO_READ 0x40000000
  41. #define MDIO_ADDR_MASK 0x1f
  42. @@ -51,44 +56,91 @@
  43. #define MDIO_REG_OFFSET 0x10
  44. #define MDIO_VAL_MASK 0xffff
  45. -#define PPE32_CGEN 0x800
  46. -#define LQ_PPE32_ENET_MAC_CFG 0x1840
  47. +#define LTQ_ETOP_MDIO_CFG 0x11800
  48. +#define MDIO_CFG_MASK 0x6
  49. +
  50. +#define LTQ_ETOP_CFG 0x11808
  51. +#define LTQ_ETOP_IGPLEN 0x11820
  52. +#define LTQ_ETOP_MAC_CFG 0x11840
  53. #define LTQ_ETOP_ENETS0 0x11850
  54. #define LTQ_ETOP_MAC_DA0 0x1186C
  55. #define LTQ_ETOP_MAC_DA1 0x11870
  56. -#define LTQ_ETOP_CFG 0x16020
  57. -#define LTQ_ETOP_IGPLEN 0x16080
  58. +
  59. +#define MAC_CFG_MASK 0xfff
  60. +#define MAC_CFG_CGEN (1 << 11)
  61. +#define MAC_CFG_DUPLEX (1 << 2)
  62. +#define MAC_CFG_SPEED (1 << 1)
  63. +#define MAC_CFG_LINK (1 << 0)
  64. #define MAX_DMA_CHAN 0x8
  65. #define MAX_DMA_CRC_LEN 0x4
  66. #define MAX_DMA_DATA_LEN 0x600
  67. #define ETOP_FTCU BIT(28)
  68. -#define ETOP_MII_MASK 0xf
  69. -#define ETOP_MII_NORMAL 0xd
  70. -#define ETOP_MII_REVERSE 0xe
  71. #define ETOP_PLEN_UNDER 0x40
  72. -#define ETOP_CGEN 0x800
  73. +#define ETOP_CFG_MII0 0x01
  74. -/* use 2 static channels for TX/RX */
  75. -#define LTQ_ETOP_TX_CHANNEL 1
  76. -#define LTQ_ETOP_RX_CHANNEL 6
  77. -#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
  78. -#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
  79. +#define ETOP_CFG_MASK 0xfff
  80. +#define ETOP_CFG_FEN0 (1 << 8)
  81. +#define ETOP_CFG_SEN0 (1 << 6)
  82. +#define ETOP_CFG_OFF1 (1 << 3)
  83. +#define ETOP_CFG_REMII0 (1 << 1)
  84. +#define ETOP_CFG_OFF0 (1 << 0)
  85. +
  86. +#define LTQ_GBIT_MDIO_CTL 0xCC
  87. +#define LTQ_GBIT_MDIO_DATA 0xd0
  88. +#define LTQ_GBIT_GCTL0 0x68
  89. +#define LTQ_GBIT_PMAC_HD_CTL 0x8c
  90. +#define LTQ_GBIT_P0_CTL 0x4
  91. +#define LTQ_GBIT_PMAC_RX_IPG 0xa8
  92. +#define LTQ_GBIT_RGMII_CTL 0x78
  93. +
  94. +#define PMAC_HD_CTL_AS (1 << 19)
  95. +#define PMAC_HD_CTL_RXSH (1 << 22)
  96. +
  97. +/* Switch Enable (0=disable, 1=enable) */
  98. +#define GCTL0_SE 0x80000000
  99. +/* Disable MDIO auto polling (0=disable, 1=enable) */
  100. +#define PX_CTL_DMDIO 0x00400000
  101. +
  102. +/* MDC clock divider, clock = 25MHz/((MDC_CLOCK + 1) * 2) */
  103. +#define MDC_CLOCK_MASK 0xff000000
  104. +#define MDC_CLOCK_OFFSET 24
  105. +
  106. +/* register information for the gbit's MDIO bus */
  107. +#define MDIO_XR9_REQUEST 0x00008000
  108. +#define MDIO_XR9_READ 0x00000800
  109. +#define MDIO_XR9_WRITE 0x00000400
  110. +#define MDIO_XR9_REG_MASK 0x1f
  111. +#define MDIO_XR9_ADDR_MASK 0x1f
  112. +#define MDIO_XR9_RD_MASK 0xffff
  113. +#define MDIO_XR9_REG_OFFSET 0
  114. +#define MDIO_XR9_ADDR_OFFSET 5
  115. +#define MDIO_XR9_WR_OFFSET 16
  116. +#define LTQ_DMA_ETOP ((of_machine_is_compatible("lantiq,ase")) ? \
  117. + (INT_NUM_IM3_IRL0) : (INT_NUM_IM2_IRL0))
  118. +
  119. +/* the newer xway socks have a embedded 3/7 port gbit multiplexer */
  120. #define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
  121. #define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
  122. #define ltq_etop_w32_mask(x, y, z) \
  123. ltq_w32_mask(x, y, ltq_etop_membase + (z))
  124. -#define DRV_VERSION "1.0"
  125. +#define ltq_gbit_r32(x) ltq_r32(ltq_gbit_membase + (x))
  126. +#define ltq_gbit_w32(x, y) ltq_w32(x, ltq_gbit_membase + (y))
  127. +#define ltq_gbit_w32_mask(x, y, z) \
  128. + ltq_w32_mask(x, y, ltq_gbit_membase + (z))
  129. +
  130. +#define DRV_VERSION "1.2"
  131. static void __iomem *ltq_etop_membase;
  132. +static void __iomem *ltq_gbit_membase;
  133. struct ltq_etop_chan {
  134. - int idx;
  135. int tx_free;
  136. + int irq;
  137. struct net_device *netdev;
  138. struct napi_struct napi;
  139. struct ltq_dma_channel dma;
  140. @@ -98,22 +150,35 @@ struct ltq_etop_chan {
  141. struct ltq_etop_priv {
  142. struct net_device *netdev;
  143. struct platform_device *pdev;
  144. - struct ltq_eth_data *pldata;
  145. struct resource *res;
  146. struct mii_bus *mii_bus;
  147. struct phy_device *phydev;
  148. - struct ltq_etop_chan ch[MAX_DMA_CHAN];
  149. - int tx_free[MAX_DMA_CHAN >> 1];
  150. + struct ltq_etop_chan txch;
  151. + struct ltq_etop_chan rxch;
  152. +
  153. + int tx_irq;
  154. + int rx_irq;
  155. +
  156. + unsigned char mac[6];
  157. + int mii_mode;
  158. spinlock_t lock;
  159. +
  160. + struct clk *clk_ppe;
  161. + struct clk *clk_switch;
  162. + struct clk *clk_ephy;
  163. + struct clk *clk_ephycgu;
  164. };
  165. +static int ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr,
  166. + int phy_reg, u16 phy_data);
  167. +
  168. static int
  169. ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
  170. {
  171. - ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
  172. + ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
  173. if (!ch->skb[ch->dma.desc])
  174. return -ENOMEM;
  175. ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
  176. @@ -148,8 +213,11 @@ ltq_etop_hw_receive(struct ltq_etop_chan
  177. spin_unlock_irqrestore(&priv->lock, flags);
  178. skb_put(skb, len);
  179. + skb->dev = ch->netdev;
  180. skb->protocol = eth_type_trans(skb, ch->netdev);
  181. netif_receive_skb(skb);
  182. + ch->netdev->stats.rx_packets++;
  183. + ch->netdev->stats.rx_bytes += len;
  184. }
  185. static int
  186. @@ -157,8 +225,10 @@ ltq_etop_poll_rx(struct napi_struct *nap
  187. {
  188. struct ltq_etop_chan *ch = container_of(napi,
  189. struct ltq_etop_chan, napi);
  190. + struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
  191. int rx = 0;
  192. int complete = 0;
  193. + unsigned long flags;
  194. while ((rx < budget) && !complete) {
  195. struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  196. @@ -172,7 +242,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
  197. }
  198. if (complete || !rx) {
  199. napi_complete(&ch->napi);
  200. + spin_lock_irqsave(&priv->lock, flags);
  201. ltq_dma_ack_irq(&ch->dma);
  202. + spin_unlock_irqrestore(&priv->lock, flags);
  203. }
  204. return rx;
  205. }
  206. @@ -184,12 +256,14 @@ ltq_etop_poll_tx(struct napi_struct *nap
  207. container_of(napi, struct ltq_etop_chan, napi);
  208. struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
  209. struct netdev_queue *txq =
  210. - netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
  211. + netdev_get_tx_queue(ch->netdev, ch->dma.nr >> 1);
  212. unsigned long flags;
  213. spin_lock_irqsave(&priv->lock, flags);
  214. while ((ch->dma.desc_base[ch->tx_free].ctl &
  215. (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
  216. + ch->netdev->stats.tx_packets++;
  217. + ch->netdev->stats.tx_bytes += ch->skb[ch->tx_free]->len;
  218. dev_kfree_skb_any(ch->skb[ch->tx_free]);
  219. ch->skb[ch->tx_free] = NULL;
  220. memset(&ch->dma.desc_base[ch->tx_free], 0,
  221. @@ -202,7 +276,9 @@ ltq_etop_poll_tx(struct napi_struct *nap
  222. if (netif_tx_queue_stopped(txq))
  223. netif_tx_start_queue(txq);
  224. napi_complete(&ch->napi);
  225. + spin_lock_irqsave(&priv->lock, flags);
  226. ltq_dma_ack_irq(&ch->dma);
  227. + spin_unlock_irqrestore(&priv->lock, flags);
  228. return 1;
  229. }
  230. @@ -210,9 +286,10 @@ static irqreturn_t
  231. ltq_etop_dma_irq(int irq, void *_priv)
  232. {
  233. struct ltq_etop_priv *priv = _priv;
  234. - int ch = irq - LTQ_DMA_CH0_INT;
  235. -
  236. - napi_schedule(&priv->ch[ch].napi);
  237. + if (irq == priv->txch.dma.irq)
  238. + napi_schedule(&priv->txch.napi);
  239. + else
  240. + napi_schedule(&priv->rxch.napi);
  241. return IRQ_HANDLED;
  242. }
  243. @@ -224,7 +301,7 @@ ltq_etop_free_channel(struct net_device
  244. ltq_dma_free(&ch->dma);
  245. if (ch->dma.irq)
  246. free_irq(ch->dma.irq, priv);
  247. - if (IS_RX(ch->idx)) {
  248. + if (ch == &priv->txch) {
  249. int desc;
  250. for (desc = 0; desc < LTQ_DESC_NUM; desc++)
  251. dev_kfree_skb_any(ch->skb[ch->dma.desc]);
  252. @@ -235,65 +312,133 @@ static void
  253. ltq_etop_hw_exit(struct net_device *dev)
  254. {
  255. struct ltq_etop_priv *priv = netdev_priv(dev);
  256. - int i;
  257. - ltq_pmu_disable(PMU_PPE);
  258. - for (i = 0; i < MAX_DMA_CHAN; i++)
  259. - if (IS_TX(i) || IS_RX(i))
  260. - ltq_etop_free_channel(dev, &priv->ch[i]);
  261. + clk_disable(priv->clk_ppe);
  262. +
  263. + if (of_machine_is_compatible("lantiq,ar9"))
  264. + clk_disable(priv->clk_switch);
  265. +
  266. + if (of_machine_is_compatible("lantiq,ase")) {
  267. + clk_disable(priv->clk_ephy);
  268. + clk_disable(priv->clk_ephycgu);
  269. + }
  270. +
  271. + ltq_etop_free_channel(dev, &priv->txch);
  272. + ltq_etop_free_channel(dev, &priv->rxch);
  273. +}
  274. +
  275. +static void
  276. +ltq_etop_gbit_init(struct net_device *dev)
  277. +{
  278. + struct ltq_etop_priv *priv = netdev_priv(dev);
  279. +
  280. + clk_enable(priv->clk_switch);
  281. +
  282. + /* enable gbit port0 on the SoC */
  283. + ltq_gbit_w32_mask((1 << 17), (1 << 18), LTQ_GBIT_P0_CTL);
  284. +
  285. + ltq_gbit_w32_mask(0, GCTL0_SE, LTQ_GBIT_GCTL0);
  286. + /* disable MDIO auto polling mode */
  287. + ltq_gbit_w32_mask(0, PX_CTL_DMDIO, LTQ_GBIT_P0_CTL);
  288. + /* set 1522 packet size */
  289. + ltq_gbit_w32_mask(0x300, 0, LTQ_GBIT_GCTL0);
  290. + /* disable pmac & dmac headers */
  291. + ltq_gbit_w32_mask(PMAC_HD_CTL_AS | PMAC_HD_CTL_RXSH, 0,
  292. + LTQ_GBIT_PMAC_HD_CTL);
  293. + /* Due to traffic halt when burst length 8,
  294. + replace default IPG value with 0x3B */
  295. + ltq_gbit_w32(0x3B, LTQ_GBIT_PMAC_RX_IPG);
  296. + /* set mdc clock to 2.5 MHz */
  297. + ltq_gbit_w32_mask(MDC_CLOCK_MASK, 4 << MDC_CLOCK_OFFSET,
  298. + LTQ_GBIT_RGMII_CTL);
  299. }
  300. static int
  301. ltq_etop_hw_init(struct net_device *dev)
  302. {
  303. struct ltq_etop_priv *priv = netdev_priv(dev);
  304. - int i;
  305. + int mii_mode = priv->mii_mode;
  306. - ltq_pmu_enable(PMU_PPE);
  307. + clk_enable(priv->clk_ppe);
  308. +
  309. + if (of_machine_is_compatible("lantiq,ar9")) {
  310. + ltq_etop_gbit_init(dev);
  311. + /* force the etops link to the gbit to MII */
  312. + mii_mode = PHY_INTERFACE_MODE_MII;
  313. + }
  314. + ltq_etop_w32_mask(MDIO_CFG_MASK, 0, LTQ_ETOP_MDIO_CFG);
  315. + ltq_etop_w32_mask(MAC_CFG_MASK, MAC_CFG_CGEN | MAC_CFG_DUPLEX |
  316. + MAC_CFG_SPEED | MAC_CFG_LINK, LTQ_ETOP_MAC_CFG);
  317. - switch (priv->pldata->mii_mode) {
  318. + switch (mii_mode) {
  319. case PHY_INTERFACE_MODE_RMII:
  320. - ltq_etop_w32_mask(ETOP_MII_MASK,
  321. - ETOP_MII_REVERSE, LTQ_ETOP_CFG);
  322. + ltq_etop_w32_mask(ETOP_CFG_MASK, ETOP_CFG_REMII0 | ETOP_CFG_OFF1 |
  323. + ETOP_CFG_SEN0 | ETOP_CFG_FEN0, LTQ_ETOP_CFG);
  324. break;
  325. case PHY_INTERFACE_MODE_MII:
  326. - ltq_etop_w32_mask(ETOP_MII_MASK,
  327. - ETOP_MII_NORMAL, LTQ_ETOP_CFG);
  328. + ltq_etop_w32_mask(ETOP_CFG_MASK, ETOP_CFG_OFF1 |
  329. + ETOP_CFG_SEN0 | ETOP_CFG_FEN0, LTQ_ETOP_CFG);
  330. break;
  331. default:
  332. + if (of_machine_is_compatible("lantiq,ase")) {
  333. + clk_enable(priv->clk_ephy);
  334. + /* disable external MII */
  335. + ltq_etop_w32_mask(0, ETOP_CFG_MII0, LTQ_ETOP_CFG);
  336. + /* enable clock for internal PHY */
  337. + clk_enable(priv->clk_ephycgu);
  338. + /* we need to write this magic to the internal phy to
  339. + make it work */
  340. + ltq_etop_mdio_wr(NULL, 0x8, 0x12, 0xC020);
  341. + pr_info("Selected EPHY mode\n");
  342. + break;
  343. + }
  344. netdev_err(dev, "unknown mii mode %d\n",
  345. - priv->pldata->mii_mode);
  346. + mii_mode);
  347. return -ENOTSUPP;
  348. }
  349. - /* enable crc generation */
  350. - ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
  351. + return 0;
  352. +}
  353. +
  354. +static int
  355. +ltq_etop_dma_init(struct net_device *dev)
  356. +{
  357. + struct ltq_etop_priv *priv = netdev_priv(dev);
  358. + int tx = priv->tx_irq - LTQ_DMA_ETOP;
  359. + int rx = priv->rx_irq - LTQ_DMA_ETOP;
  360. + int err;
  361. ltq_dma_init_port(DMA_PORT_ETOP);
  362. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  363. - int irq = LTQ_DMA_CH0_INT + i;
  364. - struct ltq_etop_chan *ch = &priv->ch[i];
  365. -
  366. - ch->idx = ch->dma.nr = i;
  367. -
  368. - if (IS_TX(i)) {
  369. - ltq_dma_alloc_tx(&ch->dma);
  370. - request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
  371. - } else if (IS_RX(i)) {
  372. - ltq_dma_alloc_rx(&ch->dma);
  373. - for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
  374. - ch->dma.desc++)
  375. - if (ltq_etop_alloc_skb(ch))
  376. - return -ENOMEM;
  377. - ch->dma.desc = 0;
  378. - request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
  379. + priv->txch.dma.nr = tx;
  380. + ltq_dma_alloc_tx(&priv->txch.dma);
  381. + err = request_irq(priv->tx_irq, ltq_etop_dma_irq, 0, "eth_tx", priv);
  382. + if (err) {
  383. + netdev_err(dev, "failed to allocate tx irq\n");
  384. + goto err_out;
  385. + }
  386. + priv->txch.dma.irq = priv->tx_irq;
  387. +
  388. + priv->rxch.dma.nr = rx;
  389. + ltq_dma_alloc_rx(&priv->rxch.dma);
  390. + for (priv->rxch.dma.desc = 0; priv->rxch.dma.desc < LTQ_DESC_NUM;
  391. + priv->rxch.dma.desc++) {
  392. + if (ltq_etop_alloc_skb(&priv->rxch)) {
  393. + netdev_err(dev, "failed to allocate skbs\n");
  394. + err = -ENOMEM;
  395. + goto err_out;
  396. }
  397. - ch->dma.irq = irq;
  398. }
  399. - return 0;
  400. + priv->rxch.dma.desc = 0;
  401. + err = request_irq(priv->rx_irq, ltq_etop_dma_irq, 0, "eth_rx", priv);
  402. + if (err)
  403. + netdev_err(dev, "failed to allocate rx irq\n");
  404. + else
  405. + priv->rxch.dma.irq = priv->rx_irq;
  406. +err_out:
  407. + return err;
  408. }
  409. static void
  410. @@ -309,7 +454,10 @@ ltq_etop_get_settings(struct net_device
  411. {
  412. struct ltq_etop_priv *priv = netdev_priv(dev);
  413. - return phy_ethtool_gset(priv->phydev, cmd);
  414. + if (priv->phydev)
  415. + return phy_ethtool_gset(priv->phydev, cmd);
  416. + else
  417. + return 0;
  418. }
  419. static int
  420. @@ -317,7 +465,10 @@ ltq_etop_set_settings(struct net_device
  421. {
  422. struct ltq_etop_priv *priv = netdev_priv(dev);
  423. - return phy_ethtool_sset(priv->phydev, cmd);
  424. + if (priv->phydev)
  425. + return phy_ethtool_sset(priv->phydev, cmd);
  426. + else
  427. + return 0;
  428. }
  429. static int
  430. @@ -325,7 +476,10 @@ ltq_etop_nway_reset(struct net_device *d
  431. {
  432. struct ltq_etop_priv *priv = netdev_priv(dev);
  433. - return phy_start_aneg(priv->phydev);
  434. + if (priv->phydev)
  435. + return phy_start_aneg(priv->phydev);
  436. + else
  437. + return 0;
  438. }
  439. static const struct ethtool_ops ltq_etop_ethtool_ops = {
  440. @@ -336,6 +490,39 @@ static const struct ethtool_ops ltq_etop
  441. };
  442. static int
  443. +ltq_etop_mdio_wr_xr9(struct mii_bus *bus, int phy_addr,
  444. + int phy_reg, u16 phy_data)
  445. +{
  446. + u32 val = MDIO_XR9_REQUEST | MDIO_XR9_WRITE |
  447. + (phy_data << MDIO_XR9_WR_OFFSET) |
  448. + ((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
  449. + ((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
  450. +
  451. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  452. + ;
  453. + ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
  454. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  455. + ;
  456. + return 0;
  457. +}
  458. +
  459. +static int
  460. +ltq_etop_mdio_rd_xr9(struct mii_bus *bus, int phy_addr, int phy_reg)
  461. +{
  462. + u32 val = MDIO_XR9_REQUEST | MDIO_XR9_READ |
  463. + ((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
  464. + ((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
  465. +
  466. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  467. + ;
  468. + ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
  469. + while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
  470. + ;
  471. + val = ltq_gbit_r32(LTQ_GBIT_MDIO_DATA) & MDIO_XR9_RD_MASK;
  472. + return val;
  473. +}
  474. +
  475. +static int
  476. ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
  477. {
  478. u32 val = MDIO_REQUEST |
  479. @@ -343,9 +530,9 @@ ltq_etop_mdio_wr(struct mii_bus *bus, in
  480. ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
  481. phy_data;
  482. - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
  483. + while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
  484. ;
  485. - ltq_etop_w32(val, LTQ_ETOP_MDIO);
  486. + ltq_etop_w32(val, LTQ_ETOP_MDIO_ACC);
  487. return 0;
  488. }
  489. @@ -356,12 +543,12 @@ ltq_etop_mdio_rd(struct mii_bus *bus, in
  490. ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
  491. ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
  492. - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
  493. + while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
  494. ;
  495. - ltq_etop_w32(val, LTQ_ETOP_MDIO);
  496. - while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
  497. + ltq_etop_w32(val, LTQ_ETOP_MDIO_ACC);
  498. + while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
  499. ;
  500. - val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
  501. + val = ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_VAL_MASK;
  502. return val;
  503. }
  504. @@ -376,14 +563,18 @@ ltq_etop_mdio_probe(struct net_device *d
  505. {
  506. struct ltq_etop_priv *priv = netdev_priv(dev);
  507. struct phy_device *phydev = NULL;
  508. - int phy_addr;
  509. + u32 phy_supported = (SUPPORTED_10baseT_Half
  510. + | SUPPORTED_10baseT_Full
  511. + | SUPPORTED_100baseT_Half
  512. + | SUPPORTED_100baseT_Full
  513. + | SUPPORTED_Autoneg
  514. + | SUPPORTED_MII
  515. + | SUPPORTED_TP);
  516. - for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
  517. - if (priv->mii_bus->phy_map[phy_addr]) {
  518. - phydev = priv->mii_bus->phy_map[phy_addr];
  519. - break;
  520. - }
  521. - }
  522. + if (of_machine_is_compatible("lantiq,ase"))
  523. + phydev = priv->mii_bus->phy_map[8];
  524. + else
  525. + phydev = priv->mii_bus->phy_map[0];
  526. if (!phydev) {
  527. netdev_err(dev, "no PHY found\n");
  528. @@ -391,21 +582,18 @@ ltq_etop_mdio_probe(struct net_device *d
  529. }
  530. phydev = phy_connect(dev, dev_name(&phydev->dev),
  531. - &ltq_etop_mdio_link, priv->pldata->mii_mode);
  532. + &ltq_etop_mdio_link, priv->mii_mode);
  533. if (IS_ERR(phydev)) {
  534. netdev_err(dev, "Could not attach to PHY\n");
  535. return PTR_ERR(phydev);
  536. }
  537. - phydev->supported &= (SUPPORTED_10baseT_Half
  538. - | SUPPORTED_10baseT_Full
  539. - | SUPPORTED_100baseT_Half
  540. - | SUPPORTED_100baseT_Full
  541. - | SUPPORTED_Autoneg
  542. - | SUPPORTED_MII
  543. - | SUPPORTED_TP);
  544. + if (of_machine_is_compatible("lantiq,ar9"))
  545. + phy_supported |= SUPPORTED_1000baseT_Half
  546. + | SUPPORTED_1000baseT_Full;
  547. + phydev->supported &= phy_supported;
  548. phydev->advertising = phydev->supported;
  549. priv->phydev = phydev;
  550. pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
  551. @@ -430,8 +618,13 @@ ltq_etop_mdio_init(struct net_device *de
  552. }
  553. priv->mii_bus->priv = dev;
  554. - priv->mii_bus->read = ltq_etop_mdio_rd;
  555. - priv->mii_bus->write = ltq_etop_mdio_wr;
  556. + if (of_machine_is_compatible("lantiq,ar9")) {
  557. + priv->mii_bus->read = ltq_etop_mdio_rd_xr9;
  558. + priv->mii_bus->write = ltq_etop_mdio_wr_xr9;
  559. + } else {
  560. + priv->mii_bus->read = ltq_etop_mdio_rd;
  561. + priv->mii_bus->write = ltq_etop_mdio_wr;
  562. + }
  563. priv->mii_bus->name = "ltq_mii";
  564. snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
  565. priv->pdev->name, priv->pdev->id);
  566. @@ -480,17 +673,19 @@ static int
  567. ltq_etop_open(struct net_device *dev)
  568. {
  569. struct ltq_etop_priv *priv = netdev_priv(dev);
  570. - int i;
  571. + unsigned long flags;
  572. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  573. - struct ltq_etop_chan *ch = &priv->ch[i];
  574. + napi_enable(&priv->txch.napi);
  575. + napi_enable(&priv->rxch.napi);
  576. +
  577. + spin_lock_irqsave(&priv->lock, flags);
  578. + ltq_dma_open(&priv->txch.dma);
  579. + ltq_dma_open(&priv->rxch.dma);
  580. + spin_unlock_irqrestore(&priv->lock, flags);
  581. +
  582. + if (priv->phydev)
  583. + phy_start(priv->phydev);
  584. - if (!IS_TX(i) && (!IS_RX(i)))
  585. - continue;
  586. - ltq_dma_open(&ch->dma);
  587. - napi_enable(&ch->napi);
  588. - }
  589. - phy_start(priv->phydev);
  590. netif_tx_start_all_queues(dev);
  591. return 0;
  592. }
  593. @@ -499,18 +694,19 @@ static int
  594. ltq_etop_stop(struct net_device *dev)
  595. {
  596. struct ltq_etop_priv *priv = netdev_priv(dev);
  597. - int i;
  598. + unsigned long flags;
  599. netif_tx_stop_all_queues(dev);
  600. - phy_stop(priv->phydev);
  601. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  602. - struct ltq_etop_chan *ch = &priv->ch[i];
  603. -
  604. - if (!IS_RX(i) && !IS_TX(i))
  605. - continue;
  606. - napi_disable(&ch->napi);
  607. - ltq_dma_close(&ch->dma);
  608. - }
  609. + if (priv->phydev)
  610. + phy_stop(priv->phydev);
  611. + napi_disable(&priv->txch.napi);
  612. + napi_disable(&priv->rxch.napi);
  613. +
  614. + spin_lock_irqsave(&priv->lock, flags);
  615. + ltq_dma_close(&priv->txch.dma);
  616. + ltq_dma_close(&priv->rxch.dma);
  617. + spin_unlock_irqrestore(&priv->lock, flags);
  618. +
  619. return 0;
  620. }
  621. @@ -520,16 +716,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
  622. int queue = skb_get_queue_mapping(skb);
  623. struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
  624. struct ltq_etop_priv *priv = netdev_priv(dev);
  625. - struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
  626. - struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
  627. - int len;
  628. + struct ltq_dma_desc *desc =
  629. + &priv->txch.dma.desc_base[priv->txch.dma.desc];
  630. unsigned long flags;
  631. u32 byte_offset;
  632. + int len;
  633. len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
  634. - if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
  635. - dev_kfree_skb_any(skb);
  636. + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
  637. + priv->txch.skb[priv->txch.dma.desc]) {
  638. netdev_err(dev, "tx ring full\n");
  639. netif_tx_stop_queue(txq);
  640. return NETDEV_TX_BUSY;
  641. @@ -537,7 +733,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
  642. /* dma needs to start on a 16 byte aligned address */
  643. byte_offset = CPHYSADDR(skb->data) % 16;
  644. - ch->skb[ch->dma.desc] = skb;
  645. + priv->txch.skb[priv->txch.dma.desc] = skb;
  646. dev->trans_start = jiffies;
  647. @@ -547,11 +743,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
  648. wmb();
  649. desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
  650. LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
  651. - ch->dma.desc++;
  652. - ch->dma.desc %= LTQ_DESC_NUM;
  653. + priv->txch.dma.desc++;
  654. + priv->txch.dma.desc %= LTQ_DESC_NUM;
  655. spin_unlock_irqrestore(&priv->lock, flags);
  656. - if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
  657. + if (priv->txch.dma.desc_base[priv->txch.dma.desc].ctl & LTQ_DMA_OWN)
  658. netif_tx_stop_queue(txq);
  659. return NETDEV_TX_OK;
  660. @@ -566,8 +762,10 @@ ltq_etop_change_mtu(struct net_device *d
  661. struct ltq_etop_priv *priv = netdev_priv(dev);
  662. unsigned long flags;
  663. + int max = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
  664. +
  665. spin_lock_irqsave(&priv->lock, flags);
  666. - ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
  667. + ltq_etop_w32((ETOP_PLEN_UNDER << 16) | max,
  668. LTQ_ETOP_IGPLEN);
  669. spin_unlock_irqrestore(&priv->lock, flags);
  670. }
  671. @@ -638,6 +836,9 @@ ltq_etop_init(struct net_device *dev)
  672. if (err)
  673. goto err_hw;
  674. ltq_etop_change_mtu(dev, 1500);
  675. + err = ltq_etop_dma_init(dev);
  676. + if (err)
  677. + goto err_hw;
  678. memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
  679. if (!is_valid_ether_addr(mac.sa_data)) {
  680. @@ -655,9 +856,10 @@ ltq_etop_init(struct net_device *dev)
  681. dev->addr_assign_type = NET_ADDR_RANDOM;
  682. ltq_etop_set_multicast_list(dev);
  683. - err = ltq_etop_mdio_init(dev);
  684. - if (err)
  685. - goto err_netdev;
  686. + if (!ltq_etop_mdio_init(dev))
  687. + dev->ethtool_ops = &ltq_etop_ethtool_ops;
  688. + else
  689. + pr_warn("etop: mdio probe failed\n");
  690. return 0;
  691. err_netdev:
  692. @@ -677,6 +879,9 @@ ltq_etop_tx_timeout(struct net_device *d
  693. err = ltq_etop_hw_init(dev);
  694. if (err)
  695. goto err_hw;
  696. + err = ltq_etop_dma_init(dev);
  697. + if (err)
  698. + goto err_hw;
  699. dev->trans_start = jiffies;
  700. netif_wake_queue(dev);
  701. return;
  702. @@ -700,14 +905,19 @@ static const struct net_device_ops ltq_e
  703. .ndo_tx_timeout = ltq_etop_tx_timeout,
  704. };
  705. -static int __init
  706. -ltq_etop_probe(struct platform_device *pdev)
  707. +static int ltq_etop_probe(struct platform_device *pdev)
  708. {
  709. struct net_device *dev;
  710. struct ltq_etop_priv *priv;
  711. - struct resource *res;
  712. + struct resource *res, *gbit_res, irqres[2];
  713. + const u8 *mac;
  714. int err;
  715. - int i;
  716. +
  717. + err = of_irq_to_resource_table(pdev->dev.of_node, irqres, 2);
  718. + if (err != 2) {
  719. + dev_err(&pdev->dev, "failed to get etop irqs\n");
  720. + return -EINVAL;
  721. + }
  722. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  723. if (!res) {
  724. @@ -733,30 +943,61 @@ ltq_etop_probe(struct platform_device *p
  725. goto err_out;
  726. }
  727. - dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
  728. - if (!dev) {
  729. - err = -ENOMEM;
  730. - goto err_out;
  731. + if (of_machine_is_compatible("lantiq,ar9")) {
  732. + gbit_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  733. + if (!gbit_res) {
  734. + dev_err(&pdev->dev, "failed to get gbit resource\n");
  735. + err = -ENOENT;
  736. + goto err_out;
  737. + }
  738. + ltq_gbit_membase = devm_ioremap_nocache(&pdev->dev,
  739. + gbit_res->start, resource_size(gbit_res));
  740. + if (!ltq_gbit_membase) {
  741. + dev_err(&pdev->dev, "failed to remap gigabit switch %d\n",
  742. + pdev->id);
  743. + err = -ENOMEM;
  744. + goto err_out;
  745. + }
  746. }
  747. +
  748. + dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
  749. strcpy(dev->name, "eth%d");
  750. dev->netdev_ops = &ltq_eth_netdev_ops;
  751. - dev->ethtool_ops = &ltq_etop_ethtool_ops;
  752. priv = netdev_priv(dev);
  753. priv->res = res;
  754. priv->pdev = pdev;
  755. - priv->pldata = dev_get_platdata(&pdev->dev);
  756. priv->netdev = dev;
  757. + priv->tx_irq = irqres[0].start;
  758. + priv->rx_irq = irqres[1].start;
  759. + priv->mii_mode = of_get_phy_mode(pdev->dev.of_node);
  760. +
  761. + mac = of_get_mac_address(pdev->dev.of_node);
  762. + if (mac)
  763. + memcpy(priv->mac, mac, ETH_ALEN);
  764. +
  765. + priv->clk_ppe = clk_get(&pdev->dev, NULL);
  766. + if (IS_ERR(priv->clk_ppe))
  767. + return PTR_ERR(priv->clk_ppe);
  768. + if (of_machine_is_compatible("lantiq,ar9")) {
  769. + priv->clk_switch = clk_get(&pdev->dev, "switch");
  770. + if (IS_ERR(priv->clk_switch))
  771. + return PTR_ERR(priv->clk_switch);
  772. + }
  773. + if (of_machine_is_compatible("lantiq,ase")) {
  774. + priv->clk_ephy = clk_get(&pdev->dev, "ephy");
  775. + if (IS_ERR(priv->clk_ephy))
  776. + return PTR_ERR(priv->clk_ephy);
  777. + priv->clk_ephycgu = clk_get(&pdev->dev, "ephycgu");
  778. + if (IS_ERR(priv->clk_ephycgu))
  779. + return PTR_ERR(priv->clk_ephycgu);
  780. + }
  781. +
  782. spin_lock_init(&priv->lock);
  783. - for (i = 0; i < MAX_DMA_CHAN; i++) {
  784. - if (IS_TX(i))
  785. - netif_napi_add(dev, &priv->ch[i].napi,
  786. - ltq_etop_poll_tx, 8);
  787. - else if (IS_RX(i))
  788. - netif_napi_add(dev, &priv->ch[i].napi,
  789. - ltq_etop_poll_rx, 32);
  790. - priv->ch[i].netdev = dev;
  791. - }
  792. + netif_napi_add(dev, &priv->txch.napi, ltq_etop_poll_tx, 8);
  793. + netif_napi_add(dev, &priv->rxch.napi, ltq_etop_poll_rx, 32);
  794. + priv->txch.netdev = dev;
  795. + priv->rxch.netdev = dev;
  796. err = register_netdev(dev);
  797. if (err)
  798. @@ -785,31 +1026,22 @@ ltq_etop_remove(struct platform_device *
  799. return 0;
  800. }
  801. +static const struct of_device_id ltq_etop_match[] = {
  802. + { .compatible = "lantiq,etop-xway" },
  803. + {},
  804. +};
  805. +MODULE_DEVICE_TABLE(of, ltq_etop_match);
  806. +
  807. static struct platform_driver ltq_mii_driver = {
  808. + .probe = ltq_etop_probe,
  809. .remove = ltq_etop_remove,
  810. .driver = {
  811. .name = "ltq_etop",
  812. + .of_match_table = ltq_etop_match,
  813. },
  814. };
  815. -int __init
  816. -init_ltq_etop(void)
  817. -{
  818. - int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
  819. -
  820. - if (ret)
  821. - pr_err("ltq_etop: Error registering platform driver!");
  822. - return ret;
  823. -}
  824. -
  825. -static void __exit
  826. -exit_ltq_etop(void)
  827. -{
  828. - platform_driver_unregister(&ltq_mii_driver);
  829. -}
  830. -
  831. -module_init(init_ltq_etop);
  832. -module_exit(exit_ltq_etop);
  833. +module_platform_driver(ltq_mii_driver);
  834. MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
  835. MODULE_DESCRIPTION("Lantiq SoC ETOP");