adm5120sw.c

/*
 * ADM5120 built-in ethernet switch driver
 *
 * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
 *
 * This code was based on a driver for Linux 2.6.xx by Jeroen Vreeken.
 *   Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 * NAPI extension for Jeroen's driver
 *   Copyright Thomas Langer (Thomas.Langer@infineon.com), 2007
 *   Copyright Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 * Inspiration for Jeroen's driver came from the ADMtek 2.4 driver.
 *   Copyright ADMtek Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

#include <asm/mipsregs.h>

#include <asm/mach-adm5120/adm5120_info.h>
#include <asm/mach-adm5120/adm5120_defs.h>
#include <asm/mach-adm5120/adm5120_switch.h>

#include "adm5120sw.h"

#define DRV_NAME	"adm5120-switch"
#define DRV_DESC	"ADM5120 built-in ethernet switch driver"
#define DRV_VERSION	"0.1.1"

#define CONFIG_ADM5120_SWITCH_NAPI	1
#undef CONFIG_ADM5120_SWITCH_DEBUG

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_ADM5120_SWITCH_DEBUG
#define SW_DBG(f, a...)		printk(KERN_DEBUG "%s: " f, DRV_NAME , ## a)
#else
#define SW_DBG(f, a...)		do {} while (0)
#endif
#define SW_ERR(f, a...)		printk(KERN_ERR "%s: " f, DRV_NAME , ## a)
#define SW_INFO(f, a...)	printk(KERN_INFO "%s: " f, DRV_NAME , ## a)

#define SWITCH_NUM_PORTS	6
#define ETH_CSUM_LEN		4

#define RX_MAX_PKTLEN		1550
#define RX_RING_SIZE		64

#define TX_RING_SIZE		32
#define TX_QUEUE_LEN		28	/* Limit ring entries actually used. */
#define TX_TIMEOUT		(HZ * 400)

#define RX_DESCS_SIZE		(RX_RING_SIZE * sizeof(struct dma_desc))
#define RX_SKBS_SIZE		(RX_RING_SIZE * sizeof(struct sk_buff *))

#define TX_DESCS_SIZE		(TX_RING_SIZE * sizeof(struct dma_desc))
#define TX_SKBS_SIZE		(TX_RING_SIZE * sizeof(struct sk_buff *))

#define SKB_ALLOC_LEN		(RX_MAX_PKTLEN + 32)
#define SKB_RESERVE_LEN		(NET_IP_ALIGN + NET_SKB_PAD)

#define SWITCH_INTS_HIGH	(SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF)
#define SWITCH_INTS_LOW		(SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF)
#define SWITCH_INTS_ERR		(SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH)
#define SWITCH_INTS_Q		(SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \
				 SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \
				 SWITCH_INT_CPQF | SWITCH_INT_GQF)

#define SWITCH_INTS_ALL		(SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \
				 SWITCH_INTS_ERR | SWITCH_INTS_Q | \
				 SWITCH_INT_MD | SWITCH_INT_PSC)

#define SWITCH_INTS_USED	(SWITCH_INTS_LOW | SWITCH_INT_PSC)
#define SWITCH_INTS_POLL	(SWITCH_INT_RLD | SWITCH_INT_LDF | SWITCH_INT_SLD)
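
/*
 * Only the "low" (CPU port) descriptor rings are serviced by this
 * driver: SWITCH_INTS_USED enables the low RX/TX interrupts plus port
 * status change (PSC), and SWITCH_INTS_POLL is the subset acknowledged
 * and re-checked by the NAPI poll loop before interrupts are re-enabled.
 */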

/* ------------------------------------------------------------------------ */

struct adm5120_if_priv {
	struct net_device *dev;
	unsigned int vlan_no;
	unsigned int port_mask;
#ifdef CONFIG_ADM5120_SWITCH_NAPI
	struct napi_struct napi;
#endif
};

struct dma_desc {
	__u32 buf1;
#define DESC_OWN		(1UL << 31)	/* Owned by the switch */
#define DESC_EOR		(1UL << 28)	/* End of Ring */
#define DESC_ADDR_MASK		0x1FFFFFF
#define DESC_ADDR(x)		((__u32)(x) & DESC_ADDR_MASK)
	__u32 buf2;
#define DESC_BUF2_EN		(1UL << 31)	/* Buffer 2 enable */
	__u32 buflen;
	__u32 misc;
/* definitions for tx/rx descriptors */
#define DESC_PKTLEN_SHIFT	16
#define DESC_PKTLEN_MASK	0x7FF
/* tx descriptor specific part */
#define DESC_CSUM		(1UL << 31)	/* Append checksum */
#define DESC_DSTPORT_SHIFT	8
#define DESC_DSTPORT_MASK	0x3F
#define DESC_VLAN_MASK		0x3F
/* rx descriptor specific part */
#define DESC_SRCPORT_SHIFT	12
#define DESC_SRCPORT_MASK	0x7
#define DESC_DA_MASK		0x3
#define DESC_DA_SHIFT		4
#define DESC_IPCSUM_FAIL	(1UL << 3)	/* IP checksum fail */
#define DESC_VLAN_TAG		(1UL << 2)	/* VLAN tag present */
#define DESC_TYPE_MASK		0x3		/* mask for Packet type */
#define DESC_TYPE_IP		0x0		/* IP packet */
#define DESC_TYPE_PPPoE		0x1		/* PPPoE packet */
} __attribute__((aligned(16)));
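
/*
 * The switch reads these descriptors directly, hence the 16-byte
 * alignment. DESC_ADDR() keeps only the low 25 address bits, which
 * assumes buffers live in the first 32 MB of KSEG0 so that the masked
 * virtual address equals the physical address the hardware DMAs to.
 */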

/* ------------------------------------------------------------------------ */

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SWITCH_NUM_PORTS];

static struct dma_desc *txl_descs;
static struct dma_desc *rxl_descs;

static dma_addr_t txl_descs_dma;
static dma_addr_t rxl_descs_dma;

static struct sk_buff **txl_skbuff;
static struct sk_buff **rxl_skbuff;

static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
static unsigned int cur_txl, dirty_txl;

static unsigned int sw_used;

static DEFINE_SPINLOCK(tx_lock);
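
/*
 * The ring indices are free-running counters: an entry is addressed as
 * (counter % RING_SIZE), and (cur - dirty) is the number of descriptors
 * currently handed to the hardware, so full/empty never needs an extra
 * flag.
 */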

/* ------------------------------------------------------------------------ */

static inline u32 sw_read_reg(u32 reg)
{
	return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

static inline void sw_write_reg(u32 reg, u32 val)
{
	__raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

static inline void sw_int_mask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t |= mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_unmask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t &= ~mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_ack(u32 mask)
{
	sw_write_reg(SWITCH_REG_INT_STATUS, mask);
}
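
/*
 * sw_int_status() clears the bits that are currently masked, so callers
 * only ever see interrupt sources they have enabled; a set bit in
 * SWITCH_REG_INT_MASK disables that source.
 */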
static inline u32 sw_int_status(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	t &= ~sw_read_reg(SWITCH_REG_INT_MASK);
	return t;
}

static inline u32 desc_get_srcport(struct dma_desc *desc)
{
	return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
}

static inline u32 desc_get_pktlen(struct dma_desc *desc)
{
	return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
}

static inline int desc_ipcsum_fail(struct dma_desc *desc)
{
	return ((desc->misc & DESC_IPCSUM_FAIL) != 0);
}

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_ADM5120_SWITCH_DEBUG
static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
{
	u32 t;

	SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);

	t = desc->buf1;
	SW_DBG("  buf1 %08X addr=%08X; len=%08X %s%s\n", t,
		t & DESC_ADDR_MASK,
		desc->buflen,
		(t & DESC_OWN) ? "SWITCH" : "CPU",
		(t & DESC_EOR) ? " RE" : "");

	t = desc->buf2;
	SW_DBG("  buf2 %08X addr=%08X%s\n", desc->buf2,
		t & DESC_ADDR_MASK,
		(t & DESC_BUF2_EN) ? " EN" : "");

	t = desc->misc;
	if (tx)
		SW_DBG("  misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
			(t & DESC_CSUM) ? " CSUM" : "",
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,
			t & DESC_VLAN_MASK);
	else
		SW_DBG("  misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
			t,
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
			(t >> DESC_DA_SHIFT) & DESC_DA_MASK,
			(t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
			(t & DESC_VLAN_TAG) ? " VLAN" : "",
			(t & DESC_TYPE_MASK));
}

static void sw_dump_intr_mask(char *label, u32 mask)
{
	SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		label, mask,
		(mask & SWITCH_INT_SHD) ? " SHD" : "",
		(mask & SWITCH_INT_SLD) ? " SLD" : "",
		(mask & SWITCH_INT_RHD) ? " RHD" : "",
		(mask & SWITCH_INT_RLD) ? " RLD" : "",
		(mask & SWITCH_INT_HDF) ? " HDF" : "",
		(mask & SWITCH_INT_LDF) ? " LDF" : "",
		(mask & SWITCH_INT_P0QF) ? " P0QF" : "",
		(mask & SWITCH_INT_P1QF) ? " P1QF" : "",
		(mask & SWITCH_INT_P2QF) ? " P2QF" : "",
		(mask & SWITCH_INT_P3QF) ? " P3QF" : "",
		(mask & SWITCH_INT_P4QF) ? " P4QF" : "",
		(mask & SWITCH_INT_CPQF) ? " CPQF" : "",
		(mask & SWITCH_INT_GQF) ? " GQF" : "",
		(mask & SWITCH_INT_MD) ? " MD" : "",
		(mask & SWITCH_INT_BCS) ? " BCS" : "",
		(mask & SWITCH_INT_PSC) ? " PSC" : "",
		(mask & SWITCH_INT_ID) ? " ID" : "",
		(mask & SWITCH_INT_W0TE) ? " W0TE" : "",
		(mask & SWITCH_INT_W1TE) ? " W1TE" : "",
		(mask & SWITCH_INT_RDE) ? " RDE" : "",
		(mask & SWITCH_INT_SDE) ? " SDE" : "",
		(mask & SWITCH_INT_CPUH) ? " CPUH" : "");
}

static void sw_dump_regs(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_PHY_STATUS);
	SW_DBG("phy_status: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	SW_DBG("cpup_conf: %08X%s%s%s\n", t,
		(t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
		(t & CPUP_CONF_CRCP) ? " CRCP" : "",
		(t & CPUP_CONF_BTM) ? " BTM" : "");

	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	SW_DBG("port_conf0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF1);
	SW_DBG("port_conf1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF2);
	SW_DBG("port_conf2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_VLAN_G1);
	SW_DBG("vlan g1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_VLAN_G2);
	SW_DBG("vlan g2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_BW_CNTL0);
	SW_DBG("bw_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_BW_CNTL1);
	SW_DBG("bw_cntl1: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL0);
	SW_DBG("phy_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL1);
	SW_DBG("phy_cntl1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL2);
	SW_DBG("phy_cntl2: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	SW_DBG("phy_cntl3: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL4);
	SW_DBG("phy_cntl4: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	sw_dump_intr_mask("int_status: ", t);

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	sw_dump_intr_mask("int_mask: ", t);

	t = sw_read_reg(SWITCH_REG_SHDA);
	SW_DBG("shda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_SLDA);
	SW_DBG("slda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RHDA);
	SW_DBG("rhda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RLDA);
	SW_DBG("rlda: %08X\n", t);
}
#else
static inline void sw_dump_desc(char *label, struct dma_desc *desc, int tx) {}
static inline void sw_dump_intr_mask(char *label, u32 mask) {}
static inline void sw_dump_regs(void) {}
#endif /* CONFIG_ADM5120_SWITCH_DEBUG */

/* ------------------------------------------------------------------------ */
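
/*
 * adm5120_rx_dma_update() hands a descriptor (back) to the switch: it
 * points buf1 at the skb data, restores the buffer length and sets
 * DESC_OWN, preserving DESC_EOR on the last ring entry.
 */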
static inline void adm5120_rx_dma_update(struct dma_desc *desc,
		struct sk_buff *skb, int end)
{
	desc->misc = 0;
	desc->buf2 = 0;
	desc->buflen = RX_MAX_PKTLEN;
	desc->buf1 = DESC_ADDR(skb->data) |
		DESC_OWN | (end ? DESC_EOR : 0);
}

static void adm5120_switch_rx_refill(void)
{
	unsigned int entry;

	for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
		struct dma_desc *desc;
		struct sk_buff *skb;

		entry = dirty_rxl % RX_RING_SIZE;
		desc = &rxl_descs[entry];

		skb = rxl_skbuff[entry];
		if (skb == NULL) {
			skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
			if (skb) {
				skb_reserve(skb, SKB_RESERVE_LEN);
				rxl_skbuff[entry] = skb;
			} else {
				SW_ERR("no memory for skb\n");
				desc->buflen = 0;
				desc->buf2 = 0;
				desc->misc = 0;
				desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;
				break;
			}
		}

		desc->buf2 = 0;
		desc->buflen = RX_MAX_PKTLEN;
		desc->misc = 0;
		desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
			DESC_ADDR(skb->data);
	}
}
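
/*
 * Receive up to "limit" packets from the low RX ring. The switch appends
 * a 4-byte checksum (ETH_CSUM_LEN) to every frame, which is subtracted
 * from the reported length; zero-length and IP-checksum-failed frames
 * only bump the error counters and their skb is recycled in place.
 */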
static int adm5120_switch_rx(int limit)
{
	unsigned int done = 0;

	SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
		limit, cur_rxl, dirty_rxl);

	while (done < limit) {
		int entry = cur_rxl % RX_RING_SIZE;
		struct dma_desc *desc = &rxl_descs[entry];
		struct net_device *rdev;
		unsigned int port;

		if (desc->buf1 & DESC_OWN)
			break;

		if (dirty_rxl + RX_RING_SIZE == cur_rxl)
			break;

		port = desc_get_srcport(desc);
		rdev = adm5120_port[port];

		SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,
			rxl_skbuff[entry]);

		if (rdev && netif_running(rdev)) {
			struct sk_buff *skb = rxl_skbuff[entry];
			int pktlen;

			pktlen = desc_get_pktlen(desc);
			pktlen -= ETH_CSUM_LEN;

			if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
				rdev->stats.rx_errors++;
				if (pktlen == 0)
					rdev->stats.rx_length_errors++;
				if (desc_ipcsum_fail(desc))
					rdev->stats.rx_crc_errors++;
				SW_DBG("rx error, recycling skb %u\n", entry);
			} else {
				skb_put(skb, pktlen);

				skb->dev = rdev;
				skb->protocol = eth_type_trans(skb, rdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				dma_cache_wback_inv((unsigned long)skb->data,
					skb->len);

#ifdef CONFIG_ADM5120_SWITCH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif

				rdev->last_rx = jiffies;
				rdev->stats.rx_packets++;
				rdev->stats.rx_bytes += pktlen;

				rxl_skbuff[entry] = NULL;
				done++;
			}
		} else {
			SW_DBG("no rx device, recycling skb %u\n", entry);
		}

		cur_rxl++;
		if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
			adm5120_switch_rx_refill();
	}

	adm5120_switch_rx_refill();

	SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
		cur_rxl, dirty_rxl, done);

	return done;
}
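
/*
 * Reclaim completed TX descriptors under tx_lock and wake the interface
 * queues again once the ring has drained below TX_QUEUE_LEN - 4, giving
 * some hysteresis between the stop and wake thresholds.
 */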
static void adm5120_switch_tx(void)
{
	unsigned int entry;

	spin_lock(&tx_lock);
	entry = dirty_txl % TX_RING_SIZE;
	while (dirty_txl != cur_txl) {
		struct dma_desc *desc = &txl_descs[entry];
		struct sk_buff *skb = txl_skbuff[entry];

		if (desc->buf1 & DESC_OWN)
			break;

		if (netif_running(skb->dev)) {
			skb->dev->stats.tx_bytes += skb->len;
			skb->dev->stats.tx_packets++;
		}

		dev_kfree_skb_irq(skb);
		txl_skbuff[entry] = NULL;
		entry = (++dirty_txl) % TX_RING_SIZE;
	}

	if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
		int i;

		for (i = 0; i < SWITCH_NUM_PORTS; i++) {
			if (!adm5120_devs[i])
				continue;
			netif_wake_queue(adm5120_devs[i]);
		}
	}
	spin_unlock(&tx_lock);
}
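
/*
 * NAPI poll: the poll-source interrupts are acknowledged up front, TX is
 * reclaimed, then RX runs against the budget. Polling ends (and the poll
 * interrupts are unmasked again) only when the budget was not exhausted
 * and no new poll interrupt is pending, which avoids the usual
 * completion race.
 */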
#ifdef CONFIG_ADM5120_SWITCH_NAPI
static int adm5120_if_poll(struct napi_struct *napi, int limit)
{
	struct adm5120_if_priv *priv = container_of(napi,
		struct adm5120_if_priv, napi);
	struct net_device *dev __maybe_unused = priv->dev;
	int done;
	u32 status;

	sw_int_ack(SWITCH_INTS_POLL);

	SW_DBG("%s: processing TX ring\n", dev->name);
	adm5120_switch_tx();

	SW_DBG("%s: processing RX ring\n", dev->name);
	done = adm5120_switch_rx(limit);

	status = sw_int_status() & SWITCH_INTS_POLL;
	if ((done < limit) && (!status)) {
		SW_DBG("disable polling mode for %s\n", dev->name);
		napi_complete(napi);
		sw_int_unmask(SWITCH_INTS_POLL);
		return 0;
	}

	SW_DBG("%s still in polling mode, done=%d, status=%x\n",
		dev->name, done, status);
	return 1;
}
#endif /* CONFIG_ADM5120_SWITCH_NAPI */
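
/*
 * Single shared IRQ line for the whole switch. In NAPI mode the
 * poll-source interrupts stay asserted but masked while a poll is
 * scheduled; everything else is acknowledged immediately.
 */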
static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
{
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_ALL;
	if (!status)
		return IRQ_NONE;

#ifdef CONFIG_ADM5120_SWITCH_NAPI
	sw_int_ack(status & ~SWITCH_INTS_POLL);

	if (status & SWITCH_INTS_POLL) {
		struct net_device *dev = dev_id;
		struct adm5120_if_priv *priv = netdev_priv(dev);

		sw_dump_intr_mask("poll ints", status);
		SW_DBG("enable polling mode for %s\n", dev->name);
		sw_int_mask(SWITCH_INTS_POLL);
		napi_schedule(&priv->napi);
	}
#else
	sw_int_ack(status);

	if (status & (SWITCH_INT_RLD | SWITCH_INT_LDF))
		adm5120_switch_rx(RX_RING_SIZE);

	if (status & SWITCH_INT_SLD)
		adm5120_switch_tx();
#endif

	return IRQ_HANDLED;
}

static void adm5120_set_bw(char *matrix)
{
	unsigned long val;

	/* Ports 0 to 3 are set using the bandwidth control 0 register */
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_BW_CNTL0, val);

	/* Ports 4 and 5 are set using the bandwidth control 1 register */
	val = matrix[4];
	if (matrix[5] == 1)
		sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
	else
		sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x80000000);

	SW_DBG("D: ctl0 0x%x, ctl1 0x%x\n", sw_read_reg(SWITCH_REG_BW_CNTL0),
		sw_read_reg(SWITCH_REG_BW_CNTL1));
}

static void adm5120_switch_tx_ring_reset(struct dma_desc *desc,
		struct sk_buff **skbl, int num)
{
	memset(desc, 0, num * sizeof(*desc));
	desc[num-1].buf1 |= DESC_EOR;
	memset(skbl, 0, sizeof(struct sk_buff *) * num);

	cur_txl = 0;
	dirty_txl = 0;
}

static void adm5120_switch_rx_ring_reset(struct dma_desc *desc,
		struct sk_buff **skbl, int num)
{
	int i;

	memset(desc, 0, num * sizeof(*desc));
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
		if (!skbl[i]) {
			i = num;
			break;
		}
		skb_reserve(skbl[i], SKB_RESERVE_LEN);
		adm5120_rx_dma_update(&desc[i], skbl[i], (num - 1 == i));
	}

	cur_rxl = 0;
	dirty_rxl = 0;
}
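
/*
 * Both descriptor rings are allocated with dma_alloc_coherent(NULL, ...),
 * i.e. without a struct device -- a convention that kernels of this
 * vintage accepted; newer kernels require a real device here.
 */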
static int adm5120_switch_tx_ring_alloc(void)
{
	int err;

	txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,
		GFP_ATOMIC);
	if (!txl_descs) {
		err = -ENOMEM;
		goto err;
	}

	txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
	if (!txl_skbuff) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	return err;
}

static void adm5120_switch_tx_ring_free(void)
{
	int i;

	if (txl_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++)
			if (txl_skbuff[i])
				kfree_skb(txl_skbuff[i]);
		kfree(txl_skbuff);
	}

	if (txl_descs)
		dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
			txl_descs_dma);
}

static int adm5120_switch_rx_ring_alloc(void)
{
	int err;
	int i;

	/* init RX ring */
	rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
		GFP_ATOMIC);
	if (!rxl_descs) {
		err = -ENOMEM;
		goto err;
	}

	rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
	if (!rxl_skbuff) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
		if (!skb) {
			err = -ENOMEM;
			goto err;
		}
		rxl_skbuff[i] = skb;
		skb_reserve(skb, SKB_RESERVE_LEN);
	}

	return 0;

err:
	return err;
}

static void adm5120_switch_rx_ring_free(void)
{
	int i;

	if (rxl_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++)
			if (rxl_skbuff[i])
				kfree_skb(rxl_skbuff[i]);
		kfree(rxl_skbuff);
	}

	if (rxl_descs)
		dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
			rxl_descs_dma);
}

static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_if_priv *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;
	u32 t;

	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
		(mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT1, t);

	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
		MAC_WT0_MAWC | MAC_WT0_WVE | (priv->vlan_no<<3);
	sw_write_reg(SWITCH_REG_MAC_WT0, t);

	while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
		;
}
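
/*
 * The VLAN "matrix" is one byte per VLAN with one bit per port: byte n
 * lists the member ports of VLAN n. Besides programming the two VLAN
 * group registers, this rebuilds the port -> net_device lookup table
 * used on receive.
 */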
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_VLAN_G1, val);
	val = matrix[4] + (matrix[5]<<8);
	sw_write_reg(SWITCH_REG_VLAN_G2, val);

	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SWITCH_NUM_PORTS; port++) {
		for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS; vlan_port++)
			if (matrix[vlan_port] & (0x00000001 << port))
				break;

		if (vlan_port < SWITCH_NUM_PORTS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}

static void adm5120_switch_set_vlan_mac(unsigned int vlan, unsigned char *mac)
{
	u32 t;

	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT)
		| (mac[4] << MAC_WT1_MAC4_SHIFT)
		| (mac[5] << MAC_WT1_MAC5_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT1, t);

	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
		MAC_WT0_MAWC | MAC_WT0_WVE | (vlan << MAC_WT0_WVN_SHIFT) |
		(MAC_WT0_WAF_STATIC << MAC_WT0_WAF_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT0, t);

	do {
		t = sw_read_reg(SWITCH_REG_MAC_WT0);
	} while ((t & MAC_WT0_MWD) == 0);
}

static void adm5120_switch_set_vlan_ports(unsigned int vlan, u32 ports)
{
	unsigned int reg;
	u32 t;

	if (vlan < 4)
		reg = SWITCH_REG_VLAN_G1;
	else {
		vlan -= 4;
		reg = SWITCH_REG_VLAN_G2;
	}

	t = sw_read_reg(reg);
	t &= ~(0xFF << (vlan*8));
	t |= (ports << (vlan*8));
	sw_write_reg(reg, t);
}

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_ADM5120_SWITCH_NAPI
static inline void adm5120_if_napi_enable(struct net_device *dev)
{
	struct adm5120_if_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
}

static inline void adm5120_if_napi_disable(struct net_device *dev)
{
	struct adm5120_if_priv *priv = netdev_priv(dev);

	napi_disable(&priv->napi);
}
#else
static inline void adm5120_if_napi_enable(struct net_device *dev) {}
static inline void adm5120_if_napi_disable(struct net_device *dev) {}
#endif /* CONFIG_ADM5120_SWITCH_NAPI */

static int adm5120_if_open(struct net_device *dev)
{
	u32 t;
	int err;
	int i;

	adm5120_if_napi_enable(dev);

	err = request_irq(dev->irq, adm5120_switch_irq, IRQF_SHARED,
		dev->name, dev);
	if (err) {
		SW_ERR("unable to get irq for %s\n", dev->name);
		goto err;
	}

	if (!sw_used++)
		/* enable interrupts on first open */
		sw_int_unmask(SWITCH_INTS_USED);

	/* enable (additional) port */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if (dev == adm5120_devs[i])
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	netif_start_queue(dev);

	return 0;

err:
	adm5120_if_napi_disable(dev);
	return err;
}

static int adm5120_if_stop(struct net_device *dev)
{
	u32 t;
	int i;

	netif_stop_queue(dev);
	adm5120_if_napi_disable(dev);

	/* disable port if not assigned to other devices */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	t |= SWITCH_PORTS_NOCPU;
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	if (!--sw_used)
		sw_int_mask(SWITCH_INTS_USED);

	free_irq(dev->irq, dev);

	return 0;
}
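
/*
 * All interfaces share the single low-priority TX ring: the destination
 * VLAN is encoded as one bit in the descriptor's misc word, the reported
 * length is rounded up to ETH_ZLEN for short frames, and every interface
 * queue is stopped once TX_QUEUE_LEN packets are in flight.
 */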
static int adm5120_if_hard_start_xmit(struct sk_buff *skb,
		struct net_device *dev)
{
	struct dma_desc *desc;
	struct adm5120_if_priv *priv = netdev_priv(dev);
	unsigned int entry;
	unsigned long data;
	int i;

	/* lock switch irq */
	spin_lock_irq(&tx_lock);

	/* calculate the next TX descriptor entry. */
	entry = cur_txl % TX_RING_SIZE;

	desc = &txl_descs[entry];
	if (desc->buf1 & DESC_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		SW_DBG("%s unable to transmit, packet dropped\n", dev->name);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		spin_unlock_irq(&tx_lock);
		return 0;
	}

	txl_skbuff[entry] = skb;
	data = (desc->buf1 & DESC_EOR);
	data |= DESC_ADDR(skb->data);

	desc->misc =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) |
		(0x1 << priv->vlan_no);

	desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	desc->buf1 = data | DESC_OWN;
	sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL);

	cur_txl++;
	if (cur_txl == dirty_txl + TX_QUEUE_LEN) {
		for (i = 0; i < SWITCH_NUM_PORTS; i++) {
			if (!adm5120_devs[i])
				continue;
			netif_stop_queue(adm5120_devs[i]);
		}
	}

	dev->trans_start = jiffies;

	spin_unlock_irq(&tx_lock);

	return 0;
}

static void adm5120_if_tx_timeout(struct net_device *dev)
{
	SW_INFO("TX timeout on %s\n", dev->name);
}

static void adm5120_if_set_rx_mode(struct net_device *dev)
{
	struct adm5120_if_priv *priv = netdev_priv(dev);
	u32 ports;
	u32 t;

	ports = adm5120_eth_vlans[priv->vlan_no] & SWITCH_PORTS_NOCPU;

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	if (dev->flags & IFF_PROMISC)
		/* enable unknown packets */
		t &= ~(ports << CPUP_CONF_DUNP_SHIFT);
	else
		/* disable unknown packets */
		t |= (ports << CPUP_CONF_DUNP_SHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
			netdev_mc_count(dev))
		/* enable multicast packets */
		t &= ~(ports << CPUP_CONF_DMCP_SHIFT);
	else
		/* disable multicast packets */
		t |= (ports << CPUP_CONF_DMCP_SHIFT);

	/*
	 * If any port is configured to be in promiscuous mode, the Bridge
	 * Test Mode has to be activated, so that packets learned in another
	 * VLAN are also forwarded to the CPU.
	 *
	 * The difficult scenario is building a bridge on the CPU. Assume
	 * port0 and the CPU port are in VLAN0, port1 and the CPU port are
	 * in VLAN1, and the CPU bridges VLAN0 and VLAN1 with both VLAN
	 * interfaces in promiscuous mode. A packet with ethernet source
	 * address 99 entering port 0 is forwarded to the CPU because it is
	 * unknown; the bridge then sends it out via VLAN1 and it leaves on
	 * port 1. When a packet with ethernet destination address 99 later
	 * arrives at port 1 in VLAN1, the switch has learned that this
	 * address lives at port 0 in VLAN0 and would drop the packet. To
	 * still deliver such packets to the CPU, the Bridge Test Mode has
	 * to be activated.
	 */
	/* Check if there is any vlan in promisc mode. */
	if (~t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
		t |= CPUP_CONF_BTM;	/* Enable Bridge Testing Mode */
	else
		t &= ~CPUP_CONF_BTM;	/* Disable Bridge Testing Mode */

	sw_write_reg(SWITCH_REG_CPUP_CONF, t);
}

static int adm5120_if_set_mac_address(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	adm5120_write_mac(dev);
	return 0;
}
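
/*
 * Private ioctls (declared in adm5120sw.h): SIOCGADMINFO reports the
 * port count and the VLAN this interface belongs to, SIOCSMATRIX
 * installs a new VLAN matrix (CAP_NET_ADMIN only), and SIOCGMATRIX
 * reads the current matrix back.
 */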
static int adm5120_if_do_ioctl(struct net_device *dev, struct ifreq *rq,
		int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_if_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->vlan_no;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(adm5120_eth_vlans, rq->ifr_data,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(adm5120_eth_vlans);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, adm5120_eth_vlans,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static const struct net_device_ops adm5120sw_netdev_ops = {
	.ndo_open		= adm5120_if_open,
	.ndo_stop		= adm5120_if_stop,
	.ndo_start_xmit		= adm5120_if_hard_start_xmit,
	.ndo_set_rx_mode	= adm5120_if_set_rx_mode,
	.ndo_do_ioctl		= adm5120_if_do_ioctl,
	.ndo_tx_timeout		= adm5120_if_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= adm5120_if_set_mac_address,
};

static struct net_device *adm5120_if_alloc(void)
{
	struct net_device *dev;
	struct adm5120_if_priv *priv;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->dev = dev;

	dev->irq = ADM5120_IRQ_SWITCH;
	dev->netdev_ops = &adm5120sw_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

#ifdef CONFIG_ADM5120_SWITCH_NAPI
	netif_napi_add(dev, &priv->napi, adm5120_if_poll, 64);
#endif

	return dev;
}

/* ------------------------------------------------------------------------ */

static void adm5120_switch_cleanup(void)
{
	int i;

	/* disable interrupts */
	sw_int_mask(SWITCH_INTS_ALL);

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev = adm5120_devs[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	adm5120_switch_tx_ring_free();
	adm5120_switch_rx_ring_free();
}
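
/*
 * Probe order: configure the CPU port and per-port defaults, put the
 * PHYs into autonegotiation with auto MDIX, allocate and reset both
 * descriptor rings, register one net_device per VLAN, install the VLAN
 * matrix, and finally enable the CPU port.
 */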
static int adm5120_switch_probe(struct platform_device *pdev)
{
	u32 t;
	int i, err;

	adm5120_nrdevs = adm5120_eth_num_ports;

	t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) |
		(SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) |
		(SWITCH_PORTS_NOCPU);
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	/* setup ports to Autoneg/100M/Full duplex/Auto MDIX */
	t = SWITCH_PORTS_PHY |
		(SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) |
		PHY_CNTL2_RMAE;
	sw_write_reg(SWITCH_REG_PHY_CNTL2, t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	t |= PHY_CNTL3_RNT;
	sw_write_reg(SWITCH_REG_PHY_CNTL3, t);

	/* force all packets from all ports to low priority */
	sw_write_reg(SWITCH_REG_PRI_CNTL, 0);

	sw_int_mask(SWITCH_INTS_ALL);
	sw_int_ack(SWITCH_INTS_ALL);

	err = adm5120_switch_rx_ring_alloc();
	if (err)
		goto err;

	err = adm5120_switch_tx_ring_alloc();
	if (err)
		goto err;

	adm5120_switch_tx_ring_reset(txl_descs, txl_skbuff, TX_RING_SIZE);
	adm5120_switch_rx_ring_reset(rxl_descs, rxl_skbuff, RX_RING_SIZE);

	sw_write_reg(SWITCH_REG_SHDA, 0);
	sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs));
	sw_write_reg(SWITCH_REG_RHDA, 0);
	sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs));

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev;
		struct adm5120_if_priv *priv;

		dev = adm5120_if_alloc();
		if (!dev) {
			err = -ENOMEM;
			goto err;
		}

		adm5120_devs[i] = dev;
		priv = netdev_priv(dev);

		priv->vlan_no = i;
		priv->port_mask = adm5120_eth_vlans[i];

		memcpy(dev->dev_addr, adm5120_eth_macs[i], ETH_ALEN);
		adm5120_write_mac(dev);

		err = register_netdev(dev);
		if (err) {
			SW_INFO("%s register failed, error=%d\n",
				dev->name, err);
			goto err;
		}
	}

	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(adm5120_eth_vlans);

	/* enable CPU port */
	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	t &= ~CPUP_CONF_DCPUP;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	return 0;

err:
	adm5120_switch_cleanup();

	SW_ERR("init failed\n");
	return err;
}

static int adm5120_switch_remove(struct platform_device *pdev)
{
	adm5120_switch_cleanup();
	return 0;
}

static struct platform_driver adm5120_switch_driver = {
	.probe		= adm5120_switch_probe,
	.remove		= adm5120_switch_remove,
	.driver		= {
		.name	= DRV_NAME,
	},
};

/* ------------------------------------------------------------------------ */

static int __init adm5120_switch_mod_init(void)
{
	int err;

	pr_info(DRV_DESC " version " DRV_VERSION "\n");
	err = platform_driver_register(&adm5120_switch_driver);

	return err;
}

static void __exit adm5120_switch_mod_exit(void)
{
	platform_driver_unregister(&adm5120_switch_driver);
}

module_init(adm5120_switch_mod_init);
module_exit(adm5120_switch_mod_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);