002-ixp4xx_eth-use-parent-device-for-dma-allocations.patch

From 1d67040af0144c549f4db8144d2ccc253ff8639c Mon Sep 17 00:00:00 2001
From: Jonas Gorski <jogo@openwrt.org>
Date: Mon, 1 Jul 2013 16:39:28 +0200
Subject: [PATCH 2/2] net: ixp4xx_eth: use parent device for dma allocations

Now that the platform device provides a dma_coherent_mask, use it for
dma operations.

This fixes ethernet on ixp4xx, which has been broken since 3.7.

Signed-off-by: Jonas Gorski <jogo@openwrt.org>
---
 drivers/net/ethernet/xscale/ixp4xx_eth.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -657,10 +657,10 @@ static inline void queue_put_desc(unsign
 static inline void dma_unmap_tx(struct port *port, struct desc *desc)
 {
 #ifdef __ARMEB__
-        dma_unmap_single(&port->netdev->dev, desc->data,
+        dma_unmap_single(port->netdev->dev.parent, desc->data,
                          desc->buf_len, DMA_TO_DEVICE);
 #else
-        dma_unmap_single(&port->netdev->dev, desc->data & ~3,
+        dma_unmap_single(port->netdev->dev.parent, desc->data & ~3,
                          ALIGN((desc->data & 3) + desc->buf_len, 4),
                          DMA_TO_DEVICE);
 #endif
@@ -727,9 +727,9 @@ static int eth_poll(struct napi_struct *
 #ifdef __ARMEB__
                 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
-                        phys = dma_map_single(&dev->dev, skb->data,
+                        phys = dma_map_single(dev->dev.parent, skb->data,
                                               RX_BUFF_SIZE, DMA_FROM_DEVICE);
-                        if (dma_mapping_error(&dev->dev, phys)) {
+                        if (dma_mapping_error(dev->dev.parent, phys)) {
                                 dev_kfree_skb(skb);
                                 skb = NULL;
                         }
@@ -752,10 +752,11 @@ static int eth_poll(struct napi_struct *
 #ifdef __ARMEB__
                 temp = skb;
                 skb = port->rx_buff_tab[n];
-                dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
+                dma_unmap_single(dev->dev.parent, desc->data - NET_IP_ALIGN,
                                  RX_BUFF_SIZE, DMA_FROM_DEVICE);
 #else
-                dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
+                dma_sync_single_for_cpu(dev->dev.parent,
+                                        desc->data - NET_IP_ALIGN,
                                         RX_BUFF_SIZE, DMA_FROM_DEVICE);
                 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
                               ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
@@ -874,7 +875,7 @@ static int eth_xmit(struct sk_buff *skb,
         memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
 #endif
-        phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
+        phys = dma_map_single(dev->dev.parent, mem, bytes, DMA_TO_DEVICE);
         if (dma_mapping_error(&dev->dev, phys)) {
                 dev_kfree_skb(skb);
 #ifndef __ARMEB__
@@ -1124,7 +1125,7 @@ static int init_queues(struct port *port
         int i;
         if (!ports_open) {
-                dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+                dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
                                            POOL_ALLOC_SIZE, 32, 0);
                 if (!dma_pool)
                         return -ENOMEM;
@@ -1152,9 +1153,9 @@ static int init_queues(struct port *port
                 data = buff;
 #endif
                 desc->buf_len = MAX_MRU;
-                desc->data = dma_map_single(&port->netdev->dev, data,
+                desc->data = dma_map_single(port->netdev->dev.parent, data,
                                             RX_BUFF_SIZE, DMA_FROM_DEVICE);
-                if (dma_mapping_error(&port->netdev->dev, desc->data)) {
+                if (dma_mapping_error(port->netdev->dev.parent, desc->data)) {
                         free_buffer(buff);
                         return -EIO;
                 }
@@ -1174,7 +1175,7 @@ static void destroy_queues(struct port *
                 struct desc *desc = rx_desc_ptr(port, i);
                 buffer_t *buff = port->rx_buff_tab[i];
                 if (buff) {
-                        dma_unmap_single(&port->netdev->dev,
+                        dma_unmap_single(port->netdev->dev.parent,
                                          desc->data - NET_IP_ALIGN,
                                          RX_BUFF_SIZE, DMA_FROM_DEVICE);
                         free_buffer(buff);
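
For reference, and not part of the patch itself: every hunk applies the same pattern, pointing the DMA API at the net_device's parent platform device (dev->dev.parent), which carries the dma_coherent_mask, instead of the net_device's own embedded struct device, which has none. Below is a minimal sketch of that pattern; example_map_rx() and EXAMPLE_BUF_SIZE are invented for illustration and do not appear in the driver.

/*
 * Illustrative sketch only: map an RX buffer through the parent
 * (platform) device rather than the net_device's own struct device.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_BUF_SIZE 1536	/* example size, not from the driver */

static int example_map_rx(struct net_device *dev, struct sk_buff *skb,
                          dma_addr_t *phys)
{
        /* Map against dev->dev.parent, not &dev->dev. */
        *phys = dma_map_single(dev->dev.parent, skb->data,
                               EXAMPLE_BUF_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev->dev.parent, *phys))
                return -EIO;

        return 0;
}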