/*
 * irq-rps.c - irqchip driver for the RPS interrupt controller
 *             found on PLX Technology NAS782x SoCs.
 */
  1. #include <linux/irqdomain.h>
  2. #include <linux/irq.h>
  3. #include <linux/of.h>
  4. #include <linux/of_address.h>
  5. #include <linux/of_irq.h>
  6. #include <linux/irqchip/chained_irq.h>
  7. #include <linux/err.h>
  8. #include <linux/io.h>
  9. #include <linux/version.h>
  10. #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
  11. # include "irqchip.h"
  12. #else
  13. # include <linux/irqchip.h>
  14. #endif
/*
 * Per-controller state for the RPS block.  A single file-scope instance
 * (rps_data) is used; the probe path only ever maps one controller.
 */
struct rps_chip_data {
	void __iomem *base;		/* mapped RPS register window */
	struct irq_chip chip;		/* NOTE(review): appears unused -- the
					 * file-scope rps_chip below is what
					 * gets installed; confirm and drop? */
	struct irq_domain *domain;	/* legacy domain covering all 32 sources */
} rps_data;
/*
 * Linux IRQ numbering for this chip and the RPS register layout
 * (byte offsets from the mapped base).
 */
enum {
	RPS_IRQ_BASE = 64,	/* first Linux irq number requested for this chip */
	RPS_IRQ_COUNT = 32,	/* one 32-bit status word => 32 interrupt sources */
	PRS_HWIRQ_BASE = 0,	/* first hwirq (NOTE(review): "PRS" looks like a
				 * typo for "RPS"; kept as-is, callers use it) */
	RPS_STATUS = 0,		/* pending-and-enabled status, read in the handler */
	RPS_RAW_STATUS = 4,	/* presumably unmasked pending status -- unused here,
				 * verify against the datasheet */
	RPS_UNMASK = 8,		/* write-1-to-enable register */
	RPS_MASK = 0xc,		/* write-1-to-disable register */
};
/*
 * Routines to mask (disable) and unmask (enable) interrupt sources.
 */
  32. static void rps_mask_irq(struct irq_data *d)
  33. {
  34. struct rps_chip_data *chip_data = irq_data_get_irq_chip_data(d);
  35. u32 mask = BIT(d->hwirq);
  36. iowrite32(mask, chip_data->base + RPS_MASK);
  37. }
  38. static void rps_unmask_irq(struct irq_data *d)
  39. {
  40. struct rps_chip_data *chip_data = irq_data_get_irq_chip_data(d);
  41. u32 mask = BIT(d->hwirq);
  42. iowrite32(mask, chip_data->base + RPS_UNMASK);
  43. }
/*
 * irq_chip installed on every virq of this domain; the hardware only
 * supports mask/unmask (no ack/eoi registers are driven here).
 */
static struct irq_chip rps_chip = {
	.name = "RPS",
	.irq_mask = rps_mask_irq,
	.irq_unmask = rps_unmask_irq,
};
  49. static int rps_irq_domain_xlate(struct irq_domain *d,
  50. struct device_node *controller,
  51. const u32 *intspec, unsigned int intsize,
  52. unsigned long *out_hwirq,
  53. unsigned int *out_type)
  54. {
  55. #if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)
  56. if (d->of_node != controller)
  57. #else
  58. if (irq_domain_get_of_node(d) != controller)
  59. #endif
  60. return -EINVAL;
  61. if (intsize < 1)
  62. return -EINVAL;
  63. *out_hwirq = intspec[0];
  64. /* Honestly I do not know the type */
  65. *out_type = IRQ_TYPE_LEVEL_HIGH;
  66. return 0;
  67. }
/*
 * Set up a newly created virq: install our irq_chip with the level-type
 * flow handler, mark it probeable, and attach the controller state as
 * per-irq chip data (read back by the mask/unmask callbacks).
 */
static int rps_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(irq, &rps_chip, handle_level_irq);
/* set_irq_flags() was replaced by irq_set_probe() in v4.2 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
#else
irq_set_probe(irq);
#endif
irq_set_chip_data(irq, d->host_data);
return 0;
}
  80. const struct irq_domain_ops rps_irq_domain_ops = {
  81. .map = rps_irq_domain_map,
  82. .xlate = rps_irq_domain_xlate,
  83. };
  84. #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
  85. static void rps_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
  86. #else
  87. static void rps_handle_cascade_irq(struct irq_desc *desc)
  88. #endif
  89. {
  90. struct rps_chip_data *chip_data = irq_desc_get_handler_data(desc);
  91. struct irq_chip *chip = irq_desc_get_chip(desc);
  92. unsigned int cascade_irq, rps_irq;
  93. u32 status;
  94. chained_irq_enter(chip, desc);
  95. status = ioread32(chip_data->base + RPS_STATUS);
  96. rps_irq = __ffs(status);
  97. cascade_irq = irq_find_mapping(chip_data->domain, rps_irq);
  98. if (unlikely(rps_irq >= RPS_IRQ_COUNT))
  99. #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
  100. handle_bad_irq(cascade_irq, desc);
  101. #else
  102. handle_bad_irq(desc);
  103. #endif
  104. else
  105. generic_handle_irq(cascade_irq);
  106. chained_irq_exit(chip, desc);
  107. }
  108. #ifdef CONFIG_OF
  109. int __init rps_of_init(struct device_node *node, struct device_node *parent)
  110. {
  111. void __iomem *rps_base;
  112. int irq_start = RPS_IRQ_BASE;
  113. int irq_base;
  114. int irq;
  115. if (WARN_ON(!node))
  116. return -ENODEV;
  117. rps_base = of_iomap(node, 0);
  118. WARN(!rps_base, "unable to map rps registers\n");
  119. rps_data.base = rps_base;
  120. irq_base = irq_alloc_descs(irq_start, 0, RPS_IRQ_COUNT, numa_node_id());
  121. if (IS_ERR_VALUE(irq_base)) {
  122. WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
  123. irq_start);
  124. irq_base = irq_start;
  125. }
  126. rps_data.domain = irq_domain_add_legacy(node, RPS_IRQ_COUNT, irq_base,
  127. PRS_HWIRQ_BASE, &rps_irq_domain_ops, &rps_data);
  128. if (WARN_ON(!rps_data.domain))
  129. return -ENOMEM;
  130. if (parent) {
  131. irq = irq_of_parse_and_map(node, 0);
  132. if (irq_set_handler_data(irq, &rps_data) != 0)
  133. BUG();
  134. irq_set_chained_handler(irq, rps_handle_cascade_irq);
  135. }
  136. return 0;
  137. }
  138. IRQCHIP_DECLARE(nas782x, "plxtech,nas782x-rps", rps_of_init);
  139. #endif