092-02-spi-Pump-transfers-inside-calling-context-for-spi_sy.patch 6.1 KB

  1. From: Mark Brown <broonie@kernel.org>
  2. Date: Tue, 9 Dec 2014 21:38:05 +0000
  3. Subject: [PATCH] spi: Pump transfers inside calling context for spi_sync()
  4. If we are using the standard SPI message pump (which all drivers should be
  5. transitioning over to) then special case the message enqueue and instead of
  6. starting the worker thread to push messages to the hardware do so in the
  7. context of the caller if the controller is idle. This avoids a context
  8. switch in the common case where the controller has a single user in a
  9. single thread, for short PIO transfers there may be no need to context
  10. switch away from the calling context to complete the transfer.
  11. The code is a bit more complex than is desirable in part due to the need
  12. to handle drivers not using the standard queue and in part due to handling
  13. the various combinations of bus locking and asynchronous submission in
  14. interrupt context.
  15. It is still suboptimal since it will still wake the message pump for each
  16. transfer in order to schedule idling of the hardware and if multiple
  17. contexts are using the controller simultaneously a caller may end up
  18. pumping a message for some random other thread rather than for itself,
  19. and if the thread ends up deferring due to another context idling the
  20. hardware then it will just busy wait. It can, however, have the benefit
  21. of aggregating power up and down of the hardware when a caller performs
  22. a series of transfers back to back without any need for the use of
  23. spi_async().
  24. Signed-off-by: Mark Brown <broonie@kernel.org>
  25. ---
  26. --- a/drivers/spi/spi.c
  27. +++ b/drivers/spi/spi.c
  28. @@ -882,6 +882,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_t
  29. * needs processing and if so call out to the driver to initialize hardware
  30. * and transfer each message.
  31. *
  32. + * Note that it is called both from the kthread itself and also from
  33. + * inside spi_sync(); the queue extraction handling at the top of the
  34. + * function should deal with this safely.
  35. */
  36. static void spi_pump_messages(struct kthread_work *work)
  37. {
  38. @@ -900,6 +903,13 @@ static void spi_pump_messages(struct kth
  39. return;
  40. }
  41. + /* If another context is idling the device then defer */
  42. + if (master->idling) {
  43. + queue_kthread_work(&master->kworker, &master->pump_messages);
  44. + spin_unlock_irqrestore(&master->queue_lock, flags);
  45. + return;
  46. + }
  47. +
  48. /* Check if the queue is idle */
  49. if (list_empty(&master->queue) || !master->running) {
  50. if (!master->busy) {
  51. @@ -907,7 +917,9 @@ static void spi_pump_messages(struct kth
  52. return;
  53. }
  54. master->busy = false;
  55. + master->idling = true;
  56. spin_unlock_irqrestore(&master->queue_lock, flags);
  57. +
  58. kfree(master->dummy_rx);
  59. master->dummy_rx = NULL;
  60. kfree(master->dummy_tx);
  61. @@ -921,6 +933,10 @@ static void spi_pump_messages(struct kth
  62. pm_runtime_put_autosuspend(master->dev.parent);
  63. }
  64. trace_spi_master_idle(master);
  65. +
  66. + spin_lock_irqsave(&master->queue_lock, flags);
  67. + master->idling = false;
  68. + spin_unlock_irqrestore(&master->queue_lock, flags);
  69. return;
  70. }
  71. @@ -1166,12 +1182,9 @@ static int spi_destroy_queue(struct spi_
  72. return 0;
  73. }
  74. -/**
  75. - * spi_queued_transfer - transfer function for queued transfers
  76. - * @spi: spi device which is requesting transfer
  77. - * @msg: spi message which is to handled is queued to driver queue
  78. - */
  79. -static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
  80. +static int __spi_queued_transfer(struct spi_device *spi,
  81. + struct spi_message *msg,
  82. + bool need_pump)
  83. {
  84. struct spi_master *master = spi->master;
  85. unsigned long flags;
  86. @@ -1186,13 +1199,23 @@ static int spi_queued_transfer(struct sp
  87. msg->status = -EINPROGRESS;
  88. list_add_tail(&msg->queue, &master->queue);
  89. - if (!master->busy)
  90. + if (!master->busy && need_pump)
  91. queue_kthread_work(&master->kworker, &master->pump_messages);
  92. spin_unlock_irqrestore(&master->queue_lock, flags);
  93. return 0;
  94. }
  95. +/**
  96. + * spi_queued_transfer - transfer function for queued transfers
  97. + * @spi: spi device which is requesting transfer
  98. + * @msg: spi message which is to handled is queued to driver queue
  99. + */
  100. +static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
  101. +{
  102. + return __spi_queued_transfer(spi, msg, true);
  103. +}
  104. +
  105. static int spi_master_initialize_queue(struct spi_master *master)
  106. {
  107. int ret;
  108. @@ -2104,19 +2127,46 @@ static int __spi_sync(struct spi_device
  109. DECLARE_COMPLETION_ONSTACK(done);
  110. int status;
  111. struct spi_master *master = spi->master;
  112. + unsigned long flags;
  113. +
  114. + status = __spi_validate(spi, message);
  115. + if (status != 0)
  116. + return status;
  117. message->complete = spi_complete;
  118. message->context = &done;
  119. + message->spi = spi;
  120. if (!bus_locked)
  121. mutex_lock(&master->bus_lock_mutex);
  122. - status = spi_async_locked(spi, message);
  123. + /* If we're not using the legacy transfer method then we will
  124. + * try to transfer in the calling context so special case.
  125. + * This code would be less tricky if we could remove the
  126. + * support for driver implemented message queues.
  127. + */
  128. + if (master->transfer == spi_queued_transfer) {
  129. + spin_lock_irqsave(&master->bus_lock_spinlock, flags);
  130. +
  131. + trace_spi_message_submit(message);
  132. +
  133. + status = __spi_queued_transfer(spi, message, false);
  134. +
  135. + spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
  136. + } else {
  137. + status = spi_async_locked(spi, message);
  138. + }
  139. if (!bus_locked)
  140. mutex_unlock(&master->bus_lock_mutex);
  141. if (status == 0) {
  142. + /* Push out the messages in the calling context if we
  143. + * can.
  144. + */
  145. + if (master->transfer == spi_queued_transfer)
  146. + spi_pump_messages(&master->pump_messages);
  147. +
  148. wait_for_completion(&done);
  149. status = message->status;
  150. }
  151. --- a/include/linux/spi/spi.h
  152. +++ b/include/linux/spi/spi.h
  153. @@ -260,6 +260,7 @@ static inline void spi_unregister_driver
  154. * @pump_messages: work struct for scheduling work to the message pump
  155. * @queue_lock: spinlock to syncronise access to message queue
  156. * @queue: message queue
  157. + * @idling: the device is entering idle state
  158. * @cur_msg: the currently in-flight message
  159. * @cur_msg_prepared: spi_prepare_message was called for the currently
  160. * in-flight message
  161. @@ -425,6 +426,7 @@ struct spi_master {
  162. spinlock_t queue_lock;
  163. struct list_head queue;
  164. struct spi_message *cur_msg;
  165. + bool idling;
  166. bool busy;
  167. bool running;
  168. bool rt;