
From 9d9f602905f1d7de7b0d93d8293ed144f7af0e21 Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Mon, 19 Oct 2015 08:23:18 -0700
Subject: [PATCH 092/381] drm/vc4: bo cache locking fixes.

Signed-off-by: Eric Anholt <eric@anholt.net>
---
 drivers/gpu/drm/vc4/vc4_bo.c  | 32 ++++++++++++++++++--------------
 drivers/gpu/drm/vc4/vc4_drv.h |  2 +-
 2 files changed, 19 insertions(+), 15 deletions(-)

--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -112,14 +112,14 @@ void vc4_bo_cache_purge(struct drm_devic
 {
         struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-        spin_lock(&vc4->bo_lock);
+        mutex_lock(&vc4->bo_lock);
         while (!list_empty(&vc4->bo_cache.time_list)) {
                 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                     struct vc4_bo, unref_head);
                 vc4_bo_remove_from_cache(bo);
                 vc4_bo_destroy(bo);
         }
-        spin_unlock(&vc4->bo_lock);
+        mutex_unlock(&vc4->bo_lock);
 }
 
 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size)
@@ -134,18 +134,18 @@ struct vc4_bo *vc4_bo_create(struct drm_
                 return NULL;
 
         /* First, try to get a vc4_bo from the kernel BO cache. */
-        spin_lock(&vc4->bo_lock);
+        mutex_lock(&vc4->bo_lock);
         if (page_index < vc4->bo_cache.size_list_size &&
             !list_empty(&vc4->bo_cache.size_list[page_index])) {
                 struct vc4_bo *bo =
                         list_first_entry(&vc4->bo_cache.size_list[page_index],
                                          struct vc4_bo, size_head);
                 vc4_bo_remove_from_cache(bo);
-                spin_unlock(&vc4->bo_lock);
+                mutex_unlock(&vc4->bo_lock);
                 kref_init(&bo->base.base.refcount);
                 return bo;
         }
-        spin_unlock(&vc4->bo_lock);
+        mutex_unlock(&vc4->bo_lock);
 
         /* Otherwise, make a new BO. */
         for (pass = 0; ; pass++) {
@@ -215,7 +215,7 @@ vc4_bo_cache_free_old(struct drm_device
         struct vc4_dev *vc4 = to_vc4_dev(dev);
         unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
 
-        spin_lock(&vc4->bo_lock);
+        mutex_lock(&vc4->bo_lock);
         while (!list_empty(&vc4->bo_cache.time_list)) {
                 struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
                                                     struct vc4_bo, unref_head);
@@ -223,14 +223,14 @@ vc4_bo_cache_free_old(struct drm_device
                         mod_timer(&vc4->bo_cache.time_timer,
                                   round_jiffies_up(jiffies +
                                                    msecs_to_jiffies(1000)));
-                        spin_unlock(&vc4->bo_lock);
+                        mutex_unlock(&vc4->bo_lock);
                         return;
                 }
 
                 vc4_bo_remove_from_cache(bo);
                 vc4_bo_destroy(bo);
         }
-        spin_unlock(&vc4->bo_lock);
+        mutex_unlock(&vc4->bo_lock);
 }
 
 /* Called on the last userspace/kernel unreference of the BO. Returns
@@ -248,21 +248,25 @@ void vc4_free_object(struct drm_gem_obje
         /* If the object references someone else's memory, we can't cache it.
          */
         if (gem_bo->import_attach) {
+                mutex_lock(&vc4->bo_lock);
                 vc4_bo_destroy(bo);
+                mutex_unlock(&vc4->bo_lock);
                 return;
         }
 
         /* Don't cache if it was publicly named. */
         if (gem_bo->name) {
+                mutex_lock(&vc4->bo_lock);
                 vc4_bo_destroy(bo);
+                mutex_unlock(&vc4->bo_lock);
                 return;
         }
 
-        spin_lock(&vc4->bo_lock);
+        mutex_lock(&vc4->bo_lock);
         cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
         if (!cache_list) {
                 vc4_bo_destroy(bo);
-                spin_unlock(&vc4->bo_lock);
+                mutex_unlock(&vc4->bo_lock);
                 return;
         }
 
@@ -278,7 +282,7 @@ void vc4_free_object(struct drm_gem_obje
         vc4->bo_stats.num_cached++;
         vc4->bo_stats.size_cached += gem_bo->size;
 
-        spin_unlock(&vc4->bo_lock);
+        mutex_unlock(&vc4->bo_lock);
 
         vc4_bo_cache_free_old(dev);
 }
@@ -465,7 +469,7 @@ void vc4_bo_cache_init(struct drm_device
 {
         struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-        spin_lock_init(&vc4->bo_lock);
+        mutex_init(&vc4->bo_lock);
 
         INIT_LIST_HEAD(&vc4->bo_cache.time_list);
 
@@ -498,9 +502,9 @@ int vc4_bo_stats_debugfs(struct seq_file
         struct vc4_dev *vc4 = to_vc4_dev(dev);
         struct vc4_bo_stats stats;
 
-        spin_lock(&vc4->bo_lock);
+        mutex_lock(&vc4->bo_lock);
         stats = vc4->bo_stats;
-        spin_unlock(&vc4->bo_lock);
+        mutex_unlock(&vc4->bo_lock);
 
         seq_printf(m, "num bos allocated: %d\n", stats.num_allocated);
         seq_printf(m, "size bos allocated: %dkb\n", stats.size_allocated / 1024);
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -49,7 +49,7 @@ struct vc4_dev {
         } bo_stats;
 
         /* Protects bo_cache and the BO stats. */
-        spinlock_t bo_lock;
+        struct mutex bo_lock;
 
         /* Sequence number for the last job queued in job_list.
          * Starts at 0 (no jobs emitted).
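
The patch converts bo_lock from a spinlock to a mutex and moves the early vc4_bo_destroy() calls (the import_attach and named-BO paths) under that lock, so destruction and the cached-BO stats are always updated while the lock is held; a mutex is needed here presumably because the BO free path can block, which is not allowed under a spinlock. A minimal userspace sketch of the resulting pattern follows, using pthreads instead of the kernel mutex API; the fake_bo/fake_cache/fake_bo_destroy names are hypothetical stand-ins, not part of the driver.

/* Illustrative sketch: a blocking destroy path run under a sleepable lock,
 * with the cache stats updated under the same lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct fake_bo {
        size_t size;
        struct fake_bo *next;
};

struct fake_cache {
        pthread_mutex_t lock;   /* protects the list and the stats, like bo_lock */
        struct fake_bo *head;
        unsigned int num_cached;
        size_t size_cached;
};

/* May block (stand-in for freeing backing storage), so it must be called
 * under a sleepable lock such as a mutex, never under a spinlock. */
static void fake_bo_destroy(struct fake_cache *cache, struct fake_bo *bo)
{
        cache->num_cached--;
        cache->size_cached -= bo->size;
        usleep(1000);           /* simulated blocking free */
        free(bo);
}

static void fake_cache_purge(struct fake_cache *cache)
{
        pthread_mutex_lock(&cache->lock);
        while (cache->head) {
                struct fake_bo *bo = cache->head;

                cache->head = bo->next;
                fake_bo_destroy(cache, bo);     /* blocking call, lock still held */
        }
        pthread_mutex_unlock(&cache->lock);
}

int main(void)
{
        struct fake_cache cache = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct fake_bo *bo = calloc(1, sizeof(*bo));

        bo->size = 4096;
        pthread_mutex_lock(&cache.lock);
        bo->next = cache.head;
        cache.head = bo;
        cache.num_cached++;
        cache.size_cached += bo->size;
        pthread_mutex_unlock(&cache.lock);

        fake_cache_purge(&cache);
        printf("cached after purge: %u (%zu bytes)\n",
               cache.num_cached, cache.size_cached);
        return 0;
}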