160-kmap_coherent.patch 2.3 KB

--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -151,6 +151,9 @@
 #ifndef cpu_has_local_ebase
 #define cpu_has_local_ebase 1
 #endif
+#ifndef cpu_use_kmap_coherent
+#define cpu_use_kmap_coherent 1
+#endif
 
 /*
  * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
--- a/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
@@ -79,4 +79,6 @@
 #define cpu_scache_line_size() 0
 #define cpu_has_vz 0
 
+#define cpu_use_kmap_coherent 0
+
 #endif /* __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H */
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -591,7 +591,7 @@ static inline void local_r4k_flush_cache
 		 */
 		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
-		if (map_coherent)
+		if (map_coherent && cpu_use_kmap_coherent)
 			vaddr = kmap_coherent(page, addr);
 		else
 			vaddr = kmap_atomic(page);
@@ -616,7 +616,7 @@ static inline void local_r4k_flush_cache
 	}
 
 	if (vaddr) {
-		if (map_coherent)
+		if (map_coherent && cpu_use_kmap_coherent)
 			kunmap_coherent();
 		else
 			kunmap_atomic(vaddr);
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -155,7 +155,7 @@ void copy_user_highpage(struct page *to,
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to);
-	if (cpu_has_dc_aliases &&
+	if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
 	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
@@ -177,7 +177,7 @@ void copy_to_user_page(struct vm_area_st
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases &&
+	if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
 	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
@@ -195,7 +195,7 @@ void copy_from_user_page(struct vm_area_
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases &&
+	if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
 	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);