secbuf.c
  1. /*
  2. * Part of Very Secure FTPd
  3. * Licence: GPL v2
  4. * Author: Chris Evans
  5. * secbuf.c
  6. *
  7. * Here are some routines providing the (possibly silly) concept of a secure
  8. * buffer. A secure buffer may not be overflowed. A single byte overflow
  9. * will cause the program to safely terminate.
  10. */
  11. #include "secbuf.h"
  12. #include "utility.h"
  13. #include "sysutil.h"
  14. #include "sysdeputil.h"
  15. void
  16. vsf_secbuf_alloc(char** p_ptr, unsigned int size)
  17. {
  18. unsigned int page_offset;
  19. unsigned int round_up;
  20. char* p_mmap;
  21. char* p_no_access_page;
  22. unsigned int page_size = vsf_sysutil_getpagesize();
  23. /* Free any previous buffer */
  24. vsf_secbuf_free(p_ptr);
  25. /* Round up to next page size */
  26. page_offset = size % page_size;
  27. if (page_offset)
  28. {
  29. unsigned int num_pages = size / page_size;
  30. num_pages++;
  31. round_up = num_pages * page_size;
  32. }
  33. else
  34. {
  35. /* Allocation is on a page-size boundary */
  36. round_up = size;
  37. }
  38. /* Add on another two pages to make inaccessible */
  39. round_up += page_size * 2;
  40. p_mmap = vsf_sysutil_map_anon_pages(round_up);
  41. /* Map the first and last page inaccessible */
  42. p_no_access_page = p_mmap + round_up - page_size;
  43. vsf_sysutil_memprotect(p_no_access_page, page_size, kVSFSysUtilMapProtNone);
  44. /* Before we make the "before" page inaccessible, store the size in it.
  45. * A little hack so that we don't need to explicitly be passed the size
  46. * when freeing an existing secure buffer
  47. */
  48. *((unsigned int*)p_mmap) = round_up;
  49. p_no_access_page = p_mmap;
  50. vsf_sysutil_memprotect(p_no_access_page, page_size, kVSFSysUtilMapProtNone);
  51. p_mmap += page_size;
  52. if (page_offset)
  53. {
  54. p_mmap += (page_size - page_offset);
  55. }
  56. *p_ptr = p_mmap;
  57. }
  58. void
  59. vsf_secbuf_free(char** p_ptr)
  60. {
  61. unsigned int map_size;
  62. unsigned long page_offset;
  63. char* p_mmap = *p_ptr;
  64. unsigned int page_size = vsf_sysutil_getpagesize();
  65. if (p_mmap == 0)
  66. {
  67. return;
  68. }
  69. /* Calculate the actual start of the mmap region */
  70. page_offset = (unsigned long) p_mmap % page_size;
  71. if (page_offset)
  72. {
  73. p_mmap -= page_offset;
  74. }
  75. p_mmap -= page_size;
  76. /* First make the first page readable so we can get the size */
  77. vsf_sysutil_memprotect(p_mmap, page_size, kVSFSysUtilMapProtReadOnly);
  78. /* Extract the mapping size */
  79. map_size = *((unsigned int*)p_mmap);
  80. /* Lose the mapping */
  81. vsf_sysutil_memunmap(p_mmap, map_size);
  82. }