/*
 *  Tiny C Memory and bounds checker
 *
 *  Copyright (c) 2002 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#ifndef __FreeBSD__
#include <malloc.h>
#endif

//#define BOUND_DEBUG

/* define so that the bound array is static (faster, but uses memory even if
   bound checking is not used) */
//#define BOUND_STATIC

/* use malloc hooks. Currently the code is not reliable without the hooks */
#define CONFIG_TCC_MALLOC_HOOKS

#define HAVE_MEMALIGN

#if defined(__FreeBSD__) || defined(__dietlibc__)
#warning Bound checking not fully supported on FreeBSD
#undef CONFIG_TCC_MALLOC_HOOKS
#undef HAVE_MEMALIGN
#endif

#define BOUND_T1_BITS 13
#define BOUND_T2_BITS 11
#define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)

#define BOUND_T1_SIZE (1 << BOUND_T1_BITS)
#define BOUND_T2_SIZE (1 << BOUND_T2_BITS)
#define BOUND_T3_SIZE (1 << BOUND_T3_BITS)
#define BOUND_E_BITS  4

#define BOUND_T23_BITS (BOUND_T2_BITS + BOUND_T3_BITS)
#define BOUND_T23_SIZE (1 << BOUND_T23_BITS)
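
/* Layout note: a 32-bit address is split into a BOUND_T1_BITS (13) index into
   the top-level page table, a BOUND_T2_BITS (11) index into a second-level
   page, and BOUND_T3_BITS (8) low bits, i.e. each BoundEntry covers a
   256-byte granule and one second-level page covers 2^19 = 512 KB of address
   space.  BOUND_E_BITS is 4 because sizeof(BoundEntry) is 16 bytes on a
   32-bit target, so the lookup code can turn the second-level index into a
   byte offset with a single shift and mask. */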


/* this pointer is returned when a bound check fails */
#define INVALID_POINTER ((void *)(-2))
/* size of an empty region */
#define EMPTY_SIZE        0xffffffff
/* size of an invalid region */
#define INVALID_SIZE      0

typedef struct BoundEntry {
    unsigned long start;
    unsigned long size;
    struct BoundEntry *next;
    unsigned long is_invalid; /* true if pointers outside region are invalid */
} BoundEntry;
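
/* A BoundEntry records one region: 'start' is its first byte and 'size' its
   length, so a pointer p belongs to it iff (unsigned long)p - start <= size
   (the '<=' allows a pointer one past the end for pointer arithmetic).
   Unused entries keep start = 0 and size = EMPTY_SIZE, so unregistered
   addresses are never flagged, while entries in invalid zones use
   INVALID_SIZE = 0 so that accesses through them fail.  'next' chains the
   other regions sharing the same 256-byte granule. */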

/* external interface */
void __bound_init(void);
void __bound_new_region(void *p, unsigned long size);
int __bound_delete_region(void *p);

#define FASTCALL __attribute__((regparm(3)))
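
/* Note: when a file is compiled with bound checking enabled, tcc emits calls
   to these entry points (and to the __bound_ptr_* helpers below) around
   pointer operations; regparm(3) passes the arguments in registers to keep
   the instrumentation cheap on i386. */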

void *__bound_malloc(size_t size, const void *caller);
void *__bound_memalign(size_t size, size_t align, const void *caller);
void __bound_free(void *ptr, const void *caller);
void *__bound_realloc(void *ptr, size_t size, const void *caller);
static void *libc_malloc(size_t size);
static void libc_free(void *ptr);
static void install_malloc_hooks(void);
static void restore_malloc_hooks(void);

#ifdef CONFIG_TCC_MALLOC_HOOKS
static void *saved_malloc_hook;
static void *saved_free_hook;
static void *saved_realloc_hook;
static void *saved_memalign_hook;
#endif

/* linker definitions */
extern char _end;

/* TCC definitions */
extern char __bounds_start; /* start of static bounds table */
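/* The static bounds table is a list of (start address, size) word pairs
   terminated by a null start address; tcc generates it for the program's
   global and static arrays when bound checking is enabled, and
   __bound_init() below registers each pair with __bound_new_region(). */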
/* error message, just for TCC */
const char *__bound_error_msg;

/* runtime error output */
extern void rt_error(unsigned long pc, const char *fmt, ...);

#ifdef BOUND_STATIC
static BoundEntry *__bound_t1[BOUND_T1_SIZE]; /* page table */
#else
static BoundEntry **__bound_t1; /* page table */
#endif
static BoundEntry *__bound_empty_t2;   /* empty page, for unused pages */
static BoundEntry *__bound_invalid_t2; /* invalid page, for invalid pointers */

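/* All the range tests below rely on unsigned wrap-around: since 'addr' and
   'start' are unsigned, 'addr - start <= size' is a single comparison that
   rejects pointers both below the start and beyond the end of a region. */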
static BoundEntry *__bound_find_region(BoundEntry *e1, void *p)
{
    unsigned long addr, tmp;
    BoundEntry *e;

    e = e1;
    while (e != NULL) {
        addr = (unsigned long)p;
        addr -= e->start;
        if (addr <= e->size) {
            /* put region at the head */
            tmp = e1->start;
            e1->start = e->start;
            e->start = tmp;
            tmp = e1->size;
            e1->size = e->size;
            e->size = tmp;
            return e1;
        }
        e = e->next;
    }
    /* no entry found: return empty entry or invalid entry */
    if (e1->is_invalid)
        return __bound_invalid_t2;
    else
        return __bound_empty_t2;
}

/* print a bound error message */
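/* The error path does not report directly: bound_error() stores the message
   in __bound_error_msg and forces a fault through a null pointer write, so
   that the tcc runtime can trap it and report the message via rt_error()
   together with the faulting pc. */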
static void bound_error(const char *fmt, ...)
{
    __bound_error_msg = fmt;
    *(int *)0 = 0; /* force a runtime error */
}

static void bound_alloc_error(void)
{
    bound_error("not enough memory for bound checking code");
}

/* currently, tcc cannot compile this because we use GNU C extensions */
#if !defined(__TINYC__)

/* return '(p + offset)' for pointer arithmetic (a pointer can reach
   the end of a region in this case) */
void * FASTCALL __bound_ptr_add(void *p, int offset)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;
#if defined(BOUND_DEBUG)
    printf("add: 0x%x %d\n", (int)p, offset);
#endif

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size) {
        e = __bound_find_region(e, p);
        addr = (unsigned long)p - e->start;
    }
    addr += offset;
    if (addr > e->size)
        return INVALID_POINTER; /* return an invalid pointer */
    return p + offset;
}
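
/* Illustrative example (not emitted verbatim by the compiler): with bound
   checking enabled, an expression such as 'q = p + i' is compiled roughly as
   'q = __bound_ptr_add(p, i * sizeof(*p))', and a dereference goes through
   the matching __bound_ptr_indirN() helper, so an out-of-bounds access
   yields INVALID_POINTER and faults when it is used. */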

/* return '(p + offset)' for pointer indirection (the result must be
   strictly inside the region) */
#define BOUND_PTR_INDIR(dsize)                                          \
void * FASTCALL __bound_ptr_indir ## dsize (void *p, int offset)        \
{                                                                       \
    unsigned long addr = (unsigned long)p;                              \
    BoundEntry *e;                                                      \
                                                                        \
    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];            \
    e = (BoundEntry *)((char *)e +                                      \
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &      \
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));        \
    addr -= e->start;                                                   \
    if (addr > e->size) {                                               \
        e = __bound_find_region(e, p);                                  \
        addr = (unsigned long)p - e->start;                             \
    }                                                                   \
    addr += offset + dsize;                                             \
    if (addr > e->size)                                                 \
        return INVALID_POINTER; /* return an invalid pointer */         \
    return p + offset;                                                  \
}

#ifdef __i386__
/* return the frame pointer of the caller */
#define GET_CALLER_FP(fp)\
{\
    unsigned long *fp1;\
    __asm__ __volatile__ ("movl %%ebp,%0" :"=g" (fp1));\
    fp = fp1[0];\
}
#else
#error put code to extract the calling frame pointer
#endif

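/* The argument of __bound_local_new()/__bound_local_delete() is a table of
   (frame-pointer-relative offset, size) word pairs, one per local array of
   the function, terminated by a zero offset; each offset is added to the
   caller's frame pointer obtained with GET_CALLER_FP(). */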
/* called when entering a function to add all the local regions */
void FASTCALL __bound_local_new(void *p1)
{
    unsigned long addr, size, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        size = p[1];
        p += 2;
        __bound_new_region((void *)addr, size);
    }
}

/* called when leaving a function to delete all the local regions */
void FASTCALL __bound_local_delete(void *p1)
{
    unsigned long addr, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        p += 2;
        __bound_delete_region((void *)addr);
    }
}

#else

void __bound_local_new(void *p)
{
}
void __bound_local_delete(void *p)
{
}

void *__bound_ptr_add(void *p, int offset)
{
    return p + offset;
}

#define BOUND_PTR_INDIR(dsize)                               \
void *__bound_ptr_indir ## dsize (void *p, int offset)       \
{                                                            \
    return p + offset;                                       \
}
#endif

BOUND_PTR_INDIR(1)
BOUND_PTR_INDIR(2)
BOUND_PTR_INDIR(4)
BOUND_PTR_INDIR(8)
BOUND_PTR_INDIR(12)
BOUND_PTR_INDIR(16)

static BoundEntry *__bound_new_page(void)
{
    BoundEntry *page;
    int i;

    page = libc_malloc(sizeof(BoundEntry) * BOUND_T2_SIZE);
    if (!page)
        bound_alloc_error();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put empty entries */
        page[i].start = 0;
        page[i].size = EMPTY_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 0;
    }
    return page;
}

/* currently we use malloc(). Should use bound_new_page() */
static BoundEntry *bound_new_entry(void)
{
    BoundEntry *e;
    e = libc_malloc(sizeof(BoundEntry));
    return e;
}

static void bound_free_entry(BoundEntry *e)
{
    libc_free(e);
}

static inline BoundEntry *get_page(int index)
{
    BoundEntry *page;
    page = __bound_t1[index];
    if (page == __bound_empty_t2 || page == __bound_invalid_t2) {
        /* create a new page if necessary */
        page = __bound_new_page();
        __bound_t1[index] = page;
    }
    return page;
}
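
/* Note: all top-level slots initially point to the shared __bound_empty_t2
   (or __bound_invalid_t2) page; get_page() lazily replaces the shared page
   with a private, writable one the first time entries in that 512 KB range
   need to be modified. */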

/* mark a region as being invalid (can only be used during init) */
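/* Every BoundEntry inside the zone gets size = INVALID_SIZE and
   is_invalid = 1, so any checked pointer arithmetic or dereference landing
   there is flagged; this is used below for the zone containing
   INVALID_POINTER and, when malloc hooks are active, for the heap area
   following _end. */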
static void mark_invalid(unsigned long addr, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page;
    int t1_start, t1_end, i, j, t2_start, t2_end;

    start = addr;
    end = addr + size;

    t2_start = (start + BOUND_T3_SIZE - 1) >> BOUND_T3_BITS;
    if (end != 0)
        t2_end = end >> BOUND_T3_BITS;
    else
        t2_end = 1 << (BOUND_T1_BITS + BOUND_T2_BITS);

#if 0
    printf("mark_invalid: start = %x %x\n", t2_start, t2_end);
#endif

    /* first we handle full pages */
    t1_start = (t2_start + BOUND_T2_SIZE - 1) >> BOUND_T2_BITS;
    t1_end = t2_end >> BOUND_T2_BITS;

    i = t2_start & (BOUND_T2_SIZE - 1);
    j = t2_end & (BOUND_T2_SIZE - 1);

    if (t1_start == t1_end) {
        page = get_page(t2_start >> BOUND_T2_BITS);
        for(; i < j; i++) {
            page[i].size = INVALID_SIZE;
            page[i].is_invalid = 1;
        }
    } else {
        if (i > 0) {
            page = get_page(t2_start >> BOUND_T2_BITS);
            for(; i < BOUND_T2_SIZE; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
        for(i = t1_start; i < t1_end; i++) {
            __bound_t1[i] = __bound_invalid_t2;
        }
        if (j != 0) {
            page = get_page(t1_end);
            for(i = 0; i < j; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
    }
}

void __bound_init(void)
{
    int i;
    BoundEntry *page;
    unsigned long start, size;
    int *p;

    /* save malloc hooks and install bound check hooks */
    install_malloc_hooks();

#ifndef BOUND_STATIC
    __bound_t1 = libc_malloc(BOUND_T1_SIZE * sizeof(BoundEntry *));
    if (!__bound_t1)
        bound_alloc_error();
#endif
    __bound_empty_t2 = __bound_new_page();
    for(i=0;i<BOUND_T1_SIZE;i++) {
        __bound_t1[i] = __bound_empty_t2;
    }

    page = __bound_new_page();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put invalid entries */
        page[i].start = 0;
        page[i].size = INVALID_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 1;
    }
    __bound_invalid_t2 = page;

    /* invalid pointer zone */
    start = (unsigned long)INVALID_POINTER & ~(BOUND_T23_SIZE - 1);
    size = BOUND_T23_SIZE;
    mark_invalid(start, size);

#if !defined(__TINYC__) && defined(CONFIG_TCC_MALLOC_HOOKS)
    /* malloc zone is also marked invalid. can only use that with
       hooks because all libs should use the same malloc. The solution
       would be to build a new malloc for tcc. */
    start = (unsigned long)&_end;
    size = 128 * 0x100000;
    mark_invalid(start, size);
#endif

    /* add all static bound check values */
    p = (int *)&__bounds_start;
    while (p[0] != 0) {
        __bound_new_region((void *)p[0], p[1]);
        p += 2;
    }
}

static inline void add_region(BoundEntry *e,
                              unsigned long start, unsigned long size)
{
    BoundEntry *e1;
    if (e->start == 0) {
        /* no region : add it */
        e->start = start;
        e->size = size;
    } else {
        /* already regions in the list: add it at the head */
        e1 = bound_new_entry();
        if (!e1)
            bound_alloc_error();
        e1->start = e->start;
        e1->size = e->size;
        e1->next = e->next;
        e->start = start;
        e->size = size;
        e->next = e1;
    }
}

/* create a new region. It should not already exist in the region list */
void __bound_new_region(void *p, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, i, t2_start, t2_end;

    start = (unsigned long)p;
    end = start + size;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);

    /* start */
    page = get_page(t1_start);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
#ifdef BOUND_DEBUG
    printf("new %lx %lx %x %x %x %x\n",
           start, end, t1_start, t1_end, t2_start, t2_end);
#endif

    e = (BoundEntry *)((char *)page + t2_start);
    add_region(e, start, size);

    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
            add_region(e, start, size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        /* mark intermediate pages, if any */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        add_region(e, start, size);
    }
}
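
/* Note: every 256-byte granule overlapped by the region ends up pointing
   back at the same (start, size) pair; only the first and last granules,
   which may be shared with neighbouring regions, go through add_region()
   and its overflow list. */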

/* delete a region */
static inline void delete_region(BoundEntry *e,
                                 void *p, unsigned long empty_size)
{
    unsigned long addr;
    BoundEntry *e1;

    addr = (unsigned long)p;
    addr -= e->start;
    if (addr <= e->size) {
        /* region found is first one */
        e1 = e->next;
        if (e1 == NULL) {
            /* no more region: mark it empty */
            e->start = 0;
            e->size = empty_size;
        } else {
            /* copy next region in head */
            e->start = e1->start;
            e->size = e1->size;
            e->next = e1->next;
            bound_free_entry(e1);
        }
    } else {
        /* find the matching region */
        for(;;) {
            e1 = e;
            e = e->next;
            /* region not found: do nothing */
            if (e == NULL)
                break;
            addr = (unsigned long)p - e->start;
            if (addr <= e->size) {
                /* found: remove entry */
                e1->next = e->next;
                bound_free_entry(e);
                break;
            }
        }
    }
}

/* WARNING: 'p' must be the starting point of the region. */
/* return non zero if error */
int __bound_delete_region(void *p)
{
    unsigned long start, end, addr, size, empty_size;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, t2_start, t2_end, i;

    start = (unsigned long)p;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    /* find region size */
    page = __bound_t1[t1_start];
    e = (BoundEntry *)((char *)page + t2_start);
    addr = start - e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    /* test if invalid region */
    if (e->size == EMPTY_SIZE || (unsigned long)p != e->start)
        return -1;
    /* compute the size we put in invalid regions */
    if (e->is_invalid)
        empty_size = INVALID_SIZE;
    else
        empty_size = EMPTY_SIZE;
    size = e->size;
    end = start + size;

    /* now we can free each entry */
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    delete_region(e, p, empty_size);
    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
            delete_region(e, p, empty_size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        /* mark intermediate pages, if any */
        /* XXX: should free them */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
        }
        /* last page (a top-level index, as in __bound_new_region) */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        delete_region(e, p, empty_size);
    }
    return 0;
}

/* return the size of the region starting at p, or EMPTY_SIZE if no
   such region exists */
static unsigned long get_region_size(void *p)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    if (e->start != (unsigned long)p)
        return EMPTY_SIZE;
    return e->size;
}

/* patched memory functions */

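/* The glibc __malloc_hook/__free_hook/__realloc_hook/__memalign_hook
   pointers are redirected to the __bound_* wrappers so that every allocation
   made by the program (or by libraries) is registered.  libc_malloc() and
   libc_free() temporarily restore the original hooks so that the checker's
   own internal allocations do not recurse into the wrappers.  (These hooks
   are a glibc extension and have been removed from recent glibc versions.) */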
static void install_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    saved_malloc_hook = __malloc_hook;
    saved_free_hook = __free_hook;
    saved_realloc_hook = __realloc_hook;
    saved_memalign_hook = __memalign_hook;
    __malloc_hook = __bound_malloc;
    __free_hook = __bound_free;
    __realloc_hook = __bound_realloc;
    __memalign_hook = __bound_memalign;
#endif
}

static void restore_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    __malloc_hook = saved_malloc_hook;
    __free_hook = saved_free_hook;
    __realloc_hook = saved_realloc_hook;
    __memalign_hook = saved_memalign_hook;
#endif
}

static void *libc_malloc(size_t size)
{
    void *ptr;
    restore_malloc_hooks();
    ptr = malloc(size);
    install_malloc_hooks();
    return ptr;
}

static void libc_free(void *ptr)
{
    restore_malloc_hooks();
    free(ptr);
    install_malloc_hooks();
}

/* XXX: we should use a malloc that ensures it is unlikely that two
   malloc'ed blocks get the same address if 'free' calls are made in
   between. */
void *__bound_malloc(size_t size, const void *caller)
{
    void *ptr;

    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, it may
       in fact not be necessary */
    ptr = libc_malloc(size + 1);

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void *__bound_memalign(size_t size, size_t align, const void *caller)
{
    void *ptr;

    restore_malloc_hooks();

#ifndef HAVE_MEMALIGN
    if (align > 4) {
        /* XXX: handle it ? */
        ptr = NULL;
    } else {
        /* we suppose that malloc aligns to at least four bytes */
        ptr = malloc(size + 1);
    }
#else
    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, it may
       in fact not be necessary */
    ptr = memalign(size + 1, align);
#endif

    install_malloc_hooks();

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void __bound_free(void *ptr, const void *caller)
{
    if (ptr == NULL)
        return;
    if (__bound_delete_region(ptr) != 0)
        bound_error("freeing invalid region");

    libc_free(ptr);
}

void *__bound_realloc(void *ptr, size_t size, const void *caller)
{
    void *ptr1;
    unsigned long old_size;

    if (size == 0) {
        __bound_free(ptr, caller);
        return NULL;
    } else {
        ptr1 = __bound_malloc(size, caller);
        if (ptr == NULL || ptr1 == NULL)
            return ptr1;
        old_size = get_region_size(ptr);
        if (old_size == EMPTY_SIZE)
            bound_error("realloc'ing invalid pointer");
        /* when shrinking, copy only what fits in the new region */
        if (old_size > size)
            old_size = size;
        memcpy(ptr1, ptr, old_size);
        __bound_free(ptr, caller);
        return ptr1;
    }
}

#ifndef CONFIG_TCC_MALLOC_HOOKS
void *__bound_calloc(size_t nmemb, size_t size)
{
    void *ptr;
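    /* XXX: no overflow check on nmemb * size */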
    size = size * nmemb;
    ptr = __bound_malloc(size, NULL);
    if (!ptr)
        return NULL;
    memset(ptr, 0, size);
    return ptr;
}
#endif

#if 0
static void bound_dump(void)
{
    BoundEntry *page, *e;
    int i, j;

    printf("region dump:\n");
    for(i=0;i<BOUND_T1_SIZE;i++) {
        page = __bound_t1[i];
        for(j=0;j<BOUND_T2_SIZE;j++) {
            e = page + j;
            /* do not print invalid or empty entries */
            if (e->size != EMPTY_SIZE && e->start != 0) {
                printf("%08x:",
                       (i << (BOUND_T2_BITS + BOUND_T3_BITS)) +
                       (j << BOUND_T3_BITS));
                do {
                    printf(" %08lx:%08lx", e->start, e->start + e->size);
                    e = e->next;
                } while (e != NULL);
                printf("\n");
            }
        }
    }
}
#endif

/* some useful checked functions */

/* check that (p ... p + size - 1) lies inside 'p' region, if any */
static void __bound_check(const void *p, size_t size)
{
    if (size == 0)
        return;
    p = __bound_ptr_add((void *)p, size);
    if (p == INVALID_POINTER)
        bound_error("invalid pointer");
}

void *__bound_memcpy(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    /* check also region overlap */
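    /* XXX: only the case where 'src' falls inside [dst, dst + size) is
       detected; the symmetric overlap is not flagged */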
    if (src >= dst && src < dst + size)
        bound_error("overlapping regions in memcpy()");
    return memcpy(dst, src, size);
}

void *__bound_memmove(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    return memmove(dst, src, size);
}

void *__bound_memset(void *dst, int c, size_t size)
{
    __bound_check(dst, size);
    return memset(dst, c, size);
}

/* XXX: could be optimized */
int __bound_strlen(const char *s)
{
    const char *p;
    int len;

    len = 0;
    for(;;) {
        p = __bound_ptr_indir1((char *)s, len);
        if (p == INVALID_POINTER)
            bound_error("bad pointer in strlen()");
        if (*p == '\0')
            break;
        len++;
    }
    return len;
}

char *__bound_strcpy(char *dst, const char *src)
{
    int len;
    len = __bound_strlen(src);
    return __bound_memcpy(dst, src, len + 1);
}