Lab code for the Operating Systems (《操作系统》) course.

#include <defs.h>
#include <list.h>
#include <memlayout.h>
#include <assert.h>
#include <kmalloc.h>
#include <sync.h>
#include <pmm.h>
#include <stdio.h>

/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 8 bytes on x86, though it's perhaps possible to reduce
 * this to 4 if it's deemed worth the effort. The slob heap is a
 * singly-linked list of pages from __get_free_page, grown on demand,
 * and allocation from the heap is currently first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly so that it can return page-aligned blocks
 * and keeps a linked list of such pages and their orders. These
 * objects are detected in kfree() by their page alignment.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
 * set, in which case the low-level allocator will fragment blocks to
 * create the proper alignment. Again, objects of page-size or greater
 * are allocated by calling __get_free_pages. As SLAB objects know
 * their size, no separate size bookkeeping is necessary and there is
 * essentially no allocation space overhead.
 */
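/*
 * Worked example of the small-object path (illustrative, assuming 32-bit
 * pointers so that SLOB_UNIT == 8): kmalloc(20) calls slob_alloc with
 * 20 + 8 = 28 bytes, which rounds up to 4 units (32 bytes). The first
 * unit becomes the header recording the size in units; the caller gets
 * a pointer just past it, and kfree() recovers the header with
 * ((slob_t *)block - 1).
 */
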
/* Helpers: on this single-CPU kernel mutual exclusion only requires
 * disabling local interrupts, so the lock argument is ignored. */
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)

typedef unsigned int gfp_t;

#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif

#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif

#ifndef ALIGN
#define ALIGN(addr, size) (((addr) + (size) - 1) & (~((size) - 1)))
#endif
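/* Example: ALIGN(0x1005, 8) == 0x1008; "size" must be a power of two. */
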
struct slob_block {
    int units;                  /* size of this free block, in SLOB_UNITs */
    struct slob_block *next;    /* next free block (address-ordered, circular) */
};
typedef struct slob_block slob_t;

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1) / SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
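/* Example (assuming 32-bit pointers, so SLOB_UNIT == sizeof(slob_t) == 8):
 * SLOB_UNITS(28) == 4, i.e. a 28-byte request occupies four 8-byte units
 * (32 bytes) on the heap. */
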
struct bigblock {
    int order;                  /* page order of the allocation */
    void *pages;                /* kernel virtual address of the pages */
    struct bigblock *next;
};
typedef struct bigblock bigblock_t;

static slob_t arena = { .next = &arena, .units = 1 };  /* 1-unit sentinel block */
static slob_t *slobfree = &arena;                      /* roving free-list pointer */
static bigblock_t *bigblocks;                          /* list of >= PAGE_SIZE allocations */

static void *__slob_get_free_pages(gfp_t gfp, int order)
{
    struct Page *page = alloc_pages(1 << order);
    if (!page)
        return NULL;
    return page2kva(page);
}

#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)

static inline void __slob_free_pages(unsigned long kva, int order)
{
    free_pages(kva2page((void *)kva), 1 << order);
}

static void slob_free(void *b, int size);

static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
    assert((size + SLOB_UNIT) < PAGE_SIZE);

    slob_t *prev, *cur, *aligned = NULL;
    int delta = 0, units = SLOB_UNITS(size);
    unsigned long flags;

    spin_lock_irqsave(&slob_lock, flags);
    prev = slobfree;
    for (cur = prev->next; ; prev = cur, cur = cur->next) {
        if (align) {
            aligned = (slob_t *)ALIGN((unsigned long)cur, align);
            delta = aligned - cur; /* units wasted in front of the aligned block */
        }
        if (cur->units >= units + delta) { /* room enough? */
            if (delta) { /* need to fragment head to align? */
                aligned->units = cur->units - delta;
                aligned->next = cur->next;
                cur->next = aligned;
                cur->units = delta;
                prev = cur;
                cur = aligned;
            }
            if (cur->units == units) /* exact fit? */
                prev->next = cur->next; /* unlink */
            else { /* fragment */
                prev->next = cur + units;
                prev->next->units = cur->units - units;
                prev->next->next = cur->next;
                cur->units = units;
            }
            slobfree = prev;
            spin_unlock_irqrestore(&slob_lock, flags);
            return cur;
        }
        if (cur == slobfree) { /* full pass with no fit: grow the heap */
            spin_unlock_irqrestore(&slob_lock, flags);
            if (size == PAGE_SIZE) /* trying to shrink arena? */
                return NULL;
            cur = (slob_t *)__slob_get_free_page(gfp);
            if (!cur)
                return NULL;
            slob_free(cur, PAGE_SIZE); /* donate the new page to the free list */
            spin_lock_irqsave(&slob_lock, flags);
            cur = slobfree;
        }
    }
}

static void slob_free(void *block, int size)
{
    slob_t *cur, *b = (slob_t *)block;
    unsigned long flags;

    if (!block)
        return;

    if (size)
        b->units = SLOB_UNITS(size);

    /* Find reinsertion point: the list is kept sorted by address and is
     * circular, so also stop at the wrap-around block. */
    spin_lock_irqsave(&slob_lock, flags);
    for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
        if (cur >= cur->next && (b > cur || b < cur->next))
            break;

    if (b + b->units == cur->next) { /* coalesce with the following block */
        b->units += cur->next->units;
        b->next = cur->next->next;
    } else
        b->next = cur->next;

    if (cur + cur->units == b) { /* coalesce with the preceding block */
        cur->units += b->units;
        cur->next = b->next;
    } else
        cur->next = b;

    slobfree = cur;
    spin_unlock_irqrestore(&slob_lock, flags);
}
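/*
 * Coalescing example (illustrative addresses, SLOB_UNIT == 8): with free
 * blocks at 0x1000 (4 units) and 0x1040 (2 units), freeing 4 units at
 * 0x1020 first merges forward with 0x1040 (0x1020 + 4*8 == 0x1040), then
 * backward with 0x1000 (0x1000 + 4*8 == 0x1020), leaving a single
 * 10-unit block at 0x1000.
 */
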
void check_slab(void) {
    cprintf("check_slab() success\n");
}

void
slab_init(void) {
    cprintf("use SLOB allocator\n");
    check_slab();
}

inline void
kmalloc_init(void) {
    slab_init();
    cprintf("kmalloc_init() succeeded!\n");
}

size_t
slab_allocated(void) {
    /* SLOB keeps no global byte count, so nothing to report. */
    return 0;
}

size_t
kallocated(void) {
    return slab_allocated();
}

/* Smallest order such that (PAGE_SIZE << order) >= size. The original
 * right-shift loop could undersize requests just above a power-of-two
 * boundary, e.g. 8193 bytes mapped to order 1 (8192 bytes). */
static int find_order(int size)
{
    int order = 0;
    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}
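/* Example: with PAGE_SIZE == 4096, find_order(4096) == 0 (one page),
 * find_order(8192) == 1, and find_order(8193) == 2, since two pages
 * (8192 bytes) would be one byte short. */
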
static void *__kmalloc(size_t size, gfp_t gfp)
{
    slob_t *m;
    bigblock_t *bb;
    unsigned long flags;

    /* Small objects: carve header + payload out of the slob heap. */
    if (size < PAGE_SIZE - SLOB_UNIT) {
        m = slob_alloc(size + SLOB_UNIT, gfp, 0);
        return m ? (void *)(m + 1) : NULL;
    }

    /* Large objects: allocate whole pages and track them on bigblocks. */
    bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
    if (!bb)
        return NULL;

    bb->order = find_order(size);
    bb->pages = __slob_get_free_pages(gfp, bb->order);

    if (bb->pages) {
        spin_lock_irqsave(&block_lock, flags);
        bb->next = bigblocks;
        bigblocks = bb;
        spin_unlock_irqrestore(&block_lock, flags);
        return bb->pages;
    }

    slob_free(bb, sizeof(bigblock_t));
    return NULL;
}

void *
kmalloc(size_t size)
{
    return __kmalloc(size, 0);
}

void kfree(void *block)
{
    bigblock_t *bb, **last = &bigblocks;
    unsigned long flags;

    if (!block)
        return;

    if (!((unsigned long)block & (PAGE_SIZE - 1))) {
        /* Page-aligned: might be on the big block list. */
        spin_lock_irqsave(&block_lock, flags);
        for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
            if (bb->pages == block) {
                *last = bb->next; /* unlink, then release the pages */
                spin_unlock_irqrestore(&block_lock, flags);
                __slob_free_pages((unsigned long)block, bb->order);
                slob_free(bb, sizeof(bigblock_t));
                return;
            }
        }
        spin_unlock_irqrestore(&block_lock, flags);
    }

    /* Small object: the slob header sits immediately before the block. */
    slob_free((slob_t *)block - 1, 0);
    return;
}

unsigned int ksize(const void *block)
{
    bigblock_t *bb;
    unsigned long flags;

    if (!block)
        return 0;

    if (!((unsigned long)block & (PAGE_SIZE - 1))) {
        spin_lock_irqsave(&block_lock, flags);
        for (bb = bigblocks; bb; bb = bb->next)
            if (bb->pages == block) {
                spin_unlock_irqrestore(&block_lock, flags);
                return PAGE_SIZE << bb->order;
            }
        spin_unlock_irqrestore(&block_lock, flags);
    }

    return ((slob_t *)block - 1)->units * SLOB_UNIT;
}
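
/*
 * A minimal exercise of the allocator (an illustrative sketch, not part of
 * the lab's grading checks; check_slob_demo is a hypothetical helper). It
 * round-trips both the small-object path and the big-block path.
 */
static void check_slob_demo(void)
{
    /* Small object: served from the slob heap, header just before it. */
    void *p = kmalloc(100);
    assert(p != NULL);
    assert(ksize(p) >= 100);
    kfree(p);

    /* Big block: >= PAGE_SIZE, so it comes straight from alloc_pages and
     * is recorded on the bigblocks list. */
    void *q = kmalloc(PAGE_SIZE);
    assert(q != NULL);
    assert(((unsigned long)q & (PAGE_SIZE - 1)) == 0); /* page-aligned */
    assert(ksize(q) == PAGE_SIZE);
    kfree(q);

    cprintf("check_slob_demo() success\n");
}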