Lab code for the Operating Systems (《操作系统》) course.

#include <defs.h>
#include <list.h>
#include <memlayout.h>
#include <assert.h>
#include <kmalloc.h>
#include <sync.h>
#include <pmm.h>
#include <stdio.h>

/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 8 bytes on x86, though it's perhaps possible to reduce
 * this to 4 if it's deemed worth the effort. The slob heap is a
 * singly-linked list of pages from __get_free_page, grown on demand,
 * and allocation from the heap is currently first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly so that it can return page-aligned blocks
 * and keeps a linked list of such pages and their orders. These
 * objects are detected in kfree() by their page alignment.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
 * set, in which case the low-level allocator will fragment blocks to
 * create the proper alignment. Again, objects of page-size or greater
 * are allocated by calling __get_free_pages. As SLAB objects know
 * their size, no separate size bookkeeping is necessary and there is
 * essentially no allocation space overhead.
 */

// some helpers
#define spin_lock_irqsave(l, f)         local_intr_save(f)
#define spin_unlock_irqrestore(l, f)    local_intr_restore(f)
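
/*
 * Note: these shims discard the lock argument (slob_lock and block_lock
 * are never defined in this file); mutual exclusion here is assumed to
 * come entirely from disabling local interrupts, which is sufficient on
 * a single-CPU ucore configuration.
 */
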
typedef unsigned int gfp_t;

#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif

#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif

#ifndef ALIGN
#define ALIGN(addr, size) (((addr) + (size) - 1) & (~((size) - 1)))
#endif

struct slob_block {
    int units;
    struct slob_block *next;
};
typedef struct slob_block slob_t;

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1) / SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
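
/*
 * Worked example (an illustration, assuming a 32-bit build where
 * sizeof(slob_t) == 8, i.e. SLOB_UNIT == 8):
 *   SLOB_UNITS(1)  == 1  ->  8 bytes reserved
 *   SLOB_UNITS(8)  == 1  ->  8 bytes reserved
 *   SLOB_UNITS(13) == 2  -> 16 bytes reserved
 *   ALIGN(0x12345, 64) == 0x12380
 */
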
struct bigblock {
    int order;
    void *pages;
    struct bigblock *next;
};
typedef struct bigblock bigblock_t;

static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
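
/*
 * The heap starts as a circular free list containing only the 1-unit
 * sentinel `arena`, which points to itself; `slobfree` is a roving
 * pointer into that ring.  `bigblocks` heads the singly-linked list of
 * page-order allocations created by __kmalloc for large requests.
 */
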
static void *__slob_get_free_pages(gfp_t gfp, int order)
{
    struct Page *page = alloc_pages(1 << order);
    if (!page)
        return NULL;
    return page2kva(page);
}

#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)

static inline void __slob_free_pages(unsigned long kva, int order)
{
    free_pages(kva2page(kva), 1 << order);
}
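
/*
 * These wrappers adapt the Linux __get_free_pages interface to ucore's
 * physical memory manager: alloc_pages()/free_pages() deal in runs of
 * struct Page, and page2kva()/kva2page() translate between page
 * descriptors and kernel virtual addresses.  The gfp flags are accepted
 * but unused.
 */
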
static void slob_free(void *b, int size);

static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
    assert((size + SLOB_UNIT) < PAGE_SIZE);

    slob_t *prev, *cur, *aligned = 0;
    int delta = 0, units = SLOB_UNITS(size);
    unsigned long flags;

    spin_lock_irqsave(&slob_lock, flags);
    prev = slobfree;
    for (cur = prev->next; ; prev = cur, cur = cur->next) {
        if (align) {
            aligned = (slob_t *)ALIGN((unsigned long)cur, align);
            delta = aligned - cur;
        }
        if (cur->units >= units + delta) { /* room enough? */
            if (delta) { /* need to fragment head to align? */
                aligned->units = cur->units - delta;
                aligned->next = cur->next;
                cur->next = aligned;
                cur->units = delta;
                prev = cur;
                cur = aligned;
            }
            if (cur->units == units) /* exact fit? */
                prev->next = cur->next; /* unlink */
            else { /* fragment */
                prev->next = cur + units;
                prev->next->units = cur->units - units;
                prev->next->next = cur->next;
                cur->units = units;
            }
            slobfree = prev;
            spin_unlock_irqrestore(&slob_lock, flags);
            return cur;
        }
        if (cur == slobfree) {
            spin_unlock_irqrestore(&slob_lock, flags);
            if (size == PAGE_SIZE) /* trying to shrink arena? */
                return 0;
            cur = (slob_t *)__slob_get_free_page(gfp);
            if (!cur)
                return 0;
            slob_free(cur, PAGE_SIZE);
            spin_lock_irqsave(&slob_lock, flags);
            cur = slobfree;
        }
    }
}
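
/*
 * slob_alloc walks the circular free list first-fit, starting just
 * after the roving `slobfree` pointer.  If an alignment is requested,
 * the head of a candidate block is split off first so the returned
 * address is aligned.  An exact fit is simply unlinked; a larger block
 * is split, with the tail (cur + units) re-linked as a smaller free
 * block.  If the scan wraps all the way around without finding room,
 * one fresh page is allocated, donated to the free list via
 * slob_free(), and the search resumes.
 */
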
static void slob_free(void *block, int size)
{
    slob_t *cur, *b = (slob_t *)block;
    unsigned long flags;

    if (!block)
        return;

    if (size)
        b->units = SLOB_UNITS(size);

    /* Find reinsertion point */
    spin_lock_irqsave(&slob_lock, flags);
    for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
        if (cur >= cur->next && (b > cur || b < cur->next))
            break;

    if (b + b->units == cur->next) {
        b->units += cur->next->units;
        b->next = cur->next->next;
    } else
        b->next = cur->next;

    if (cur + cur->units == b) {
        cur->units += b->units;
        cur->next = b->next;
    } else
        cur->next = b;

    slobfree = cur;
    spin_unlock_irqrestore(&slob_lock, flags);
}
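
/*
 * slob_free reinserts a block into the address-sorted circular list:
 * the search loop finds the node `cur` that `b` belongs after (the
 * extra test handles wrap-around at the highest address), and the two
 * `if` blocks merge `b` with its successor and/or predecessor whenever
 * the blocks are physically adjacent, so freed neighbours coalesce.
 */
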
void
slob_init(void) {
    cprintf("use SLOB allocator\n");
}

inline void
kmalloc_init(void) {
    slob_init();
    cprintf("kmalloc_init() succeeded!\n");
}

size_t
slob_allocated(void) {
    return 0;
}

size_t
kallocated(void) {
    return slob_allocated();
}

static int find_order(int size)
{
    int order = 0;
    for ( ; size > 4096; size >>= 1)
        order++;
    return order;
}
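
/*
 * find_order maps a byte count onto a power-of-two page order for the
 * big-block path, e.g. find_order(4096) == 0 (1 page),
 * find_order(4097) == 1 (2 pages), find_order(16384) == 2 (4 pages).
 * Note that the size is shifted before being re-checked, so a request
 * of 8193 bytes also yields order 1, i.e. an 8192-byte block.
 */
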
static void *__kmalloc(size_t size, gfp_t gfp)
{
    slob_t *m;
    bigblock_t *bb;
    unsigned long flags;

    if (size < PAGE_SIZE - SLOB_UNIT) {
        m = slob_alloc(size + SLOB_UNIT, gfp, 0);
        return m ? (void *)(m + 1) : 0;
    }

    bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
    if (!bb)
        return 0;
    bb->order = find_order(size);
    bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);

    if (bb->pages) {
        spin_lock_irqsave(&block_lock, flags);
        bb->next = bigblocks;
        bigblocks = bb;
        spin_unlock_irqrestore(&block_lock, flags);
        return bb->pages;
    }

    slob_free(bb, sizeof(bigblock_t));
    return 0;
}
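
/*
 * Small requests (< PAGE_SIZE - SLOB_UNIT) get one extra unit up front:
 * that hidden slob_t header records the block size in units, and the
 * caller receives m + 1.  Larger requests bypass the slob heap: a
 * bigblock_t descriptor records the page order and the page-aligned
 * address, and is chained onto `bigblocks` so kfree()/ksize() can find
 * it later.
 */
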
void *
kmalloc(size_t size)
{
    return __kmalloc(size, 0);
}

void kfree(void *block)
{
    bigblock_t *bb, **last = &bigblocks;
    unsigned long flags;

    if (!block)
        return;

    if (!((unsigned long)block & (PAGE_SIZE - 1))) {
        /* might be on the big block list */
        spin_lock_irqsave(&block_lock, flags);
        for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
            if (bb->pages == block) {
                *last = bb->next;
                spin_unlock_irqrestore(&block_lock, flags);
                __slob_free_pages((unsigned long)block, bb->order);
                slob_free(bb, sizeof(bigblock_t));
                return;
            }
        }
        spin_unlock_irqrestore(&block_lock, flags);
    }

    slob_free((slob_t *)block - 1, 0);
    return;
}
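
/*
 * kfree distinguishes the two allocation paths by alignment: a
 * page-aligned pointer is first looked up on the bigblocks list and,
 * if found, its pages are freed whole and the descriptor is released;
 * anything else is treated as a small allocation and its hidden header
 * at block - 1 is handed back to slob_free().
 */
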
unsigned int ksize(const void *block)
{
    bigblock_t *bb;
    unsigned long flags;

    if (!block)
        return 0;

    if (!((unsigned long)block & (PAGE_SIZE - 1))) {
        spin_lock_irqsave(&block_lock, flags);
        for (bb = bigblocks; bb; bb = bb->next)
            if (bb->pages == block) {
                spin_unlock_irqrestore(&block_lock, flags);
                return PAGE_SIZE << bb->order;
            }
        spin_unlock_irqrestore(&block_lock, flags);
    }

    return ((slob_t *)block - 1)->units * SLOB_UNIT;
}
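
/*
 * Usage sketch (an assumption about the surrounding lab code, not
 * enforced in this file): kmalloc_init() is expected to be called once
 * during kernel startup, after pmm_init() has made alloc_pages()
 * available.  After that, kernel code can do e.g.:
 *
 *     struct foo *p = kmalloc(sizeof(struct foo)); // small: slob heap (struct foo is hypothetical)
 *     char *buf = kmalloc(16 * PAGE_SIZE);         // large: whole pages via the bigblock path
 *     assert(ksize(buf) >= 16 * PAGE_SIZE);
 *     kfree(buf);
 *     kfree(p);
 */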