Lab code for the Operating Systems course.
#include <vmm.h>
#include <sync.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <error.h>
#include <pmm.h>
#include <x86.h>
#include <swap.h>
/*
  vmm design includes two parts: mm_struct (mm) & vma_struct (vma)
  mm is the memory manager for a set of continuous virtual memory
  areas which share the same PDT. vma is a continuous virtual memory area.
  There is a linear linked list for vma & a red-black tree for vma in mm.
---------------
  mm related functions:
   global functions
     struct mm_struct * mm_create(void)
     void mm_destroy(struct mm_struct *mm)
     int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr)
--------------
  vma related functions:
   global functions
     struct vma_struct * vma_create (uintptr_t vm_start, uintptr_t vm_end,...)
     void insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma)
     struct vma_struct * find_vma(struct mm_struct *mm, uintptr_t addr)
   local functions
     inline void check_vma_overlap(struct vma_struct *prev, struct vma_struct *next)
---------------
   check correctness functions
     void check_vmm(void);
     void check_vma_struct(void);
     void check_pgfault(void);
*/
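
/* Illustrative usage sketch (not part of the lab code; kept disabled with #if 0,
 * following this file's own convention for non-compiled code): how the mm/vma
 * functions listed above fit together. The address range and flags are made up. */
#if 0
static void
vmm_usage_sketch(void) {
    struct mm_struct *mm = mm_create();                          // alloc & init a memory manager
    struct vma_struct *vma = vma_create(0x1000, 0x2000, VM_READ | VM_WRITE);
    insert_vma_struct(mm, vma);                                  // insert into mm's sorted vma list
    assert(find_vma(mm, 0x1800) == vma);                         // any addr in [vm_start, vm_end) hits this vma
    assert(find_vma(mm, 0x2000) == NULL);                        // vm_end itself is exclusive
    mm_destroy(mm);                                              // frees every vma and then mm itself
}
#endif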
static void check_vmm(void);
static void check_vma_struct(void);
static void check_pgfault(void);

// mm_create - alloc a mm_struct & initialize it.
struct mm_struct *
mm_create(void) {
    struct mm_struct *mm = kmalloc(sizeof(struct mm_struct));

    if (mm != NULL) {
        list_init(&(mm->mmap_list));
        mm->mmap_cache = NULL;
        mm->pgdir = NULL;
        mm->map_count = 0;

        if (swap_init_ok) swap_init_mm(mm);
        else mm->sm_priv = NULL;
    }
    return mm;
}
// vma_create - alloc a vma_struct & initialize it. (addr range: vm_start~vm_end)
struct vma_struct *
vma_create(uintptr_t vm_start, uintptr_t vm_end, uint32_t vm_flags) {
    struct vma_struct *vma = kmalloc(sizeof(struct vma_struct));

    if (vma != NULL) {
        vma->vm_start = vm_start;
        vma->vm_end = vm_end;
        vma->vm_flags = vm_flags;
    }
    return vma;
}
// find_vma - find a vma (vma->vm_start <= addr < vma->vm_end)
struct vma_struct *
find_vma(struct mm_struct *mm, uintptr_t addr) {
    struct vma_struct *vma = NULL;
    if (mm != NULL) {
        vma = mm->mmap_cache;
        if (!(vma != NULL && vma->vm_start <= addr && vma->vm_end > addr)) {
            bool found = 0;
            list_entry_t *list = &(mm->mmap_list), *le = list;
            while ((le = list_next(le)) != list) {
                vma = le2vma(le, list_link);
                if (vma->vm_start <= addr && addr < vma->vm_end) {
                    found = 1;
                    break;
                }
            }
            if (!found) {
                vma = NULL;
            }
        }
        if (vma != NULL) {
            mm->mmap_cache = vma;
        }
    }
    return vma;
}
// check_vma_overlap - assert that prev & next do not overlap (prev must end before next starts)
static inline void
check_vma_overlap(struct vma_struct *prev, struct vma_struct *next) {
    assert(prev->vm_start < prev->vm_end);
    assert(prev->vm_end <= next->vm_start);
    assert(next->vm_start < next->vm_end);
}
// insert_vma_struct - insert vma in mm's list link
void
insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma) {
    assert(vma->vm_start < vma->vm_end);
    list_entry_t *list = &(mm->mmap_list);
    list_entry_t *le_prev = list, *le_next;

    list_entry_t *le = list;
    while ((le = list_next(le)) != list) {
        struct vma_struct *mmap_prev = le2vma(le, list_link);
        if (mmap_prev->vm_start > vma->vm_start) {
            break;
        }
        le_prev = le;
    }

    le_next = list_next(le_prev);

    /* check overlap */
    if (le_prev != list) {
        check_vma_overlap(le2vma(le_prev, list_link), vma);
    }
    if (le_next != list) {
        check_vma_overlap(vma, le2vma(le_next, list_link));
    }

    vma->vm_mm = mm;
    list_add_after(le_prev, &(vma->list_link));

    mm->map_count ++;
}
// mm_destroy - free mm and mm internal fields
void
mm_destroy(struct mm_struct *mm) {
    list_entry_t *list = &(mm->mmap_list), *le;
    while ((le = list_next(list)) != list) {
        list_del(le);
        kfree(le2vma(le, list_link), sizeof(struct vma_struct));  // kfree vma
    }
    kfree(mm, sizeof(struct mm_struct));  // kfree mm
    mm = NULL;
}
// vmm_init - initialize virtual memory management
//          - now just call check_vmm to check correctness of vmm
void
vmm_init(void) {
    check_vmm();
}
// check_vmm - check correctness of vmm
static void
check_vmm(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_vma_struct();
    check_pgfault();

    assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_vmm() succeeded.\n");
}
static void
check_vma_struct(void) {
    size_t nr_free_pages_store = nr_free_pages();

    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    int step1 = 10, step2 = step1 * 10;

    int i;
    for (i = step1; i >= 1; i --) {
        struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
        assert(vma != NULL);
        insert_vma_struct(mm, vma);
    }

    for (i = step1 + 1; i <= step2; i ++) {
        struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
        assert(vma != NULL);
        insert_vma_struct(mm, vma);
    }

    list_entry_t *le = list_next(&(mm->mmap_list));

    for (i = 1; i <= step2; i ++) {
        assert(le != &(mm->mmap_list));
        struct vma_struct *mmap = le2vma(le, list_link);
        assert(mmap->vm_start == i * 5 && mmap->vm_end == i * 5 + 2);
        le = list_next(le);
    }

    for (i = 5; i <= 5 * step2; i += 5) {
        struct vma_struct *vma1 = find_vma(mm, i);
        assert(vma1 != NULL);
        struct vma_struct *vma2 = find_vma(mm, i + 1);
        assert(vma2 != NULL);
        struct vma_struct *vma3 = find_vma(mm, i + 2);
        assert(vma3 == NULL);
        struct vma_struct *vma4 = find_vma(mm, i + 3);
        assert(vma4 == NULL);
        struct vma_struct *vma5 = find_vma(mm, i + 4);
        assert(vma5 == NULL);

        assert(vma1->vm_start == i && vma1->vm_end == i + 2);
        assert(vma2->vm_start == i && vma2->vm_end == i + 2);
    }

    for (i = 4; i >= 0; i --) {
        struct vma_struct *vma_below_5 = find_vma(mm, i);
        if (vma_below_5 != NULL) {
            cprintf("vma_below_5: i %x, start %x, end %x\n", i, vma_below_5->vm_start, vma_below_5->vm_end);
        }
        assert(vma_below_5 == NULL);
    }

    mm_destroy(mm);

    assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_vma_struct() succeeded!\n");
}
struct mm_struct *check_mm_struct;

// check_pgfault - check correctness of pgfault handler
static void
check_pgfault(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_mm_struct = mm_create();
    assert(check_mm_struct != NULL);

    struct mm_struct *mm = check_mm_struct;
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    uintptr_t addr = 0x100;
    assert(find_vma(mm, addr) == vma);

    int i, sum = 0;
    for (i = 0; i < 100; i ++) {
        *(char *)(addr + i) = i;
        sum += i;
    }
    for (i = 0; i < 100; i ++) {
        sum -= *(char *)(addr + i);
    }
    assert(sum == 0);

    page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
    free_page(pa2page(pgdir[0]));
    pgdir[0] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_pgfault() succeeded!\n");
}
// page fault number
volatile unsigned int pgfault_num = 0;

/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by x86 hardware
 * @addr       : the addr which causes a memory access exception (the contents of the CR2 register)
 *
 * CALL GRAPH: trap --> trap_dispatch --> pgfault_handler --> do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. The do_pgfault function can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *         -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *            or to either an access rights violation or the use of a reserved bit (1).
 *         -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *            was a read (0) or a write (1).
 *         -- The U/S flag (bit 2) indicates whether the processor was executing in user mode (1)
 *            or supervisor mode (0) at the time of the exception.
 */
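/* Illustrative sketch (not part of the handler; disabled with #if 0, as with the
 * exercise scaffold below): decoding the three error_code bits described above into
 * a human-readable message. The real handler only switches on the low two bits. */
#if 0
static void
print_pgfault_cause(uint32_t error_code) {
    cprintf("page fault: %s, %s, %s\n",
            (error_code & 1) ? "protection violation" : "page not present",   // P flag   (bit 0)
            (error_code & 2) ? "write access"         : "read access",        // W/R flag (bit 1)
            (error_code & 4) ? "user mode"            : "kernel mode");       // U/S flag (bit 2)
}
#endif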
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    // try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    // if addr is not covered by any of mm's vma, it is an invalid access
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and can not find it in vma\n", addr);
        goto failed;
    }
    // check the error_code
    switch (error_code & 3) {
    default:
        /* error code flag : default is 3 (W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF   (write to an existing addr) OR
     *      (write to a non-existing addr && addr is writable) OR
     *      (read from a non-existing addr && addr is readable)
     * THEN
     *      continue processing
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep = NULL;
    /* LAB3 EXERCISE 1: YOUR CODE
     * Maybe you want helping comments; the comments BELOW can help you finish the code.
     *
     * Some useful MACROs and DEFINEs you can use in the implementation below.
     * MACROs or Functions:
     *   get_pte : get a pte and return the kernel virtual address of this pte for la;
     *             if the PT containing this pte doesn't exist, alloc a page for the PT
     *             (notice the 3rd parameter '1')
     *   pgdir_alloc_page : call the alloc_page & page_insert functions to allocate a page-sized memory
     *             & set up an addr map pa <---> la with linear address la and the PDT pgdir
     * DEFINEs:
     *   VM_WRITE : if (vma->vm_flags & VM_WRITE) != 0, then the vma is writable, otherwise it is not
     *   PTE_W    0x002 // page table/directory entry flags bit : Writeable
     *   PTE_U    0x004 // page table/directory entry flags bit : User can access
     * VARIABLES:
     *   mm->pgdir : the PDT of these vma
     *
     */
#if 0
    /* LAB3 EXERCISE 1: YOUR CODE */
    ptep = ???              // (1) try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    if (*ptep == 0) {
                            // (2) if the phys addr doesn't exist, then alloc a page & map the phys addr with the logical addr
    }
    else {
        /* LAB3 EXERCISE 2: YOUR CODE
         * Now we think this pte is a swap entry; we should load data from disk into a page with a phys addr,
         * map that phys addr to the logical addr, and trigger the swap manager to record the access situation of this page.
         *
         * Some useful MACROs and DEFINEs you can use in the implementation below.
         * MACROs or Functions:
         *   swap_in(mm, addr, &page) : alloc a memory page, then according to the swap entry in the PTE for addr,
         *                              find the addr of the disk page and read its content into this memory page
         *   page_insert : build the map of the phys addr of a Page with the linear addr la
         *   swap_map_swappable : set the page swappable
         */
        if (swap_init_ok) {
            struct Page *page = NULL;
            // (1) According to the mm AND addr, try to load the content of the right disk page
            //     into the memory which the page manages.
            // (2) According to the mm, addr AND page, set up the map of phys addr <---> logical addr
            // (3) make the page swappable.
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
            goto failed;
        }
    }
#endif
    // try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    // (notice the 3rd parameter '1')
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        cprintf("get_pte in do_pgfault failed\n");
        goto failed;
    }

    if (*ptep == 0) { // if the phys addr doesn't exist, then alloc a page & map the phys addr with the logical addr
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page in do_pgfault failed\n");
            goto failed;
        }
    }
    else { // if this pte is a swap entry, then load data from disk into a page with a phys addr
           // and call page_insert to map the phys addr with the logical addr
        if (swap_init_ok) {
            struct Page *page = NULL;
            if ((ret = swap_in(mm, addr, &page)) != 0) {
                cprintf("swap_in in do_pgfault failed\n");
                goto failed;
            }
            page_insert(mm->pgdir, page, addr, perm);
            swap_map_swappable(mm, addr, page, 1);
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
            goto failed;
        }
    }

    ret = 0;
failed:
    return ret;
}