Lab code for the Operating Systems (《操作系统》) course.


#include <vmm.h>
#include <sync.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <error.h>
#include <pmm.h>
#include <x86.h>
#include <swap.h>
#include <kmalloc.h>
/*
  vmm design includes two parts: mm_struct (mm) & vma_struct (vma)
  mm is the memory manager for a set of continuous virtual memory
  areas which share the same PDT. vma is one continuous virtual memory area.
  mm keeps its vmas in a linear linked list (a red-black tree could be used as well).
  ---------------
  mm related functions:
    global functions
        struct mm_struct * mm_create(void)
        void mm_destroy(struct mm_struct *mm)
        int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr)
  --------------
  vma related functions:
    global functions
        struct vma_struct * vma_create (uintptr_t vm_start, uintptr_t vm_end,...)
        void insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma)
        struct vma_struct * find_vma(struct mm_struct *mm, uintptr_t addr)
    local functions
        inline void check_vma_overlap(struct vma_struct *prev, struct vma_struct *next)
  ---------------
  check correctness functions
    void check_vmm(void);
    void check_vma_struct(void);
    void check_pgfault(void);
*/
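
/* Added usage sketch (not part of the kernel): how the functions listed above
 * fit together; the address constants are made up purely for illustration.
 *
 *     struct mm_struct *mm = mm_create();
 *     struct vma_struct *vma = vma_create(0x1000, 0x2000, VM_READ);
 *     insert_vma_struct(mm, vma);
 *     assert(find_vma(mm, 0x1800) == vma);   // 0x1000 <= 0x1800 < 0x2000
 *     mm_destroy(mm);                        // frees every vma, then mm itself
 */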
static void check_vmm(void);
static void check_vma_struct(void);
static void check_pgfault(void);
// mm_create - alloc a mm_struct & initialize it.
struct mm_struct *
mm_create(void) {
    struct mm_struct *mm = kmalloc(sizeof(struct mm_struct));

    if (mm != NULL) {
        list_init(&(mm->mmap_list));
        mm->mmap_cache = NULL;
        mm->pgdir = NULL;
        mm->map_count = 0;

        if (swap_init_ok) swap_init_mm(mm);
        else mm->sm_priv = NULL;

        set_mm_count(mm, 0);
        lock_init(&(mm->mm_lock));
    }
    return mm;
}
// vma_create - alloc a vma_struct & initialize it. (addr range: vm_start~vm_end)
struct vma_struct *
vma_create(uintptr_t vm_start, uintptr_t vm_end, uint32_t vm_flags) {
    struct vma_struct *vma = kmalloc(sizeof(struct vma_struct));

    if (vma != NULL) {
        vma->vm_start = vm_start;
        vma->vm_end = vm_end;
        vma->vm_flags = vm_flags;
    }
    return vma;
}
// find_vma - find the vma that contains addr (vma->vm_start <= addr < vma->vm_end)
struct vma_struct *
find_vma(struct mm_struct *mm, uintptr_t addr) {
    struct vma_struct *vma = NULL;
    if (mm != NULL) {
        vma = mm->mmap_cache;
        if (!(vma != NULL && vma->vm_start <= addr && vma->vm_end > addr)) {
            bool found = 0;
            list_entry_t *list = &(mm->mmap_list), *le = list;
            while ((le = list_next(le)) != list) {
                vma = le2vma(le, list_link);
                if (vma->vm_start <= addr && addr < vma->vm_end) {
                    found = 1;
                    break;
                }
            }
            if (!found) {
                vma = NULL;
            }
        }
        if (vma != NULL) {
            mm->mmap_cache = vma;
        }
    }
    return vma;
}
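
/* Added design note: mmap_cache is the classic "last hit" optimization,
 * reminiscent of the mmap_cache once used in Linux. Page faults tend to
 * cluster in one region, so remembering the last matching vma often avoids
 * the O(n) walk over the sorted list. */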
// check_vma_overlap - assert that prev and next are well-formed and do not overlap
static inline void
check_vma_overlap(struct vma_struct *prev, struct vma_struct *next) {
    assert(prev->vm_start < prev->vm_end);
    assert(prev->vm_end <= next->vm_start);
    assert(next->vm_start < next->vm_end);
}
// insert_vma_struct - insert vma into mm's sorted list of vmas
void
insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma) {
    assert(vma->vm_start < vma->vm_end);
    list_entry_t *list = &(mm->mmap_list);
    list_entry_t *le_prev = list, *le_next;

    // find the last vma whose vm_start is not greater than the new one's
    list_entry_t *le = list;
    while ((le = list_next(le)) != list) {
        struct vma_struct *mmap_prev = le2vma(le, list_link);
        if (mmap_prev->vm_start > vma->vm_start) {
            break;
        }
        le_prev = le;
    }

    le_next = list_next(le_prev);

    /* check overlap */
    if (le_prev != list) {
        check_vma_overlap(le2vma(le_prev, list_link), vma);
    }
    if (le_next != list) {
        check_vma_overlap(vma, le2vma(le_next, list_link));
    }

    vma->vm_mm = mm;
    list_add_after(le_prev, &(vma->list_link));

    mm->map_count ++;
}
// mm_destroy - free mm and mm internal fields
void
mm_destroy(struct mm_struct *mm) {
    assert(mm_count(mm) == 0);

    list_entry_t *list = &(mm->mmap_list), *le;
    while ((le = list_next(list)) != list) {
        list_del(le);
        kfree(le2vma(le, list_link));  // free each vma
    }
    kfree(mm);  // free mm itself; the caller's pointer is dangling after this
}
// mm_map - map the range [addr, addr + len) into mm with vm_flags,
//          creating and inserting a new vma for it
int
mm_map(struct mm_struct *mm, uintptr_t addr, size_t len, uint32_t vm_flags,
       struct vma_struct **vma_store) {
    uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
    if (!USER_ACCESS(start, end)) {
        return -E_INVAL;
    }

    assert(mm != NULL);

    int ret = -E_INVAL;

    struct vma_struct *vma;
    if ((vma = find_vma(mm, start)) != NULL && end > vma->vm_start) {
        goto out;  // the new range would overlap an existing vma
    }
    ret = -E_NO_MEM;

    if ((vma = vma_create(start, end, vm_flags)) == NULL) {
        goto out;
    }
    insert_vma_struct(mm, vma);
    if (vma_store != NULL) {
        *vma_store = vma;
    }

    ret = 0;

out:
    return ret;
}
// dup_mmap - duplicate every vma of from into to, copying the mapped pages as well
int
dup_mmap(struct mm_struct *to, struct mm_struct *from) {
    assert(to != NULL && from != NULL);
    list_entry_t *list = &(from->mmap_list), *le = list;
    while ((le = list_prev(le)) != list) {
        struct vma_struct *vma, *nvma;
        vma = le2vma(le, list_link);
        nvma = vma_create(vma->vm_start, vma->vm_end, vma->vm_flags);
        if (nvma == NULL) {
            return -E_NO_MEM;
        }

        insert_vma_struct(to, nvma);

        bool share = 0;  // 0: copy the pages eagerly rather than sharing them
        if (copy_range(to->pgdir, from->pgdir, vma->vm_start, vma->vm_end, share) != 0) {
            return -E_NO_MEM;
        }
    }
    return 0;
}
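
/* Added note: walking from's list with list_prev (highest vm_start first)
 * means every insert_vma_struct into to lands at the front of the list, so
 * to comes out sorted with O(1) insertions. Passing share = 1 to copy_range
 * is presumably the hook for the LAB5 Copy-on-Write challenge discussed
 * further below; with share = 0, fork pays for a full copy of every page. */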
// exit_mmap - unmap every vma's pages, then free the underlying page tables
void
exit_mmap(struct mm_struct *mm) {
    assert(mm != NULL && mm_count(mm) == 0);
    pde_t *pgdir = mm->pgdir;
    list_entry_t *list = &(mm->mmap_list), *le = list;
    while ((le = list_next(le)) != list) {
        struct vma_struct *vma = le2vma(le, list_link);
        unmap_range(pgdir, vma->vm_start, vma->vm_end);
    }
    // le is back at the list head here, so the second pass revisits every vma
    while ((le = list_next(le)) != list) {
        struct vma_struct *vma = le2vma(le, list_link);
        exit_range(pgdir, vma->vm_start, vma->vm_end);
    }
}
// copy_from_user - copy len bytes from user space src into kernel dst,
//                  after validating the user range
bool
copy_from_user(struct mm_struct *mm, void *dst, const void *src, size_t len, bool writable) {
    if (!user_mem_check(mm, (uintptr_t)src, len, writable)) {
        return 0;
    }
    memcpy(dst, src, len);
    return 1;
}

// copy_to_user - copy len bytes from kernel src into user space dst,
//                after validating that the user range is writable
bool
copy_to_user(struct mm_struct *mm, void *dst, const void *src, size_t len) {
    if (!user_mem_check(mm, (uintptr_t)dst, len, 1)) {
        return 0;
    }
    memcpy(dst, src, len);
    return 1;
}
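
/* Added usage sketch (hypothetical, not in this file): a syscall handler
 * pulling an argument buffer out of the current process's address space;
 * kbuf, uptr and current are illustrative names only.
 *
 *     char kbuf[64];
 *     if (!copy_from_user(current->mm, kbuf, uptr, sizeof(kbuf), 0)) {
 *         return -E_INVAL;
 *     }
 */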
// vmm_init - initialize virtual memory management
//          - now just call check_vmm to check correctness of vmm
void
vmm_init(void) {
    check_vmm();
}

// check_vmm - check correctness of vmm
static void
check_vmm(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_vma_struct();
    check_pgfault();

    cprintf("check_vmm() succeeded.\n");
}
static void
check_vma_struct(void) {
    size_t nr_free_pages_store = nr_free_pages();

    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    int step1 = 10, step2 = step1 * 10;

    int i;
    // insert vmas [50,52), [45,47), ..., [5,7) in descending order
    for (i = step1; i >= 1; i --) {
        struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
        assert(vma != NULL);
        insert_vma_struct(mm, vma);
    }

    // then [55,57), ..., [500,502) in ascending order
    for (i = step1 + 1; i <= step2; i ++) {
        struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
        assert(vma != NULL);
        insert_vma_struct(mm, vma);
    }

    // the list must come out sorted regardless of insertion order
    list_entry_t *le = list_next(&(mm->mmap_list));

    for (i = 1; i <= step2; i ++) {
        assert(le != &(mm->mmap_list));
        struct vma_struct *mmap = le2vma(le, list_link);
        assert(mmap->vm_start == i * 5 && mmap->vm_end == i * 5 + 2);
        le = list_next(le);
    }

    // addresses inside [i*5, i*5+2) must be found; the gap up to i*5+5 must not
    for (i = 5; i <= 5 * step2; i += 5) {
        struct vma_struct *vma1 = find_vma(mm, i);
        assert(vma1 != NULL);
        struct vma_struct *vma2 = find_vma(mm, i + 1);
        assert(vma2 != NULL);
        struct vma_struct *vma3 = find_vma(mm, i + 2);
        assert(vma3 == NULL);
        struct vma_struct *vma4 = find_vma(mm, i + 3);
        assert(vma4 == NULL);
        struct vma_struct *vma5 = find_vma(mm, i + 4);
        assert(vma5 == NULL);

        assert(vma1->vm_start == i && vma1->vm_end == i + 2);
        assert(vma2->vm_start == i && vma2->vm_end == i + 2);
    }

    // nothing below the first vma's start (5) should be found
    for (i = 4; i >= 0; i --) {
        struct vma_struct *vma_below_5 = find_vma(mm, i);
        if (vma_below_5 != NULL) {
            cprintf("vma_below_5: i %x, start %x, end %x\n", i, vma_below_5->vm_start, vma_below_5->vm_end);
        }
        assert(vma_below_5 == NULL);
    }

    mm_destroy(mm);

    cprintf("check_vma_struct() succeeded!\n");
}
struct mm_struct *check_mm_struct;

// check_pgfault - check correctness of pgfault handler
static void
check_pgfault(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_mm_struct = mm_create();
    assert(check_mm_struct != NULL);

    struct mm_struct *mm = check_mm_struct;
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    uintptr_t addr = 0x100;
    assert(find_vma(mm, addr) == vma);

    int i, sum = 0;
    // each write below page-faults; do_pgfault must map the page transparently
    for (i = 0; i < 100; i ++) {
        *(char *)(addr + i) = i;
        sum += i;
    }
    for (i = 0; i < 100; i ++) {
        sum -= *(char *)(addr + i);
    }
    assert(sum == 0);

    page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
    free_page(pde2page(pgdir[0]));
    pgdir[0] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_pgfault() succeeded!\n");
}
// number of page faults handled so far
volatile unsigned int pgfault_num = 0;

/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by x86 hardware
 * @addr       : the addr which causes a memory access exception (the contents of the CR2 register)
 *
 * CALL GRAPH: trap --> trap_dispatch --> pgfault_handler --> do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. do_pgfault can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *       -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *          or to either an access rights violation or the use of a reserved bit (1).
 *       -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *          was a read (0) or write (1).
 *       -- The U/S flag (bit 2) indicates whether the processor was executing in user mode (1)
 *          or supervisor mode (0) at the time of the exception.
 */
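/* Added worked example: error_code == 2 is binary 010, i.e. W/R = 1 and P = 0,
 * a write to a not-present page. That is exactly the case the switch below
 * lets through when the vma is writable; (error_code & 3) keeps only the
 * P and W/R bits, ignoring U/S. */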
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    // try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    // is addr inside the range of one of mm's vmas?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and can not find it in vma\n", addr);
        goto failed;
    }
    // check the error_code
    switch (error_code & 3) {
    default:
        /* error code flag : default is 3 (W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write to an existing addr) OR
     *    (write to a non-existing addr && addr is writable) OR
     *    (read from a non-existing addr && addr is readable)
     * THEN
     *    continue processing
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep = NULL;
    /* LAB3 EXERCISE 1: YOUR CODE
     * If you need a hint, the comments below can help you finish the code.
     *
     * Some useful MACROs and DEFINEs; you can use them in the implementation below.
     * MACROs or Functions:
     *   get_pte : get a pte and return the kernel virtual address of this pte for la;
     *             if the PT containing this pte doesn't exist, alloc a page for the PT
     *             (notice the 3rd parameter '1')
     *   pgdir_alloc_page : call alloc_page & page_insert to allocate one page of memory
     *             and set up a pa<--->la mapping for linear address la in the PDT pgdir
     * DEFINES:
     *   VM_WRITE : if (vma->vm_flags & VM_WRITE) is 1/0, then the vma is writable/non-writable
     *   PTE_W    0x002 // page table/directory entry flags bit : Writeable
     *   PTE_U    0x004 // page table/directory entry flags bit : User can access
     * VARIABLES:
     *   mm->pgdir : the PDT of these vma
     */
#if 0
    /* LAB3 EXERCISE 1: YOUR CODE */
    ptep = ???  // (1) try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    if (*ptep == 0) {
        // (2) if the phys addr doesn't exist, then alloc a page & map the phys addr to the logical addr
    }
    else {
        /* LAB3 EXERCISE 2: YOUR CODE
         * Now we think this pte is a swap entry: we should load the data from disk into a page with a
         * phys addr, map that phys addr to the logical addr, and trigger the swap manager to record
         * the access situation of this page.
         *
         * Some useful MACROs and DEFINEs; you can use them in the implementation below.
         * MACROs or Functions:
         *   swap_in(mm, addr, &page) : alloc a memory page, then according to the swap entry in the
         *             PTE for addr, find the disk page and read its content into this memory page
         *   page_insert : build the mapping between the phys addr of a Page and the linear addr la
         *   swap_map_swappable : set the page swappable
         */
        /*
         * LAB5 CHALLENGE (the implementation of Copy on Write)
         * There are 2 situations when the code gets here.
         *   1) (*ptep & PTE_P) == 1: a process tried to write a read-only page.
         *      If the vma that includes this addr is writable, then we can make the page writable
         *      by rewriting *ptep. This method can be used to implement Copy on Write (COW),
         *      a fast way to fork a process.
         *   2) (*ptep & PTE_P) == 0 but *ptep != 0: this pte is a swap entry.
         *      We should add LAB3's results here.
         */
        if (swap_init_ok) {
            struct Page *page = NULL;
            // (1) according to mm AND addr, load the content of the right disk page
            //     into the memory that page manages.
            // (2) according to mm, addr AND page, set up the phys addr <---> logical addr mapping
            // (3) make the page swappable.
            // (4) [NOTICE]: you may need to update your LAB3 implementation for LAB5 to run normally.
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
            goto failed;
        }
    }
#endif
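
/* Added editor's sketch: one way the exercise template above might be filled
 * in, kept disabled just like the template. It only uses the helpers the
 * comments above describe (get_pte, pgdir_alloc_page, swap_in, page_insert,
 * swap_map_swappable); treat it as an assumption-laden outline under those
 * signatures, not the official lab solution. */
#if 0
    ptep = get_pte(mm->pgdir, addr, 1);        // (1) find the pte, creating the PT if absent
    if (ptep == NULL) {
        goto failed;
    }
    if (*ptep == 0) {                          // (2) no phys page yet: allocate and map one
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            goto failed;
        }
    }
    else if (swap_init_ok) {                   // pte holds a swap entry: page it back in
        struct Page *page = NULL;
        if (swap_in(mm, addr, &page) != 0) {   // (1) read the disk page into memory
            goto failed;
        }
        page_insert(mm->pgdir, page, addr, perm);  // (2) map phys addr <---> logical addr
        swap_map_swappable(mm, addr, page, 1);     // (3) make the page swappable again
    }
    else {
        cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
        goto failed;
    }
#endif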
    ret = 0;
failed:
    return ret;
}
// user_mem_check - check whether [addr, addr + len) is accessible (and, if
//                  write is set, writable) in user mode under mm
bool
user_mem_check(struct mm_struct *mm, uintptr_t addr, size_t len, bool write) {
    if (mm != NULL) {
        if (!USER_ACCESS(addr, addr + len)) {
            return 0;
        }
        struct vma_struct *vma;
        uintptr_t start = addr, end = addr + len;
        while (start < end) {
            if ((vma = find_vma(mm, start)) == NULL || start < vma->vm_start) {
                return 0;
            }
            if (!(vma->vm_flags & ((write) ? VM_WRITE : VM_READ))) {
                return 0;
            }
            if (write && (vma->vm_flags & VM_STACK)) {
                if (start < vma->vm_start + PGSIZE) { // check stack start & size
                    return 0;
                }
            }
            start = vma->vm_end;
        }
        return 1;
    }
    return KERN_ACCESS(addr, addr + len);
}
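
/* Added note: when mm is NULL the caller is a pure kernel thread, so the
 * range only has to fall inside the kernel's own address space (KERN_ACCESS).
 * The VM_STACK special case rejects writes into the lowest page of a stack
 * vma, which presumably acts as a guard page so a runaway stack faults
 * instead of silently corrupting whatever lies below it. */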