Lab code for the《操作系统》(Operating Systems) course.
#include <vmm.h>
#include <sync.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <error.h>
#include <pmm.h>
#include <x86.h>
#include <swap.h>
#include <kmalloc.h>
/*
  vmm design includes two parts: mm_struct (mm) & vma_struct (vma).
  mm is the memory manager for the set of continuous virtual memory
  areas which share the same PDT. vma is one continuous virtual memory area.
  In mm there is a linear linked list of vmas (and, by design, a red-black
  tree of vmas as well).
  ---------------
  mm related functions:
    global functions
      struct mm_struct * mm_create(void)
      void mm_destroy(struct mm_struct *mm)
      int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr)
  --------------
  vma related functions:
    global functions
      struct vma_struct * vma_create (uintptr_t vm_start, uintptr_t vm_end, ...)
      void insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma)
      struct vma_struct * find_vma(struct mm_struct *mm, uintptr_t addr)
    local functions
      inline void check_vma_overlap(struct vma_struct *prev, struct vma_struct *next)
  ---------------
  check correctness functions
    void check_vmm(void);
    void check_vma_struct(void);
    void check_pgfault(void);
*/
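
/* A minimal illustrative sketch (not part of the lab code): walking an mm's
 * sorted vma list with the same list macros the functions below use. The
 * helper name print_vma_list is hypothetical. */
#if 0
static void
print_vma_list(struct mm_struct *mm) {
    list_entry_t *list = &(mm->mmap_list), *le = list;
    while ((le = list_next(le)) != list) {
        struct vma_struct *vma = le2vma(le, list_link);
        cprintf("vma: start 0x%x, end 0x%x, flags 0x%x\n",
                vma->vm_start, vma->vm_end, vma->vm_flags);
    }
}
#endif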
static void check_vmm(void);
static void check_vma_struct(void);
static void check_pgfault(void);
// mm_create - alloc a mm_struct & initialize it
struct mm_struct *
mm_create(void) {
    struct mm_struct *mm = kmalloc(sizeof(struct mm_struct));

    if (mm != NULL) {
        list_init(&(mm->mmap_list));
        mm->mmap_cache = NULL;
        mm->pgdir = NULL;
        mm->map_count = 0;

        if (swap_init_ok) swap_init_mm(mm);
        else mm->sm_priv = NULL;

        set_mm_count(mm, 0);
        sem_init(&(mm->mm_sem), 1);
    }
    return mm;
}
// vma_create - alloc a vma_struct & initialize it (addr range: vm_start~vm_end)
struct vma_struct *
vma_create(uintptr_t vm_start, uintptr_t vm_end, uint32_t vm_flags) {
    struct vma_struct *vma = kmalloc(sizeof(struct vma_struct));

    if (vma != NULL) {
        vma->vm_start = vm_start;
        vma->vm_end = vm_end;
        vma->vm_flags = vm_flags;
    }
    return vma;
}
// find_vma - find the vma covering addr (vma->vm_start <= addr < vma->vm_end)
struct vma_struct *
find_vma(struct mm_struct *mm, uintptr_t addr) {
    struct vma_struct *vma = NULL;
    if (mm != NULL) {
        vma = mm->mmap_cache;
        if (!(vma != NULL && vma->vm_start <= addr && vma->vm_end > addr)) {
            bool found = 0;
            list_entry_t *list = &(mm->mmap_list), *le = list;
            while ((le = list_next(le)) != list) {
                vma = le2vma(le, list_link);
                if (vma->vm_start <= addr && addr < vma->vm_end) {
                    found = 1;
                    break;
                }
            }
            if (!found) {
                vma = NULL;
            }
        }
        if (vma != NULL) {
            mm->mmap_cache = vma;
        }
    }
    return vma;
}
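
/* Hedged usage sketch: the interval is half-open, so find_vma(mm, vma->vm_end)
 * does NOT return that vma. The values mirror check_vma_struct below; mm is
 * assumed to come from mm_create(). */
#if 0
struct vma_struct *v = vma_create(5, 7, 0);   // covers addresses 5 and 6
insert_vma_struct(mm, v);
assert(find_vma(mm, 5) == v);
assert(find_vma(mm, 6) == v);
assert(find_vma(mm, 7) == NULL);              // vm_end itself is outside
#endif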
// check_vma_overlap - assert that prev and next do not overlap (prev must end before next starts)
static inline void
check_vma_overlap(struct vma_struct *prev, struct vma_struct *next) {
    assert(prev->vm_start < prev->vm_end);
    assert(prev->vm_end <= next->vm_start);
    assert(next->vm_start < next->vm_end);
}
// insert_vma_struct - insert vma into mm's list, keeping the list sorted by vm_start
void
insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma) {
    assert(vma->vm_start < vma->vm_end);
    list_entry_t *list = &(mm->mmap_list);
    list_entry_t *le_prev = list, *le_next;

    list_entry_t *le = list;
    while ((le = list_next(le)) != list) {
        struct vma_struct *mmap_prev = le2vma(le, list_link);
        if (mmap_prev->vm_start > vma->vm_start) {
            break;
        }
        le_prev = le;
    }

    le_next = list_next(le_prev);

    /* check overlap with both neighbors */
    if (le_prev != list) {
        check_vma_overlap(le2vma(le_prev, list_link), vma);
    }
    if (le_next != list) {
        check_vma_overlap(vma, le2vma(le_next, list_link));
    }

    vma->vm_mm = mm;
    list_add_after(le_prev, &(vma->list_link));

    mm->map_count ++;
}
// mm_destroy - free mm and mm internal fields
void
mm_destroy(struct mm_struct *mm) {
    assert(mm_count(mm) == 0);

    list_entry_t *list = &(mm->mmap_list), *le;
    while ((le = list_next(list)) != list) {
        list_del(le);
        kfree(le2vma(le, list_link));  // free each vma
    }
    kfree(mm);  // free mm itself
    mm = NULL;
}
// mm_map - map [addr, addr + len) into mm; fails if the range overlaps an existing vma
int
mm_map(struct mm_struct *mm, uintptr_t addr, size_t len, uint32_t vm_flags,
       struct vma_struct **vma_store) {
    uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
    if (!USER_ACCESS(start, end)) {
        return -E_INVAL;
    }

    assert(mm != NULL);

    int ret = -E_INVAL;

    struct vma_struct *vma;
    if ((vma = find_vma(mm, start)) != NULL && end > vma->vm_start) {
        goto out;
    }
    ret = -E_NO_MEM;

    if ((vma = vma_create(start, end, vm_flags)) == NULL) {
        goto out;
    }
    insert_vma_struct(mm, vma);
    if (vma_store != NULL) {
        *vma_store = vma;
    }

    ret = 0;

out:
    return ret;
}
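
/* Hedged usage sketch for mm_map: reserve a readable/writable user range and
 * let do_pgfault populate it lazily on first touch. The address constant is
 * hypothetical, and mm is assumed to come from mm_create(). */
#if 0
struct vma_struct *vma = NULL;
if (mm_map(mm, 0x10000000, 4096, VM_READ | VM_WRITE, &vma) == 0) {
    // [0x10000000, 0x10001000) is now reserved; pages are allocated on first fault
}
#endif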
// dup_mmap - duplicate every vma (and its mapped pages) from one mm to another
int
dup_mmap(struct mm_struct *to, struct mm_struct *from) {
    assert(to != NULL && from != NULL);
    list_entry_t *list = &(from->mmap_list), *le = list;
    while ((le = list_prev(le)) != list) {
        struct vma_struct *vma, *nvma;
        vma = le2vma(le, list_link);
        nvma = vma_create(vma->vm_start, vma->vm_end, vma->vm_flags);
        if (nvma == NULL) {
            return -E_NO_MEM;
        }

        insert_vma_struct(to, nvma);

        bool share = 0;
        if (copy_range(to->pgdir, from->pgdir, vma->vm_start, vma->vm_end, share) != 0) {
            return -E_NO_MEM;
        }
    }
    return 0;
}
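
/* Hedged sketch of how dup_mmap might be used when forking an address space:
 * build a fresh mm and clone the parent's vmas into it. oldmm is hypothetical;
 * the actual call site lives in the process-management code of later labs. */
#if 0
struct mm_struct *newmm = mm_create();
if (newmm == NULL || dup_mmap(newmm, oldmm) != 0) {
    // out of memory: roll back with exit_mmap/mm_destroy as appropriate
}
#endif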
// exit_mmap - first unmap every page in each vma, then free the page tables of each range
void
exit_mmap(struct mm_struct *mm) {
    assert(mm != NULL && mm_count(mm) == 0);
    pde_t *pgdir = mm->pgdir;
    list_entry_t *list = &(mm->mmap_list), *le = list;
    while ((le = list_next(le)) != list) {
        struct vma_struct *vma = le2vma(le, list_link);
        unmap_range(pgdir, vma->vm_start, vma->vm_end);
    }
    while ((le = list_next(le)) != list) {
        struct vma_struct *vma = le2vma(le, list_link);
        exit_range(pgdir, vma->vm_start, vma->vm_end);
    }
}
// copy_from_user - copy len bytes from user space (src) into kernel space (dst)
bool
copy_from_user(struct mm_struct *mm, void *dst, const void *src, size_t len, bool writable) {
    if (!user_mem_check(mm, (uintptr_t)src, len, writable)) {
        return 0;
    }
    memcpy(dst, src, len);
    return 1;
}

// copy_to_user - copy len bytes from kernel space (src) into user space (dst)
bool
copy_to_user(struct mm_struct *mm, void *dst, const void *src, size_t len) {
    if (!user_mem_check(mm, (uintptr_t)dst, len, 1)) {
        return 0;
    }
    memcpy(dst, src, len);
    return 1;
}
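
/* Hedged sketch: fetching a syscall argument through copy_from_user. The
 * variable names are hypothetical; the final '0' asks user_mem_check for
 * read access only. */
#if 0
int value;
if (!copy_from_user(mm, &value, user_ptr, sizeof(value), 0)) {
    return -E_INVAL;   // user_ptr is not a readable user-space range
}
#endif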
// vmm_init - initialize virtual memory management
//          - now just call check_vmm to check correctness of vmm
void
vmm_init(void) {
    check_vmm();
}

// check_vmm - check correctness of vmm
static void
check_vmm(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_vma_struct();
    check_pgfault();
    //assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_vmm() succeeded.\n");
}
static void
check_vma_struct(void) {
    size_t nr_free_pages_store = nr_free_pages();

    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    int step1 = 10, step2 = step1 * 10;

    int i;
    for (i = step1; i >= 1; i --) {
        struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
        assert(vma != NULL);
        insert_vma_struct(mm, vma);
    }

    for (i = step1 + 1; i <= step2; i ++) {
        struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
        assert(vma != NULL);
        insert_vma_struct(mm, vma);
    }

    list_entry_t *le = list_next(&(mm->mmap_list));

    for (i = 1; i <= step2; i ++) {
        assert(le != &(mm->mmap_list));
        struct vma_struct *mmap = le2vma(le, list_link);
        assert(mmap->vm_start == i * 5 && mmap->vm_end == i * 5 + 2);
        le = list_next(le);
    }

    for (i = 5; i <= 5 * step2; i += 5) {
        struct vma_struct *vma1 = find_vma(mm, i);
        assert(vma1 != NULL);
        struct vma_struct *vma2 = find_vma(mm, i + 1);
        assert(vma2 != NULL);
        struct vma_struct *vma3 = find_vma(mm, i + 2);
        assert(vma3 == NULL);
        struct vma_struct *vma4 = find_vma(mm, i + 3);
        assert(vma4 == NULL);
        struct vma_struct *vma5 = find_vma(mm, i + 4);
        assert(vma5 == NULL);

        assert(vma1->vm_start == i && vma1->vm_end == i + 2);
        assert(vma2->vm_start == i && vma2->vm_end == i + 2);
    }

    for (i = 4; i >= 0; i --) {
        struct vma_struct *vma_below_5 = find_vma(mm, i);
        if (vma_below_5 != NULL) {
            cprintf("vma_below_5: i %x, start %x, end %x\n", i, vma_below_5->vm_start, vma_below_5->vm_end);
        }
        assert(vma_below_5 == NULL);
    }

    mm_destroy(mm);

    //assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_vma_struct() succeeded!\n");
}
struct mm_struct *check_mm_struct;

// check_pgfault - check correctness of pgfault handler
static void
check_pgfault(void) {
    size_t nr_free_pages_store = nr_free_pages();

    check_mm_struct = mm_create();
    assert(check_mm_struct != NULL);

    struct mm_struct *mm = check_mm_struct;
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    uintptr_t addr = 0x100;
    assert(find_vma(mm, addr) == vma);

    int i, sum = 0;
    for (i = 0; i < 100; i ++) {
        *(char *)(addr + i) = i;
        sum += i;
    }
    for (i = 0; i < 100; i ++) {
        sum -= *(char *)(addr + i);
    }
    assert(sum == 0);

    page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
    free_page(pde2page(pgdir[0]));
    pgdir[0] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_free_pages_store == nr_free_pages());

    cprintf("check_pgfault() succeeded!\n");
}
// page fault counter
volatile unsigned int pgfault_num = 0;

/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vmas using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by the x86 hardware
 * @addr       : the addr which caused the memory access exception (the contents of the CR2 register)
 *
 * CALL GRAPH: trap --> trap_dispatch --> pgfault_handler --> do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 *   (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *       32-bit linear address that generated the exception. The do_pgfault function can
 *       use this address to locate the corresponding page directory and page-table
 *       entries.
 *   (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *       that for other exceptions. The error code tells the exception handler three things:
 *         -- The P flag   (bit 0) indicates whether the exception was due to a not-present page (0)
 *            or to either an access rights violation or the use of a reserved bit (1).
 *         -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *            was a read (0) or a write (1).
 *         -- The U/S flag (bit 2) indicates whether the processor was executing in user mode (1)
 *            or supervisor mode (0) at the time of the exception.
 */
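/* Illustrative decoding of the error-code bits described above (the flag
 * names are local to this sketch, not part of the lab code). */
#if 0
bool p_flag  = error_code & 1;   // bit 0: 0 = not-present page, 1 = protection violation
bool wr_flag = error_code & 2;   // bit 1: 0 = read access, 1 = write access
bool us_flag = error_code & 4;   // bit 2: 0 = supervisor mode, 1 = user mode
#endif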
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    // try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    // is the addr inside one of mm's vmas?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and can not find it in vma\n", addr);
        goto failed;
    }
    // check the error_code
    switch (error_code & 3) {
    default:
        /* error code flag : default is 3 (W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write to an existing addr) OR
     *    (write to a non-existing addr && addr is writable) OR
     *    (read from a non-existing addr && addr is readable)
     * THEN
     *    continue processing
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep = NULL;
    /* LAB3 EXERCISE 1: YOUR CODE
     * Maybe you want a help comment; the comments BELOW can help you finish the code.
     *
     * Some useful MACROs and DEFINEs; you can use them in the implementation below.
     * MACROs or Functions:
     *   get_pte : get a pte and return the kernel virtual address of this pte for la.
     *             If the PT that contains this pte doesn't exist, alloc a page for the PT
     *             (notice the 3rd parameter '1').
     *   pgdir_alloc_page : call alloc_page & page_insert to allocate a page-sized piece of memory
     *             and set up the mapping pa <---> la with linear address la and the PDT pgdir.
     * DEFINES:
     *   VM_WRITE : if vma->vm_flags & VM_WRITE == 1/0, then the vma is writable/non-writable
     *   PTE_W    : 0x002 // page table/directory entry flags bit : Writeable
     *   PTE_U    : 0x004 // page table/directory entry flags bit : User can access
     * VARIABLES:
     *   mm->pgdir : the PDT of these vmas
     */
#if 0
    /* LAB3 EXERCISE 1: YOUR CODE */
    ptep = ???              //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT.
    if (*ptep == 0) {
                            //(2) if the phys addr doesn't exist, then alloc a page & map the phys addr to the logical addr
    }
    else {
        /* LAB3 EXERCISE 2: YOUR CODE
         * Now we consider this pte a swap entry: we should load data from disk into a page with a phys addr,
         * map the phys addr to the logical addr, and trigger the swap manager to record the access situation of this page.
         *
         * Some useful MACROs and DEFINEs; you can use them in the implementation below.
         * MACROs or Functions:
         *   swap_in(mm, addr, &page) : alloc a memory page, then according to the swap entry in the PTE for addr,
         *                              find the addr of the disk page and read its content into this memory page
         *   page_insert : build the mapping between the phys addr of a Page and the linear addr la
         *   swap_map_swappable : set the page swappable
         */
        /*
         * LAB5 CHALLENGE (the implementation of Copy on Write)
         * There are 2 situations when the code comes here.
         *   1) *ptep & PTE_P == 1: one process tried to write a read-only page.
         *      If the vma that includes this addr is writable, then we can make the page writable by rewriting *ptep.
         *      This method can be used to implement the Copy on Write (COW) technique (a fast way to fork a process).
         *   2) *ptep & PTE_P == 0 but *ptep != 0: this pte is a swap entry.
         *      We should add LAB3's results here.
         */
        if (swap_init_ok) {
            struct Page *page = NULL;
            //(1) According to mm AND addr, try to load the content of the right disk page
            //    into the memory that page manages.
            //(2) According to mm, addr AND page, set up the mapping phys addr <---> logical addr
            //(3) make the page swappable.
            //(4) [NOTICE]: you maybe need to update your lab3's implementation for LAB5's normal execution.
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
            goto failed;
        }
    }
#endif
    // try to find a pte; if the pte's PT (Page Table) doesn't exist, then create a PT
    // (notice the 3rd parameter '1')
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        cprintf("get_pte in do_pgfault failed\n");
        goto failed;
    }

    if (*ptep == 0) { // if the phys addr doesn't exist, then alloc a page & map the phys addr to the logical addr
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page in do_pgfault failed\n");
            goto failed;
        }
    }
    else {
        struct Page *page = NULL;
        cprintf("do pgfault: ptep %x, pte %x\n", ptep, *ptep);
        if (*ptep & PTE_P) {
            // if a process writes to this existing read-only page (PTE_P means present), execution arrives here.
            // This is where delayed memory-space copying for a forked child process (AKA copy on write, COW) would go.
            // It isn't implemented yet; we will do it in the future.
            panic("error write a non-writable pte");
            //page = pte2page(*ptep);
        } else {
            // if this pte is a swap entry, then load data from disk into a page with a phys addr
            // and call page_insert to map the phys addr to the logical addr
            if (swap_init_ok) {
                if ((ret = swap_in(mm, addr, &page)) != 0) {
                    cprintf("swap_in in do_pgfault failed\n");
                    goto failed;
                }
            }
            else {
                cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
                goto failed;
            }
        }
        page_insert(mm->pgdir, page, addr, perm);
        swap_map_swappable(mm, addr, page, 1);
        page->pra_vaddr = addr;
    }
    ret = 0;
failed:
    return ret;
}
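
/* Hedged sketch of the COW branch that the panic above leaves unimplemented,
 * per the LAB5 CHALLENGE comment: copy the read-only page into a fresh one and
 * remap it writable. This is one possible implementation under assumptions,
 * not the lab's reference code; ptep, addr, perm, and mm refer to the locals
 * of do_pgfault. */
#if 0
struct Page *npage = alloc_page();
if (npage != NULL) {
    void *src = page2kva(pte2page(*ptep));
    memcpy(page2kva(npage), src, PGSIZE);        // private copy of the faulting page
    page_insert(mm->pgdir, npage, addr, perm);   // remap addr to the copy, writable
}
#endif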
// user_mem_check - check whether [addr, addr + len) is a legal user-space range
//                  with the required access right (read, or write if 'write' is set)
bool
user_mem_check(struct mm_struct *mm, uintptr_t addr, size_t len, bool write) {
    if (mm != NULL) {
        if (!USER_ACCESS(addr, addr + len)) {
            return 0;
        }
        struct vma_struct *vma;
        uintptr_t start = addr, end = addr + len;
        while (start < end) {
            if ((vma = find_vma(mm, start)) == NULL || start < vma->vm_start) {
                return 0;
            }
            if (!(vma->vm_flags & ((write) ? VM_WRITE : VM_READ))) {
                return 0;
            }
            if (write && (vma->vm_flags & VM_STACK)) {
                if (start < vma->vm_start + PGSIZE) { // check stack start & size
                    return 0;
                }
            }
            start = vma->vm_end;
        }
        return 1;
    }
    return KERN_ACCESS(addr, addr + len);
}
// copy_string - copy a null-terminated string (at most maxn bytes, including the
//               terminator) from user space into a kernel buffer, one page at a time
bool
copy_string(struct mm_struct *mm, char *dst, const char *src, size_t maxn) {
    size_t alen, part = ROUNDDOWN((uintptr_t)src + PGSIZE, PGSIZE) - (uintptr_t)src;
    while (1) {
        if (part > maxn) {
            part = maxn;
        }
        if (!user_mem_check(mm, (uintptr_t)src, part, 0)) {
            return 0;
        }
        if ((alen = strnlen(src, part)) < part) {
            memcpy(dst, src, alen + 1);
            return 1;
        }
        if (part == maxn) {
            return 0;
        }
        memcpy(dst, src, part);
        dst += part, src += part, maxn -= part;
        part = PGSIZE;
    }
}
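
/* Hedged usage sketch: copying a user-supplied path into a fixed kernel
 * buffer (names hypothetical); copy_string fails cleanly if the string is
 * unterminated within maxn or crosses an unmapped page. */
#if 0
char kpath[256];
if (!copy_string(mm, kpath, user_path, sizeof(kpath))) {
    return -E_INVAL;
}
#endif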