《操作系统》的实验代码 (lab code for the Operating Systems course).
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

517 lines
16 KiB

12 years ago
12 years ago
  1. #include <vmm.h>
  2. #include <sync.h>
  3. #include <string.h>
  4. #include <assert.h>
  5. #include <stdio.h>
  6. #include <error.h>
  7. #include <pmm.h>
  8. #include <x86.h>
  9. #include <swap.h>
  10. #include <kmalloc.h>
  11. /*
  12. vmm design include two parts: mm_struct (mm) & vma_struct (vma)
  13. mm is the memory manager for the set of continuous virtual memory
  14. area which have the same PDT. vma is a continuous virtual memory area.
15. There is a linear linked list for vma & a red-black tree of vma in mm.
  16. ---------------
  17. mm related functions:
18. global functions
  19. struct mm_struct * mm_create(void)
  20. void mm_destroy(struct mm_struct *mm)
  21. int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr)
  22. --------------
  23. vma related functions:
  24. global functions
  25. struct vma_struct * vma_create (uintptr_t vm_start, uintptr_t vm_end,...)
  26. void insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma)
  27. struct vma_struct * find_vma(struct mm_struct *mm, uintptr_t addr)
  28. local functions
  29. inline void check_vma_overlap(struct vma_struct *prev, struct vma_struct *next)
  30. ---------------
  31. check correctness functions
  32. void check_vmm(void);
  33. void check_vma_struct(void);
  34. void check_pgfault(void);
  35. */
  36. static void check_vmm(void);
  37. static void check_vma_struct(void);
  38. static void check_pgfault(void);
  39. // mm_create - alloc a mm_struct & initialize it.
  40. struct mm_struct *
  41. mm_create(void) {
  42. struct mm_struct *mm = kmalloc(sizeof(struct mm_struct));
  43. if (mm != NULL) {
  44. list_init(&(mm->mmap_list));
  45. mm->mmap_cache = NULL;
  46. mm->pgdir = NULL;
  47. mm->map_count = 0;
  48. if (swap_init_ok) swap_init_mm(mm);
  49. else mm->sm_priv = NULL;
  50. set_mm_count(mm, 0);
  51. sem_init(&(mm->mm_sem), 1);
  52. }
  53. return mm;
  54. }
  55. // vma_create - alloc a vma_struct & initialize it. (addr range: vm_start~vm_end)
  56. struct vma_struct *
  57. vma_create(uintptr_t vm_start, uintptr_t vm_end, uint32_t vm_flags) {
  58. struct vma_struct *vma = kmalloc(sizeof(struct vma_struct));
  59. if (vma != NULL) {
  60. vma->vm_start = vm_start;
  61. vma->vm_end = vm_end;
  62. vma->vm_flags = vm_flags;
  63. }
  64. return vma;
  65. }
  66. // find_vma - find a vma (vma->vm_start <= addr <= vma_vm_end)
  67. struct vma_struct *
  68. find_vma(struct mm_struct *mm, uintptr_t addr) {
  69. struct vma_struct *vma = NULL;
  70. if (mm != NULL) {
  71. vma = mm->mmap_cache;
  72. if (!(vma != NULL && vma->vm_start <= addr && vma->vm_end > addr)) {
  73. bool found = 0;
  74. list_entry_t *list = &(mm->mmap_list), *le = list;
  75. while ((le = list_next(le)) != list) {
  76. vma = le2vma(le, list_link);
  77. if (addr < vma->vm_end) {
  78. found = 1;
  79. break;
  80. }
  81. }
  82. if (!found) {
  83. vma = NULL;
  84. }
  85. }
  86. if (vma != NULL) {
  87. mm->mmap_cache = vma;
  88. }
  89. }
  90. return vma;
  91. }
// check_vma_overlap - assert that prev and next are both non-empty ranges
//                     and that prev ends at or before next begins, i.e. the
//                     two vmas are ordered and do not overlap.
static inline void
check_vma_overlap(struct vma_struct *prev, struct vma_struct *next) {
    assert(prev->vm_start < prev->vm_end);
    assert(prev->vm_end <= next->vm_start);
    assert(next->vm_start < next->vm_end);
}
  99. // insert_vma_struct -insert vma in mm's list link
  100. void
  101. insert_vma_struct(struct mm_struct *mm, struct vma_struct *vma) {
  102. assert(vma->vm_start < vma->vm_end);
  103. list_entry_t *list = &(mm->mmap_list);
  104. list_entry_t *le_prev = list, *le_next;
  105. list_entry_t *le = list;
  106. while ((le = list_next(le)) != list) {
  107. struct vma_struct *mmap_prev = le2vma(le, list_link);
  108. if (mmap_prev->vm_start > vma->vm_start) {
  109. break;
  110. }
  111. le_prev = le;
  112. }
  113. le_next = list_next(le_prev);
  114. /* check overlap */
  115. if (le_prev != list) {
  116. check_vma_overlap(le2vma(le_prev, list_link), vma);
  117. }
  118. if (le_next != list) {
  119. check_vma_overlap(vma, le2vma(le_next, list_link));
  120. }
  121. vma->vm_mm = mm;
  122. list_add_after(le_prev, &(vma->list_link));
  123. mm->map_count ++;
  124. }
  125. // mm_destroy - free mm and mm internal fields
  126. void
  127. mm_destroy(struct mm_struct *mm) {
  128. assert(mm_count(mm) == 0);
  129. list_entry_t *list = &(mm->mmap_list), *le;
  130. while ((le = list_next(list)) != list) {
  131. list_del(le);
  132. kfree(le2vma(le, list_link)); //kfree vma
  133. }
  134. kfree(mm); //kfree mm
  135. mm=NULL;
  136. }
  137. int
  138. mm_map(struct mm_struct *mm, uintptr_t addr, size_t len, uint32_t vm_flags,
  139. struct vma_struct **vma_store) {
  140. uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
  141. if (!USER_ACCESS(start, end)) {
  142. return -E_INVAL;
  143. }
  144. assert(mm != NULL);
  145. int ret = -E_INVAL;
  146. struct vma_struct *vma;
  147. if ((vma = find_vma(mm, start)) != NULL && end > vma->vm_start) {
  148. goto out;
  149. }
  150. ret = -E_NO_MEM;
  151. if ((vma = vma_create(start, end, vm_flags)) == NULL) {
  152. goto out;
  153. }
  154. insert_vma_struct(mm, vma);
  155. if (vma_store != NULL) {
  156. *vma_store = vma;
  157. }
  158. ret = 0;
  159. out:
  160. return ret;
  161. }
  162. int
  163. dup_mmap(struct mm_struct *to, struct mm_struct *from) {
  164. assert(to != NULL && from != NULL);
  165. list_entry_t *list = &(from->mmap_list), *le = list;
  166. while ((le = list_prev(le)) != list) {
  167. struct vma_struct *vma, *nvma;
  168. vma = le2vma(le, list_link);
  169. nvma = vma_create(vma->vm_start, vma->vm_end, vma->vm_flags);
  170. if (nvma == NULL) {
  171. return -E_NO_MEM;
  172. }
  173. insert_vma_struct(to, nvma);
  174. bool share = 0;
  175. if (copy_range(to->pgdir, from->pgdir, vma->vm_start, vma->vm_end, share) != 0) {
  176. return -E_NO_MEM;
  177. }
  178. }
  179. return 0;
  180. }
  181. void
  182. exit_mmap(struct mm_struct *mm) {
  183. assert(mm != NULL && mm_count(mm) == 0);
  184. pde_t *pgdir = mm->pgdir;
  185. list_entry_t *list = &(mm->mmap_list), *le = list;
  186. while ((le = list_next(le)) != list) {
  187. struct vma_struct *vma = le2vma(le, list_link);
  188. unmap_range(pgdir, vma->vm_start, vma->vm_end);
  189. }
  190. while ((le = list_next(le)) != list) {
  191. struct vma_struct *vma = le2vma(le, list_link);
  192. exit_range(pgdir, vma->vm_start, vma->vm_end);
  193. }
  194. }
  195. bool
  196. copy_from_user(struct mm_struct *mm, void *dst, const void *src, size_t len, bool writable) {
  197. if (!user_mem_check(mm, (uintptr_t)src, len, writable)) {
  198. return 0;
  199. }
  200. memcpy(dst, src, len);
  201. return 1;
  202. }
  203. bool
  204. copy_to_user(struct mm_struct *mm, void *dst, const void *src, size_t len) {
  205. if (!user_mem_check(mm, (uintptr_t)dst, len, 1)) {
  206. return 0;
  207. }
  208. memcpy(dst, src, len);
  209. return 1;
  210. }
// vmm_init - initialize virtual memory management
//          - currently this only runs check_vmm to validate the vma/mm
//            bookkeeping; there is no other global state to set up here yet
void
vmm_init(void) {
    check_vmm();
}
  217. // check_vmm - check correctness of vmm
  218. static void
  219. check_vmm(void) {
  220. size_t nr_free_pages_store = nr_free_pages();
  221. check_vma_struct();
  222. check_pgfault();
  223. assert(nr_free_pages_store == nr_free_pages());
  224. cprintf("check_vmm() succeeded.\n");
  225. }
  226. static void
  227. check_vma_struct(void) {
  228. size_t nr_free_pages_store = nr_free_pages();
  229. struct mm_struct *mm = mm_create();
  230. assert(mm != NULL);
  231. int step1 = 10, step2 = step1 * 10;
  232. int i;
  233. for (i = step1; i >= 0; i --) {
  234. struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
  235. assert(vma != NULL);
  236. insert_vma_struct(mm, vma);
  237. }
  238. for (i = step1 + 1; i <= step2; i ++) {
  239. struct vma_struct *vma = vma_create(i * 5, i * 5 + 2, 0);
  240. assert(vma != NULL);
  241. insert_vma_struct(mm, vma);
  242. }
  243. list_entry_t *le = list_next(&(mm->mmap_list));
  244. for (i = 0; i <= step2; i ++) {
  245. assert(le != &(mm->mmap_list));
  246. struct vma_struct *mmap = le2vma(le, list_link);
  247. assert(mmap->vm_start == i * 5 && mmap->vm_end == i * 5 + 2);
  248. le = list_next(le);
  249. }
  250. for (i = 0; i < 5 * step2 + 2; i ++) {
  251. struct vma_struct *vma = find_vma(mm, i);
  252. assert(vma != NULL);
  253. int j = i / 5;
  254. if (i >= 5 * j + 2) {
  255. j ++;
  256. }
  257. assert(vma->vm_start == j * 5 && vma->vm_end == j * 5 + 2);
  258. }
  259. mm_destroy(mm);
  260. assert(nr_free_pages_store == nr_free_pages());
  261. cprintf("check_vma_struct() succeeded!\n");
  262. }
  263. struct mm_struct *check_mm_struct;
// check_pgfault - check correctness of the page-fault handler by writing to
//                 unmapped addresses and letting do_pgfault map them on
//                 demand, then undoing everything and verifying no physical
//                 pages leaked.
static void
check_pgfault(void) {
    size_t nr_free_pages_store = nr_free_pages();
    check_mm_struct = mm_create();
    assert(check_mm_struct != NULL);
    struct mm_struct *mm = check_mm_struct;
    // borrow the kernel page directory; PDE 0 must be free so the test can
    // fault pages into the [0, PTSIZE) window
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);
    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
    assert(vma != NULL);
    insert_vma_struct(mm, vma);
    uintptr_t addr = 0x100;
    assert(find_vma(mm, addr) == vma);
    int i, sum = 0;
    // each write hits a not-present page: the fault handler must allocate
    // and map a page on demand for the write to land
    for (i = 0; i < 100; i ++) {
        *(char *)(addr + i) = i;
        sum += i;
    }
    // reading back checks the faulted-in page retained the data
    for (i = 0; i < 100; i ++) {
        sum -= *(char *)(addr + i);
    }
    assert(sum == 0);
    // undo: drop the mapped page, the page-table page referenced by PDE 0,
    // clear the borrowed PDE, then destroy the mm
    page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
    free_page(pa2page(pgdir[0]));
    pgdir[0] = 0;
    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;
    assert(nr_free_pages_store == nr_free_pages());
    cprintf("check_pgfault() succeeded!\n");
}
  296. //page fault number
  297. volatile unsigned int pgfault_num=0;
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by x86 hardware
 * @addr       : the addr which caused the memory access exception (the contents of the CR2 register)
 *
 * CALL GRAPH: trap--> trap_dispatch-->pgfault_handler-->do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in
 * diagnosing the exception and recovering from it.
 * (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *     32-bit linear address that generated the exception. The do_pgfault function can
 *     use this address to locate the corresponding page directory and page-table
 *     entries.
 * (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *     that for other exceptions. The error code tells the exception handler three things:
 *     -- The P flag (bit 0) indicates whether the exception was due to a not-present page (0)
 *        or to either an access rights violation or the use of a reserved bit (1).
 *     -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *        was a read (0) or write (1).
 *     -- The U/S flag (bit 2) indicates whether the processor was executing at user mode (1)
 *        or supervisor mode (0) at the time of the exception.
 */
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    // try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);
    pgfault_num++;
    // is addr really inside the returned vma? find_vma may return the
    // next-higher vma when addr falls in a gap, so vm_start must be checked
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and can not find it in vma\n", addr);
        goto failed;
    }
    // check the error_code (bit 0 = present flag, bit 1 = write flag)
    switch (error_code & 3) {
    default:
            /* error code flag : default is 3 ( W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        // a read fault on a present page is a protection violation
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write to an existing addr) OR
     *    (write to a non-existing addr && addr is writable) OR
     *    (read from a non-existing addr && addr is readable)
     * THEN
     *    continue processing the fault
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);
    ret = -E_NO_MEM;
    pte_t *ptep=NULL;
    /*LAB3 EXERCISE 1: YOUR CODE
     * Maybe you want a help comment; the comments BELOW can help you finish the code.
     *
     * Some useful MACROs and DEFINEs, you can use them in the implementation below.
     * MACROs or Functions:
     *   get_pte : get a pte and return the kernel virtual address of this pte for la;
     *             if the PT containing this pte doesn't exist, alloc a page for the PT
     *             (notice the 3rd parameter '1')
     *   pgdir_alloc_page : call alloc_page & page_insert to allocate a page-sized piece of
     *             memory and set up an addr map pa<--->la with linear address la and the PDT pgdir
     * DEFINES:
     *   VM_WRITE : if (vma->vm_flags & VM_WRITE) != 0, the vma is writable
     *   PTE_W 0x002 // page table/directory entry flags bit : Writeable
     *   PTE_U 0x004 // page table/directory entry flags bit : User can access
     * VARIABLES:
     *   mm->pgdir : the PDT of these vma
     *
     */
#if 0
    /*LAB3 EXERCISE 1: YOUR CODE*/
    ptep = ??? //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, create a PT.
    if (*ptep == 0) {
        //(2) if the phys addr doesn't exist, alloc a page & map the phys addr to the logical addr
    }
    else {
        /*LAB3 EXERCISE 2: YOUR CODE
         * Now we think this pte is a swap entry: we should load data from disk into a page with a phys addr,
         * map the phys addr to the logical addr, and trigger the swap manager to record the access
         * situation of this page.
         *
         * Some useful MACROs and DEFINEs, you can use them in the implementation below.
         * MACROs or Functions:
         *   swap_in(mm, addr, &page) : alloc a memory page; then, according to the swap entry in the PTE
         *             for addr, find the disk page and read its content into this memory page
         *   page_insert : build the map between the phys addr of a Page and the linear addr la
         *   swap_map_swappable : mark the page swappable
         */
        /*
         * LAB5 CHALLENGE (the implementation of Copy on Write)
         * There are 2 situations when the code comes here.
         * 1) (*ptep & PTE_P) == 1: a process tried to write a read-only page.
         *    If the vma that includes this addr is writable, we can make the page writable by
         *    rewriting *ptep. This method can be used to implement Copy on Write (COW),
         *    a fast way to fork a process.
         * 2) (*ptep & PTE_P) == 0 but *ptep != 0: this pte is a swap entry.
         *    We should add LAB3's results here.
         */
        if(swap_init_ok) {
            struct Page *page=NULL;
            //(1) According to mm AND addr, try to load the content of the right disk page
            //    into the memory that `page` manages.
            //(2) According to mm, addr AND page, set up the map of phys addr <---> logical addr
            //(3) make the page swappable.
            //(4) [NOTICE]: you maybe need to update your lab3 implementation for LAB5's normal execution.
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
            goto failed;
        }
    }
#endif
    // NOTE(review): with the exercise skeleton above disabled by `#if 0`, no
    // page is actually mapped here; the LAB3 code must be filled in before a
    // faulting access can really be satisfied.
    ret = 0;
failed:
    return ret;
}
  424. bool
  425. user_mem_check(struct mm_struct *mm, uintptr_t addr, size_t len, bool write) {
  426. if (mm != NULL) {
  427. if (!USER_ACCESS(addr, addr + len)) {
  428. return 0;
  429. }
  430. struct vma_struct *vma;
  431. uintptr_t start = addr, end = addr + len;
  432. while (start < end) {
  433. if ((vma = find_vma(mm, start)) == NULL || start < vma->vm_start) {
  434. return 0;
  435. }
  436. if (!(vma->vm_flags & ((write) ? VM_WRITE : VM_READ))) {
  437. return 0;
  438. }
  439. if (write && (vma->vm_flags & VM_STACK)) {
  440. if (start < vma->vm_start + PGSIZE) { //check stack start & size
  441. return 0;
  442. }
  443. }
  444. start = vma->vm_end;
  445. }
  446. return 1;
  447. }
  448. return KERN_ACCESS(addr, addr + len);
  449. }