Lab code for the《操作系统》(Operating Systems) course.

#include <defs.h>
#include <x86.h>
#include <stdio.h>
#include <string.h>
#include <mmu.h>
#include <memlayout.h>
#include <pmm.h>
#include <default_pmm.h>
#include <sync.h>
#include <error.h>
#include <swap.h>
#include <vmm.h>
#include <kmalloc.h>
/* *
 * Task State Segment:
 *
 * The TSS may reside anywhere in memory. A special segment register called
 * the Task Register (TR) holds a segment selector that points to a valid TSS
 * segment descriptor which resides in the GDT. Therefore, to use a TSS
 * the following must be done in function gdt_init:
 *   - create a TSS descriptor entry in GDT
 *   - add enough information to the TSS in memory as needed
 *   - load the TR register with a segment selector for that segment
 *
 * There are several fields in the TSS for specifying the new stack pointer when a
 * privilege level change happens. But only the fields SS0 and ESP0 are useful
 * in our os kernel.
 *
 * The field SS0 contains the stack segment selector for CPL = 0, and the ESP0
 * contains the new ESP value for CPL = 0. When an interrupt happens in protected
 * mode, the x86 CPU will look in the TSS for SS0 and ESP0 and load their values
 * into SS and ESP respectively.
 * */
static struct taskstate ts = {0};

// virtual address of physical page array
struct Page *pages;
// amount of physical memory (in pages)
size_t npage = 0;

// virtual address of boot-time page directory
pde_t *boot_pgdir = NULL;
// physical address of boot-time page directory
uintptr_t boot_cr3;

// physical memory management
const struct pmm_manager *pmm_manager;
/* *
 * The page directory entry corresponding to the virtual address range
 * [VPT, VPT + PTSIZE) points to the page directory itself. Thus, the page
 * directory is treated as a page table as well as a page directory.
 *
 * One result of treating the page directory as a page table is that all PTEs
 * can be accessed through a "virtual page table" at virtual address VPT. And the
 * PTE for page number n is stored in vpt[n].
 *
 * A second consequence is that the contents of the current page directory will
 * always be available at virtual address PGADDR(PDX(VPT), PDX(VPT), 0), to which
 * vpd is set below.
 * */
pte_t * const vpt = (pte_t *)VPT;
pde_t * const vpd = (pde_t *)PGADDR(PDX(VPT), PDX(VPT), 0);
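
/* Illustrative arithmetic (not part of the lab code; assumes the usual ucore
 * value VPT = 0xFAC00000): PDX(VPT) = 0xFAC00000 >> 22 = 0x3EB, so
 * vpd = PGADDR(0x3EB, 0x3EB, 0) = 0xFAFEB000. With the self-map installed,
 * the PTE covering a linear address la can be read directly through vpt;
 * a minimal sketch, assuming the PPN() macro from mmu.h: */
#if 0
static pte_t
vpt_lookup_sketch(uintptr_t la) {
    // PPN(la) is the linear page number; this lookup is valid only after
    // boot_pgdir[PDX(VPT)] has been installed in pmm_init
    return vpt[PPN(la)];
}
#endif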
/* *
 * Global Descriptor Table:
 *
 * The kernel and user segments are identical (except for the DPL). To load
 * the %ss register, the CPL must equal the DPL. Thus, we must duplicate the
 * segments for the user and the kernel. Defined as follows:
 *   - 0x0 : unused (always faults -- for trapping NULL far pointers)
 *   - 0x8 : kernel code segment
 *   - 0x10: kernel data segment
 *   - 0x18: user code segment
 *   - 0x20: user data segment
 *   - 0x28: defined for tss, initialized in gdt_init
 * */
static struct segdesc gdt[] = {
    SEG_NULL,
    [SEG_KTEXT] = SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_KERNEL),
    [SEG_KDATA] = SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_KERNEL),
    [SEG_UTEXT] = SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_USER),
    [SEG_UDATA] = SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_USER),
    [SEG_TSS]   = SEG_NULL,
};

static struct pseudodesc gdt_pd = {
    sizeof(gdt) - 1, (uintptr_t)gdt
};
static void check_alloc_page(void);
static void check_pgdir(void);
static void check_boot_pgdir(void);

/* *
 * lgdt - load the global descriptor table register and reset the
 * data/code segment registers for kernel.
 * */
static inline void
lgdt(struct pseudodesc *pd) {
    asm volatile ("lgdt (%0)" :: "r" (pd));
    asm volatile ("movw %%ax, %%gs" :: "a" (USER_DS));
    asm volatile ("movw %%ax, %%fs" :: "a" (USER_DS));
    asm volatile ("movw %%ax, %%es" :: "a" (KERNEL_DS));
    asm volatile ("movw %%ax, %%ds" :: "a" (KERNEL_DS));
    asm volatile ("movw %%ax, %%ss" :: "a" (KERNEL_DS));
    // reload cs
    asm volatile ("ljmp %0, $1f\n 1:\n" :: "i" (KERNEL_CS));
}
/* *
 * load_esp0 - change the ESP0 in the default task state segment,
 * so that we can use a different kernel stack when we trap from
 * user to kernel.
 * */
void
load_esp0(uintptr_t esp0) {
    ts.ts_esp0 = esp0;
}

/* gdt_init - initialize the default GDT and TSS */
static void
gdt_init(void) {
    // set boot kernel stack and default SS0
    load_esp0((uintptr_t)bootstacktop);
    ts.ts_ss0 = KERNEL_DS;

    // initialize the TSS field of the gdt
    gdt[SEG_TSS] = SEGTSS(STS_T32A, (uintptr_t)&ts, sizeof(ts), DPL_KERNEL);

    // reload all segment registers
    lgdt(&gdt_pd);

    // load the TSS
    ltr(GD_TSS);
}
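
/* Illustrative sketch (not part of this file): later labs call load_esp0 on
 * every context switch so that a user->kernel trap lands on the incoming
 * process's kernel stack. The proc_struct field names used below (kstack,
 * KSTACKSIZE) are assumptions borrowed from those labs. */
#if 0
static void
switch_kernel_stack_sketch(struct proc_struct *next) {
    // point ESP0 at the top of next's kernel stack before running it
    load_esp0(next->kstack + KSTACKSIZE);
}
#endif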
//init_pmm_manager - initialize a pmm_manager instance
static void
init_pmm_manager(void) {
    pmm_manager = &default_pmm_manager;
    cprintf("memory management: %s\n", pmm_manager->name);
    pmm_manager->init();
}

//init_memmap - call pmm->init_memmap to build Page structs for free memory
static void
init_memmap(struct Page *base, size_t n) {
    pmm_manager->init_memmap(base, n);
}

//alloc_pages - call pmm->alloc_pages to allocate a continuous block of n*PAGESIZE memory
struct Page *
alloc_pages(size_t n) {
    struct Page *page = NULL;
    bool intr_flag;
    while (1) {
        local_intr_save(intr_flag);
        {
            page = pmm_manager->alloc_pages(n);
        }
        local_intr_restore(intr_flag);

        // retry only for failed single-page requests once swap is initialized:
        // evict a page and try again
        if (page != NULL || n > 1 || swap_init_ok == 0) break;

        extern struct mm_struct *check_mm_struct;
        //cprintf("page %x, call swap_out in alloc_pages %d\n", page, n);
        swap_out(check_mm_struct, n, 0);
    }
    //cprintf("n %d,get page %x, No %d in alloc_pages\n", n, page, (page - pages));
    return page;
}
//free_pages - call pmm->free_pages to free a continuous block of n*PAGESIZE memory
void
free_pages(struct Page *base, size_t n) {
    bool intr_flag;
    local_intr_save(intr_flag);
    {
        pmm_manager->free_pages(base, n);
    }
    local_intr_restore(intr_flag);
}

//nr_free_pages - call pmm->nr_free_pages to get the size (nr*PAGESIZE)
//of current free memory
size_t
nr_free_pages(void) {
    size_t ret;
    bool intr_flag;
    local_intr_save(intr_flag);
    {
        ret = pmm_manager->nr_free_pages();
    }
    local_intr_restore(intr_flag);
    return ret;
}
/* page_init - initialize the Page structs and the free memory map
 * from the BIOS-provided e820 map */
static void
page_init(void) {
    struct e820map *memmap = (struct e820map *)(0x8000 + KERNBASE);
    uint64_t maxpa = 0;

    cprintf("e820map:\n");
    int i;
    for (i = 0; i < memmap->nr_map; i ++) {
        uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
        cprintf("  memory: %08llx, [%08llx, %08llx], type = %d.\n",
                memmap->map[i].size, begin, end - 1, memmap->map[i].type);
        if (memmap->map[i].type == E820_ARM) {
            if (maxpa < end && begin < KMEMSIZE) {
                maxpa = end;
            }
        }
    }
    if (maxpa > KMEMSIZE) {
        maxpa = KMEMSIZE;
    }

    extern char end[];

    npage = maxpa / PGSIZE;
    pages = (struct Page *)ROUNDUP((void *)end, PGSIZE);

    for (i = 0; i < npage; i ++) {
        SetPageReserved(pages + i);
    }

    uintptr_t freemem = PADDR((uintptr_t)pages + sizeof(struct Page) * npage);

    for (i = 0; i < memmap->nr_map; i ++) {
        uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
        if (memmap->map[i].type == E820_ARM) {
            if (begin < freemem) {
                begin = freemem;
            }
            if (end > KMEMSIZE) {
                end = KMEMSIZE;
            }
            if (begin < end) {
                begin = ROUNDUP(begin, PGSIZE);
                end = ROUNDDOWN(end, PGSIZE);
                if (begin < end) {
                    init_memmap(pa2page(begin), (end - begin) / PGSIZE);
                }
            }
        }
    }
}
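
/* Worked example with hypothetical numbers: for an E820_ARM entry covering
 * [0x100000, 0x7FE0000) and freemem = 0x1A8000, the second loop above clips
 * the range to [0x1A8000, 0x7FE0000), page-aligns both ends with
 * ROUNDUP/ROUNDDOWN (already aligned here), and hands
 * (end - begin) / PGSIZE = 0x7E38 free pages to init_memmap. */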
static void
enable_paging(void) {
    lcr3(boot_cr3);

    // turn on paging
    uint32_t cr0 = rcr0();
    cr0 |= CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP;
    cr0 &= ~(CR0_TS | CR0_EM);
    lcr0(cr0);
}
//boot_map_segment - use pgdir to map the linear address range [la, la + size)
//                   to the physical range [pa, pa + size)
// parameters
//  la:   linear address of this memory to map (after the x86 segment mapping)
//  size: memory size
//  pa:   physical address of this memory
//  perm: permission of this memory
static void
boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, uintptr_t pa, uint32_t perm) {
    assert(PGOFF(la) == PGOFF(pa));
    size_t n = ROUNDUP(size + PGOFF(la), PGSIZE) / PGSIZE;
    la = ROUNDDOWN(la, PGSIZE);
    pa = ROUNDDOWN(pa, PGSIZE);
    for (; n > 0; n --, la += PGSIZE, pa += PGSIZE) {
        pte_t *ptep = get_pte(pgdir, la, 1);
        assert(ptep != NULL);
        *ptep = pa | PTE_P | perm;
    }
}
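
/* Worked example (assuming the usual ucore constants KERNBASE = 0xC0000000
 * and KMEMSIZE = 0x38000000): the call boot_map_segment(boot_pgdir, KERNBASE,
 * KMEMSIZE, 0, PTE_W) in pmm_init installs n = 0x38000000 / PGSIZE = 0x38000
 * PTEs, mapping linear KERNBASE + i*PGSIZE to physical i*PGSIZE with
 * PTE_P | PTE_W for each page i. */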
//boot_alloc_page - allocate one page using pmm->alloc_pages(1)
// return value: the kernel virtual address of this allocated page
//note: this function is used to get memory for the PDT (Page Directory Table) & PT (Page Table)
static void *
boot_alloc_page(void) {
    struct Page *p = alloc_page();
    if (p == NULL) {
        panic("boot_alloc_page failed.\n");
    }
    return page2kva(p);
}
//pmm_init - set up a pmm to manage physical memory, build PDT&PT to set up the paging mechanism
//         - check the correctness of the pmm & paging mechanism, print PDT&PT
void
pmm_init(void) {
    // We need to alloc/free physical memory (granularity is 4KB or another size).
    // So a framework for a physical memory manager (struct pmm_manager) is defined in pmm.h.
    // First we should init a physical memory manager (pmm) based on that framework.
    // Then the pmm can alloc/free physical memory.
    // Now the first_fit/best_fit/worst_fit/buddy_system pmms are available.
    init_pmm_manager();

    // detect physical memory space, reserve already used memory,
    // then use pmm->init_memmap to create the free page list
    page_init();

    // use pmm->check to verify the correctness of the alloc/free functions in a pmm
    check_alloc_page();

    // create boot_pgdir, an initial page directory (Page Directory Table, PDT)
    boot_pgdir = boot_alloc_page();
    memset(boot_pgdir, 0, PGSIZE);
    boot_cr3 = PADDR(boot_pgdir);

    check_pgdir();

    static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);

    // recursively insert boot_pgdir in itself
    // to form a virtual page table at virtual address VPT
    boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_P | PTE_W;

    // map all physical memory to linear memory with base linear addr KERNBASE
    // linear_addr KERNBASE ~ KERNBASE + KMEMSIZE = phy_addr 0 ~ KMEMSIZE
    // But we shouldn't use this mapping until enable_paging() & gdt_init() have finished.
    boot_map_segment(boot_pgdir, KERNBASE, KMEMSIZE, 0, PTE_W);

    // temporary map:
    // virtual_addr 3G~3G+4M = linear_addr 0~4M = linear_addr 3G~3G+4M = phy_addr 0~4M
    boot_pgdir[0] = boot_pgdir[PDX(KERNBASE)];

    enable_paging();

    // reload gdt (third time, the last time) to map all physical memory
    // virtual_addr 0~4G = linear_addr 0~4G
    // then set kernel stack (ss:esp) in TSS, set up TSS in gdt, load TSS
    gdt_init();

    // disable the map of virtual_addr 0~4M
    boot_pgdir[0] = 0;

    // now the basic virtual memory map (see memlayout.h) is established.
    // check the correctness of the basic virtual memory map.
    check_boot_pgdir();

    print_pgdir();

    kmalloc_init();
}
//get_pte - get the pte for linear address la and return the kernel virtual address of this pte
//        - if the PT containing this pte doesn't exist, allocate a page for the PT
// parameters:
//  pgdir:  the kernel virtual base address of the PDT
//  la:     the linear address that needs to be mapped
//  create: a logical value deciding whether to allocate a page for the PT
// return value: the kernel virtual address of this pte
pte_t *
get_pte(pde_t *pgdir, uintptr_t la, bool create) {
    /* LAB2 EXERCISE 2: YOUR CODE
     *
     * If you need to visit a physical address, please use KADDR().
     * Please read pmm.h for useful macros.
     *
     * If you need some help, the comments BELOW can guide you through the code.
     *
     * Some useful MACROs and DEFINEs; you can use them in the implementation below.
     * MACROs or Functions:
     *   PDX(la) : the index of the page directory entry for VIRTUAL ADDRESS la.
     *   KADDR(pa) : takes a physical address and returns the corresponding kernel virtual address.
     *   set_page_ref(page, 1) : record that the page is referenced once
     *   page2pa(page) : get the physical address of the memory that this (struct Page *) page manages
     *   struct Page * alloc_page() : allocate a page
     *   memset(void *s, char c, size_t n) : set the first n bytes of the memory area pointed to by s
     *                                       to the specified value c.
     * DEFINEs:
     *   PTE_P 0x001 // page table/directory entry flags bit : Present
     *   PTE_W 0x002 // page table/directory entry flags bit : Writeable
     *   PTE_U 0x004 // page table/directory entry flags bit : User can access
     */
#if 0
    pde_t *pdep = NULL;   // (1) find page directory entry
    if (0) {              // (2) check if entry is not present
                          // (3) check if creating is needed, then alloc page for page table
                          // CAUTION: this page is used for page table, not for common data page
                          // (4) set page reference
        uintptr_t pa = 0; // (5) get physical address of page
                          // (6) clear page content using memset
                          // (7) set page directory entry's permission
    }
    return NULL;          // (8) return page table entry
#endif
    pde_t *pdep = &pgdir[PDX(la)];
    if (!(*pdep & PTE_P)) {
        struct Page *page;
        if (!create || (page = alloc_page()) == NULL) {
            return NULL;
        }
        set_page_ref(page, 1);
        uintptr_t pa = page2pa(page);
        memset(KADDR(pa), 0, PGSIZE);
        *pdep = pa | PTE_U | PTE_W | PTE_P;
    }
    return &((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)];
}
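
/* Worked example (hypothetical address): for la = 0xC0123456,
 * PDX(la) = 0xC0123456 >> 22 = 0x300 and PTX(la) = (0xC0123456 >> 12) & 0x3FF
 * = 0x123, so get_pte returns the address of entry 0x123 inside the page
 * table whose physical address is stored in pgdir[0x300], translated to a
 * kernel virtual address via KADDR. */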
//get_page - get the related Page struct for linear address la using PDT pgdir
struct Page *
get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store) {
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep_store != NULL) {
        *ptep_store = ptep;
    }
    if (ptep != NULL && *ptep & PTE_P) {
        return pte2page(*ptep);
    }
    return NULL;
}
//page_remove_pte - free the Page struct related to linear address la
//                - and clear (invalidate) the pte related to linear address la
//note: the PT is changed, so the TLB needs to be invalidated
static inline void
page_remove_pte(pde_t *pgdir, uintptr_t la, pte_t *ptep) {
    /* LAB2 EXERCISE 3: YOUR CODE
     *
     * Please check if ptep is valid; the TLB must be manually updated if the mapping is updated.
     *
     * If you need some help, the comments BELOW can guide you through the code.
     *
     * Some useful MACROs and DEFINEs; you can use them in the implementation below.
     * MACROs or Functions:
     *   pte2page(*ptep) : get the corresponding Page struct from the value of a ptep
     *   free_page : free a page
     *   page_ref_dec(page) : decrease page->ref. NOTICE: if page->ref == 0, this page should be freed.
     *   tlb_invalidate(pde_t *pgdir, uintptr_t la) : invalidate a TLB entry, but only if the page tables
     *                                                being edited are the ones currently in use by the processor.
     * DEFINEs:
     *   PTE_P 0x001 // page table/directory entry flags bit : Present
     */
#if 0
    if (0) {                      //(1) check if this page table entry is present
        struct Page *page = NULL; //(2) find corresponding page to pte
                                  //(3) decrease page reference
                                  //(4) and free this page when page reference reaches 0
                                  //(5) clear second page table entry
                                  //(6) flush tlb
    }
#endif
    if (*ptep & PTE_P) {
        struct Page *page = pte2page(*ptep);
        if (page_ref_dec(page) == 0) {
            free_page(page);
        }
        *ptep = 0;
        tlb_invalidate(pgdir, la);
    }
}
void
unmap_range(pde_t *pgdir, uintptr_t start, uintptr_t end) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    assert(USER_ACCESS(start, end));

    do {
        pte_t *ptep = get_pte(pgdir, start, 0);
        if (ptep == NULL) {
            start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            continue ;
        }
        if (*ptep != 0) {
            page_remove_pte(pgdir, start, ptep);
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
}
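
/* Note on the PTSIZE skip above (illustrative numbers): when get_pte(..., 0)
 * returns NULL, the whole page table for the current 4M slot is absent, so
 * instead of stepping page by page, start jumps to the next 4M boundary,
 * e.g. from 0x00801000 to ROUNDDOWN(0x00801000 + PTSIZE, PTSIZE) = 0x00C00000. */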
void
exit_range(pde_t *pgdir, uintptr_t start, uintptr_t end) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    assert(USER_ACCESS(start, end));

    start = ROUNDDOWN(start, PTSIZE);
    do {
        int pde_idx = PDX(start);
        if (pgdir[pde_idx] & PTE_P) {
            free_page(pde2page(pgdir[pde_idx]));
            pgdir[pde_idx] = 0;
        }
        start += PTSIZE;
    } while (start != 0 && start < end);
}
/* copy_range - copy the content of memory [start, end) from process A to process B
 * @to:    the addr of process B's Page Directory
 * @from:  the addr of process A's Page Directory
 * @share: flag indicating whether to dup or share. We just use the dup method, so it isn't used.
 *
 * CALL GRAPH: copy_mm-->dup_mmap-->copy_range
 */
int
copy_range(pde_t *to, pde_t *from, uintptr_t start, uintptr_t end, bool share) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    assert(USER_ACCESS(start, end));

    // copy content page by page
    do {
        // call get_pte to find process A's pte for the address start
        pte_t *ptep = get_pte(from, start, 0), *nptep;
        if (ptep == NULL) {
            start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            continue ;
        }
        // call get_pte to find process B's pte for the address start; if the PT is absent, alloc one
        if (*ptep & PTE_P) {
            if ((nptep = get_pte(to, start, 1)) == NULL) {
                return -E_NO_MEM;
            }
            uint32_t perm = (*ptep & PTE_USER);
            // get the page from ptep
            struct Page *page = pte2page(*ptep);
            // alloc a page for process B
            struct Page *npage = alloc_page();
            assert(page != NULL);
            assert(npage != NULL);
            int ret = 0;
            /* LAB5: EXERCISE2 YOUR CODE
             * Replicate the content of page into npage, then build the mapping from the
             * physical addr of npage to the linear addr start.
             *
             * Some useful MACROs and DEFINEs; you can use them in the implementation below.
             * MACROs or Functions:
             *   page2kva(struct Page *page): return the kernel virtual addr of the memory that page manages (SEE pmm.h)
             *   page_insert: build the map between the phy addr of a Page and a linear addr la
             *   memcpy: typical memory copy function
             *
             * (1) find src_kvaddr: the kernel virtual address of page
             * (2) find dst_kvaddr: the kernel virtual address of npage
             * (3) memory copy from src_kvaddr to dst_kvaddr, size is PGSIZE
             * (4) build the map between the phy addr of npage and the linear addr start
             */
            void *kva_src = page2kva(page);
            void *kva_dst = page2kva(npage);
            memcpy(kva_dst, kva_src, PGSIZE);
            ret = page_insert(to, npage, start, perm);
            assert(ret == 0);
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
    return 0;
}
//page_remove - free the Page related to linear address la, if it has a valid pte
void
page_remove(pde_t *pgdir, uintptr_t la) {
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep != NULL) {
        page_remove_pte(pgdir, la, ptep);
    }
}

//page_insert - build the map between the phy addr of a Page and the linear addr la
// parameters:
//  pgdir: the kernel virtual base address of the PDT
//  page:  the Page which needs to be mapped
//  la:    the linear address that needs to be mapped
//  perm:  the permission of this Page, set in the related pte
// return value: always 0
//note: the PT is changed, so the TLB needs to be invalidated
int
page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) {
    pte_t *ptep = get_pte(pgdir, la, 1);
    if (ptep == NULL) {
        return -E_NO_MEM;
    }
    page_ref_inc(page);
    if (*ptep & PTE_P) {
        struct Page *p = pte2page(*ptep);
        if (p == page) {
            page_ref_dec(page);
        }
        else {
            page_remove_pte(pgdir, la, ptep);
        }
    }
    *ptep = page2pa(page) | PTE_P | perm;
    tlb_invalidate(pgdir, la);
    return 0;
}
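
/* Note on the reference counting above (explanatory, not from the source):
 * page_ref_inc runs before the old PTE is examined. This matters when the old
 * PTE already maps this very page with a reference count of 1: removing the
 * old mapping first would drop the count to 0 and free the page being
 * inserted. With the increment first, the p == page branch just cancels it
 * via page_ref_dec and the PTE is rewritten with the new permissions. */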
// invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
void
tlb_invalidate(pde_t *pgdir, uintptr_t la) {
    if (rcr3() == PADDR(pgdir)) {
        invlpg((void *)la);
    }
}

// pgdir_alloc_page - call the alloc_page & page_insert functions to
//                  - allocate a page and set up the pa<->la mapping
//                  - with linear address la and the PDT pgdir
struct Page *
pgdir_alloc_page(pde_t *pgdir, uintptr_t la, uint32_t perm) {
    struct Page *page = alloc_page();
    if (page != NULL) {
        if (page_insert(pgdir, page, la, perm) != 0) {
            free_page(page);
            return NULL;
        }
        if (swap_init_ok) {
            if (check_mm_struct != NULL) {
                swap_map_swappable(check_mm_struct, la, page, 0);
                page->pra_vaddr = la;
                assert(page_ref(page) == 1);
                //cprintf("get No. %d page: pra_vaddr %x, pra_link.prev %x, pra_link_next %x in pgdir_alloc_page\n", (page-pages), page->pra_vaddr, page->pra_page_link.prev, page->pra_page_link.next);
            }
            else { // the current process exists now; this should be fixed in the future
                //swap_map_swappable(current->mm, la, page, 0);
                //page->pra_vaddr = la;
                //assert(page_ref(page) == 1);
                //panic("pgdir_alloc_page: no pages. now current is existed, should fix it in the future\n");
            }
        }
    }
    return page;
}
static void
check_alloc_page(void) {
    pmm_manager->check();
    cprintf("check_alloc_page() succeeded!\n");
}

static void
check_pgdir(void) {
    assert(npage <= KMEMSIZE / PGSIZE);
    assert(boot_pgdir != NULL && (uint32_t)PGOFF(boot_pgdir) == 0);
    assert(get_page(boot_pgdir, 0x0, NULL) == NULL);

    struct Page *p1, *p2;
    p1 = alloc_page();
    assert(page_insert(boot_pgdir, p1, 0x0, 0) == 0);

    pte_t *ptep;
    assert((ptep = get_pte(boot_pgdir, 0x0, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert(page_ref(p1) == 1);

    ptep = &((pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])))[1];
    assert(get_pte(boot_pgdir, PGSIZE, 0) == ptep);

    p2 = alloc_page();
    assert(page_insert(boot_pgdir, p2, PGSIZE, PTE_U | PTE_W) == 0);
    assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
    assert(*ptep & PTE_U);
    assert(*ptep & PTE_W);
    assert(boot_pgdir[0] & PTE_U);
    assert(page_ref(p2) == 1);

    assert(page_insert(boot_pgdir, p1, PGSIZE, 0) == 0);
    assert(page_ref(p1) == 2);
    assert(page_ref(p2) == 0);
    assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert((*ptep & PTE_U) == 0);

    page_remove(boot_pgdir, 0x0);
    assert(page_ref(p1) == 1);
    assert(page_ref(p2) == 0);

    page_remove(boot_pgdir, PGSIZE);
    assert(page_ref(p1) == 0);
    assert(page_ref(p2) == 0);

    assert(page_ref(pde2page(boot_pgdir[0])) == 1);
    free_page(pde2page(boot_pgdir[0]));
    boot_pgdir[0] = 0;

    cprintf("check_pgdir() succeeded!\n");
}
static void
check_boot_pgdir(void) {
    pte_t *ptep;
    int i;
    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

    assert(boot_pgdir[0] == 0);

    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, 0x100, PTE_W) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)0x100, str);
    assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0);

    *(char *)(page2kva(p) + 0x100) = '\0';
    assert(strlen((const char *)0x100) == 0);

    free_page(p);
    free_page(pde2page(boot_pgdir[0]));
    boot_pgdir[0] = 0;

    cprintf("check_boot_pgdir() succeeded!\n");
}
//perm2str - use the characters 'u', 'r', 'w', '-' to represent the permission
static const char *
perm2str(int perm) {
    static char str[4];
    str[0] = (perm & PTE_U) ? 'u' : '-';
    str[1] = 'r';
    str[2] = (perm & PTE_W) ? 'w' : '-';
    str[3] = '\0';
    return str;
}
//get_pgtable_items - in the [left, right) range of a PDT or PT, find a continuous run of
//                  - present entries with identical permissions, covering the linear addr
//                  - space (left_store*X_SIZE ~ right_store*X_SIZE)
//                  - X_SIZE=PTSIZE=4M for a PDT; X_SIZE=PGSIZE=4K for a PT
// parameters:
//  left:        the low side of the table's range (unused in the current implementation)
//  right:       the high side of the table's range
//  start:       where to begin scanning inside the table's range
//  table:       the beginning addr of the table
//  left_store:  pointer used to return the low side of the found run
//  right_store: pointer used to return the high side of the found run
// return value: 0 - no valid item range found; perm - a valid item range with permission perm
static int
get_pgtable_items(size_t left, size_t right, size_t start, uintptr_t *table, size_t *left_store, size_t *right_store) {
    if (start >= right) {
        return 0;
    }
    while (start < right && !(table[start] & PTE_P)) {
        start ++;
    }
    if (start < right) {
        if (left_store != NULL) {
            *left_store = start;
        }
        int perm = (table[start ++] & PTE_USER);
        while (start < right && (table[start] & PTE_USER) == perm) {
            start ++;
        }
        if (right_store != NULL) {
            *right_store = start;
        }
        return perm;
    }
    return 0;
}
//print_pgdir - print the PDT&PT
void
print_pgdir(void) {
    cprintf("-------------------- BEGIN --------------------\n");
    size_t left, right = 0, perm;
    while ((perm = get_pgtable_items(0, NPDEENTRY, right, vpd, &left, &right)) != 0) {
        cprintf("PDE(%03x) %08x-%08x %08x %s\n", right - left,
                left * PTSIZE, right * PTSIZE, (right - left) * PTSIZE, perm2str(perm));
        size_t l, r = left * NPTEENTRY;
        while ((perm = get_pgtable_items(left * NPTEENTRY, right * NPTEENTRY, r, vpt, &l, &r)) != 0) {
            cprintf("  |-- PTE(%05x) %08x-%08x %08x %s\n", r - l,
                    l * PGSIZE, r * PGSIZE, (r - l) * PGSIZE, perm2str(perm));
        }
    }
    cprintf("--------------------- END ---------------------\n");
}
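
/* For reference, after pmm_init the output of print_pgdir has roughly the
 * following shape (illustrative, not captured output; exact numbers depend
 * on KMEMSIZE and the detected memory):
 *
 *   -------------------- BEGIN --------------------
 *   PDE(0e0) c0000000-f8000000 38000000 urw
 *     |-- PTE(38000) c0000000-f8000000 38000000 -rw
 *   PDE(001) fac00000-fb000000 00400000 -rw
 *     |-- PTE(000e0) faf00000-fafe0000 000e0000 urw
 *     |-- PTE(00001) fafeb000-fafec000 00001000 -rw
 *   --------------------- END ---------------------
 */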