Lab code for the Operating Systems course.

#include <proc.h>
#include <kmalloc.h>
#include <string.h>
#include <sync.h>
#include <pmm.h>
#include <error.h>
#include <sched.h>
#include <elf.h>
#include <vmm.h>
#include <trap.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>

/* ------------- process/thread mechanism design & implementation -------------
(a simplified Linux process/thread mechanism)

introduction:
  ucore implements a simple process/thread mechanism. A process owns an independent
  memory space, at least one thread of execution, kernel data (for management),
  processor state (for context switching), files (in lab6), etc. ucore needs to
  manage all these details efficiently. In ucore, a thread is just a special kind
  of process (it shares its process's memory).
------------------------------
process state       : meaning                   -- reason
PROC_UNINIT         : uninitialized             -- alloc_proc
PROC_SLEEPING       : sleeping                  -- try_free_pages, do_wait, do_sleep
PROC_RUNNABLE       : runnable (maybe running)  -- proc_init, wakeup_proc
PROC_ZOMBIE         : almost dead               -- do_exit
-----------------------------
process state changing:

  alloc_proc                                 RUNNING
      +                                   +--<----<--+
      +                                   + proc_run +
      V                                   +-->---->--+
PROC_UNINIT -- proc_init/wakeup_proc --> PROC_RUNNABLE -- try_free_pages/do_wait/do_sleep --> PROC_SLEEPING --
                                           A      +                                                        +
                                           |      +--- do_exit --> PROC_ZOMBIE                             +
                                           +                                                               +
                                           -----------------------wakeup_proc-------------------------------
-----------------------------
process relations
parent:           proc->parent  (proc is the child)
children:         proc->cptr    (proc is the parent)
older sibling:    proc->optr    (proc is the younger sibling)
younger sibling:  proc->yptr    (proc is the older sibling)
-----------------------------
related syscalls for processes:
SYS_exit    : process exit                                  --> do_exit
SYS_fork    : create a child process, dup mm                --> do_fork --> wakeup_proc
SYS_wait    : wait for a process                            --> do_wait
SYS_exec    : after fork, the process executes a program    --> load a program and refresh the mm
SYS_clone   : create a child thread                         --> do_fork --> wakeup_proc
SYS_yield   : the process flags itself as needing rescheduling
              -- proc->need_resched = 1, then the scheduler will reschedule this process
SYS_sleep   : process sleeps                                --> do_sleep
SYS_kill    : kill a process                                --> do_kill --> proc->flags |= PF_EXITING
                                                            --> wakeup_proc --> do_wait --> do_exit
SYS_getpid  : get the process's pid
*/
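/*
 * Rough lifecycle sketch (for orientation only; it restates how the functions
 * defined below in this file are used together):
 *
 *   proc_init()                          // builds idleproc (pid 0), then
 *     -> kernel_thread(init_main, ...)   //   creates initproc (pid 1)
 *          -> do_fork()                  // alloc_proc + setup_kstack + copy_mm + copy_thread
 *          -> wakeup_proc()              // the new proc becomes PROC_RUNNABLE
 *   cpu_idle() -> schedule() -> proc_run(next)
 *                               // switch_to() resumes the new thread in forkret(),
 *                               // which pops its trapframe via forkrets().
 */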
// the process set's list
list_entry_t proc_list;

#define HASH_SHIFT          10
#define HASH_LIST_SIZE      (1 << HASH_SHIFT)
#define pid_hashfn(x)       (hash32(x, HASH_SHIFT))

// hash list for the process set, keyed by pid
static list_entry_t hash_list[HASH_LIST_SIZE];

// idle proc
struct proc_struct *idleproc = NULL;
// init proc
struct proc_struct *initproc = NULL;
// current proc
struct proc_struct *current = NULL;

static int nr_process = 0;

void kernel_thread_entry(void);
void forkrets(struct trapframe *tf);
void switch_to(struct context *from, struct context *to);

// alloc_proc - alloc a proc_struct and init all fields of proc_struct
static struct proc_struct *
alloc_proc(void) {
    struct proc_struct *proc = kmalloc(sizeof(struct proc_struct));
    if (proc != NULL) {
        //LAB4:EXERCISE1 YOUR CODE
        /*
         * below fields in proc_struct need to be initialized
         *     enum proc_state state;                      // Process state
         *     int pid;                                    // Process ID
         *     int runs;                                   // the running times of the process
         *     uintptr_t kstack;                           // Process kernel stack
         *     volatile bool need_resched;                 // bool value: need to be rescheduled to release the CPU?
         *     struct proc_struct *parent;                 // the parent process
         *     struct mm_struct *mm;                       // Process's memory management field
         *     struct context context;                     // Switch here to run the process
         *     struct trapframe *tf;                       // Trap frame for the current interrupt
         *     uintptr_t cr3;                              // CR3 register: the base addr of the Page Directory Table (PDT)
         *     uint32_t flags;                             // Process flags
         *     char name[PROC_NAME_LEN + 1];               // Process name
         */
        //LAB5 YOUR CODE : (update LAB4 steps)
        /*
         * below fields (added in LAB5) in proc_struct need to be initialized
         *     uint32_t wait_state;                        // waiting state
         *     struct proc_struct *cptr, *yptr, *optr;     // relations between processes
         */
        proc->state = PROC_UNINIT;
        proc->pid = -1;
        proc->runs = 0;
        proc->kstack = 0;
        proc->need_resched = 0;
        proc->parent = NULL;
        proc->mm = NULL;
        memset(&(proc->context), 0, sizeof(struct context));
        proc->tf = NULL;
        proc->cr3 = boot_cr3;
        proc->flags = 0;
        memset(proc->name, 0, PROC_NAME_LEN);
        proc->wait_state = 0;
        proc->cptr = proc->optr = proc->yptr = NULL;
        proc->rq = NULL;
        proc->run_link.prev = proc->run_link.next = NULL;
        proc->time_slice = 0;
        proc->lab6_run_pool.left = proc->lab6_run_pool.right = proc->lab6_run_pool.parent = NULL;
        proc->lab6_stride = 0;
        proc->lab6_priority = 0;
    }
    return proc;
}

// set_proc_name - set the name of proc
char *
set_proc_name(struct proc_struct *proc, const char *name) {
    memset(proc->name, 0, sizeof(proc->name));
    return memcpy(proc->name, name, PROC_NAME_LEN);
}

// get_proc_name - get the name of proc
char *
get_proc_name(struct proc_struct *proc) {
    static char name[PROC_NAME_LEN + 1];
    memset(name, 0, sizeof(name));
    return memcpy(name, proc->name, PROC_NAME_LEN);
}

// set_links - set the relation links of the process
static void
set_links(struct proc_struct *proc) {
    list_add(&proc_list, &(proc->list_link));
    proc->yptr = NULL;
    if ((proc->optr = proc->parent->cptr) != NULL) {
        proc->optr->yptr = proc;
    }
    proc->parent->cptr = proc;
    nr_process ++;
}

// remove_links - clean the relation links of the process
static void
remove_links(struct proc_struct *proc) {
    list_del(&(proc->list_link));
    if (proc->optr != NULL) {
        proc->optr->yptr = proc->yptr;
    }
    if (proc->yptr != NULL) {
        proc->yptr->optr = proc->optr;
    }
    else {
        proc->parent->cptr = proc->optr;
    }
    nr_process --;
}

// get_pid - alloc a unique pid for a process
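// get_pid keeps two static cursors: last_pid, the most recently handed-out pid,
// and next_safe, an upper bound such that no live process owns a pid in the open
// interval (last_pid, next_safe). Illustrative example (not from the original
// comments): with live pids {1, 2, 5}, a scan that settles on last_pid = 3 also
// records next_safe = 5, so the next call can return 4 without walking proc_list.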
static int
get_pid(void) {
    static_assert(MAX_PID > MAX_PROCESS);
    struct proc_struct *proc;
    list_entry_t *list = &proc_list, *le;
    static int next_safe = MAX_PID, last_pid = MAX_PID;
    if (++ last_pid >= MAX_PID) {
        last_pid = 1;
        goto inside;
    }
    if (last_pid >= next_safe) {
    inside:
        next_safe = MAX_PID;
    repeat:
        le = list;
        while ((le = list_next(le)) != list) {
            proc = le2proc(le, list_link);
            if (proc->pid == last_pid) {
                if (++ last_pid >= next_safe) {
                    if (last_pid >= MAX_PID) {
                        last_pid = 1;
                    }
                    next_safe = MAX_PID;
                    goto repeat;
                }
            }
            else if (proc->pid > last_pid && next_safe > proc->pid) {
                next_safe = proc->pid;
            }
        }
    }
    return last_pid;
}

// proc_run - make process "proc" run on the cpu
// NOTE: before calling switch_to, load the base address of "proc"'s new PDT
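// Under disabled interrupts, the body below: (1) marks "proc" as current,
// (2) points the TSS esp0 at the top of proc's kernel stack so the next
// user-to-kernel trap lands on the right stack, (3) loads proc's cr3 to switch
// the address space, and (4) calls switch_to to swap the saved register context.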
void
proc_run(struct proc_struct *proc) {
    if (proc != current) {
        bool intr_flag;
        struct proc_struct *prev = current, *next = proc;
        local_intr_save(intr_flag);
        {
            current = proc;
            load_esp0(next->kstack + KSTACKSIZE);
            lcr3(next->cr3);
            switch_to(&(prev->context), &(next->context));
        }
        local_intr_restore(intr_flag);
    }
}

// forkret -- the first kernel entry point of a new thread/process
// NOTE: the addr of forkret is set in the copy_thread function
//       after switch_to, the current proc will execute here.
static void
forkret(void) {
    forkrets(current->tf);
}

// hash_proc - add proc into the proc hash_list
static void
hash_proc(struct proc_struct *proc) {
    list_add(hash_list + pid_hashfn(proc->pid), &(proc->hash_link));
}

// unhash_proc - delete proc from the proc hash_list
static void
unhash_proc(struct proc_struct *proc) {
    list_del(&(proc->hash_link));
}

// find_proc - find a proc in the proc hash_list according to pid
struct proc_struct *
find_proc(int pid) {
    if (0 < pid && pid < MAX_PID) {
        list_entry_t *list = hash_list + pid_hashfn(pid), *le = list;
        while ((le = list_next(le)) != list) {
            struct proc_struct *proc = le2proc(le, hash_link);
            if (proc->pid == pid) {
                return proc;
            }
        }
    }
    return NULL;
}

// kernel_thread - create a kernel thread using the "fn" function
// NOTE: the contents of the temporary trapframe tf will be copied to
//       proc->tf in do_fork --> copy_thread
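// Note: the temporary trapframe starts the new thread at kernel_thread_entry
// (an assembly stub); in ucore it pushes reg_edx (arg), calls the function held
// in reg_ebx (fn), and then passes fn's return value to do_exit, so a kernel
// thread terminates simply by returning from fn.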
int
kernel_thread(int (*fn)(void *), void *arg, uint32_t clone_flags) {
    struct trapframe tf;
    memset(&tf, 0, sizeof(struct trapframe));
    tf.tf_cs = KERNEL_CS;
    tf.tf_ds = tf.tf_es = tf.tf_ss = KERNEL_DS;
    tf.tf_regs.reg_ebx = (uint32_t)fn;
    tf.tf_regs.reg_edx = (uint32_t)arg;
    tf.tf_eip = (uint32_t)kernel_thread_entry;
    return do_fork(clone_flags | CLONE_VM, 0, &tf);
}

// setup_kstack - alloc pages with size KSTACKPAGE as the process kernel stack
static int
setup_kstack(struct proc_struct *proc) {
    struct Page *page = alloc_pages(KSTACKPAGE);
    if (page != NULL) {
        proc->kstack = (uintptr_t)page2kva(page);
        return 0;
    }
    return -E_NO_MEM;
}

// put_kstack - free the memory space of the process kernel stack
static void
put_kstack(struct proc_struct *proc) {
    free_pages(kva2page((void *)(proc->kstack)), KSTACKPAGE);
}

// setup_pgdir - alloc one page as the PDT
static int
setup_pgdir(struct mm_struct *mm) {
    struct Page *page;
    if ((page = alloc_page()) == NULL) {
        return -E_NO_MEM;
    }
    pde_t *pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
    mm->pgdir = pgdir;
    return 0;
}

// put_pgdir - free the memory space of the PDT
static void
put_pgdir(struct mm_struct *mm) {
    free_page(kva2page(mm->pgdir));
}

// copy_mm - process "proc" duplicates OR shares process "current"'s mm according to clone_flags
//         - if clone_flags & CLONE_VM, then "share"; else "duplicate"
static int
copy_mm(uint32_t clone_flags, struct proc_struct *proc) {
    struct mm_struct *mm, *oldmm = current->mm;
    /* current is a kernel thread */
    if (oldmm == NULL) {
        return 0;
    }
    if (clone_flags & CLONE_VM) {
        mm = oldmm;
        goto good_mm;
    }
    int ret = -E_NO_MEM;
    if ((mm = mm_create()) == NULL) {
        goto bad_mm;
    }
    if (setup_pgdir(mm) != 0) {
        goto bad_pgdir_cleanup_mm;
    }
    lock_mm(oldmm);
    {
        ret = dup_mmap(mm, oldmm);
    }
    unlock_mm(oldmm);
    if (ret != 0) {
        goto bad_dup_cleanup_mmap;
    }
good_mm:
    mm_count_inc(mm);
    proc->mm = mm;
    proc->cr3 = PADDR(mm->pgdir);
    return 0;
bad_dup_cleanup_mmap:
    exit_mmap(mm);
    put_pgdir(mm);
bad_pgdir_cleanup_mm:
    mm_destroy(mm);
bad_mm:
    return ret;
}

// copy_thread - setup the trapframe at the top of the process's kernel stack and
//             - setup the kernel entry point and stack of the process
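// Resulting kernel-stack layout (sketch; the values are set by the body below):
//
//     kstack + KSTACKSIZE ->  +------------------+
//                             | struct trapframe |  <- proc->tf, proc->context.esp
//                             +------------------+
//                             |  free stack ...  |
//     kstack              ->  +------------------+
//
// context.eip = forkret, so the first switch_to() into this proc runs
// forkret() -> forkrets(proc->tf), i.e. it "returns from a trap" into the new code.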
static void
copy_thread(struct proc_struct *proc, uintptr_t esp, struct trapframe *tf) {
    proc->tf = (struct trapframe *)(proc->kstack + KSTACKSIZE) - 1;
    *(proc->tf) = *tf;
    proc->tf->tf_regs.reg_eax = 0;
    proc->tf->tf_esp = esp;
    proc->tf->tf_eflags |= FL_IF;
    proc->context.eip = (uintptr_t)forkret;
    proc->context.esp = (uintptr_t)(proc->tf);
}

/* do_fork - create a new child process for the parent
 * @clone_flags: used to guide how to clone the child process
 * @stack:       the parent's user stack pointer. if stack == 0, it means to fork a kernel thread.
 * @tf:          the trapframe info, which will be copied to the child process's proc->tf
 */
int
do_fork(uint32_t clone_flags, uintptr_t stack, struct trapframe *tf) {
    int ret = -E_NO_FREE_PROC;
    struct proc_struct *proc;
    if (nr_process >= MAX_PROCESS) {
        goto fork_out;
    }
    ret = -E_NO_MEM;
    //LAB4:EXERCISE2 YOUR CODE
    /*
     * Some useful MACROs, Functions and DEFINEs you can use in the implementation below.
     * MACROs or Functions:
     *   alloc_proc:   create a proc struct and init its fields (lab4:exercise1)
     *   setup_kstack: alloc pages with size KSTACKPAGE as the process kernel stack
     *   copy_mm:      process "proc" duplicates OR shares process "current"'s mm according to clone_flags
     *                 if clone_flags & CLONE_VM, then "share"; else "duplicate"
     *   copy_thread:  setup the trapframe at the top of the process's kernel stack and
     *                 setup the kernel entry point and stack of the process
     *   hash_proc:    add proc into the proc hash_list
     *   get_pid:      alloc a unique pid for the process
     *   wakeup_proc:  set proc->state = PROC_RUNNABLE
     * VARIABLES:
     *   proc_list:    the process set's list
     *   nr_process:   the number of processes in the set
     */
    // 1. call alloc_proc to allocate a proc_struct
    // 2. call setup_kstack to allocate a kernel stack for the child process
    // 3. call copy_mm to dup OR share mm according to clone_flags
    // 4. call copy_thread to setup tf & context in proc_struct
    // 5. insert proc_struct into hash_list && proc_list
    // 6. call wakeup_proc to make the new child process RUNNABLE
    // 7. set the ret value using the child proc's pid
    //LAB5 YOUR CODE : (update LAB4 steps)
    /* Some Functions
     *   set_links:  set the relation links of the process. ALSO SEE: remove_links: clean the relation links of the process
     *   -------------------
     *   update step 1: set the child proc's parent to the current process, and make sure the current process's wait_state is 0
     *   update step 5: insert proc_struct into hash_list && proc_list, and set the relation links of the process
     */
    if ((proc = alloc_proc()) == NULL) {
        goto fork_out;
    }
    proc->parent = current;
    assert(current->wait_state == 0);
    if (setup_kstack(proc) != 0) {
        goto bad_fork_cleanup_proc;
    }
    if (copy_mm(clone_flags, proc) != 0) {
        goto bad_fork_cleanup_kstack;
    }
    copy_thread(proc, stack, tf);
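    // The pid assignment and the hash_list/proc_list insertion below run with
    // interrupts disabled, so get_pid's scan of proc_list and the link updates
    // cannot be interleaved with another fork/exit on this CPU.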
    bool intr_flag;
    local_intr_save(intr_flag);
    {
        proc->pid = get_pid();
        hash_proc(proc);
        set_links(proc);
    }
    local_intr_restore(intr_flag);
    wakeup_proc(proc);
    ret = proc->pid;
fork_out:
    return ret;
bad_fork_cleanup_kstack:
    put_kstack(proc);
bad_fork_cleanup_proc:
    kfree(proc);
    goto fork_out;
}

// do_exit - called by sys_exit
//   1. call exit_mmap & put_pgdir & mm_destroy to free almost all the memory space of the process
//   2. set the process's state to PROC_ZOMBIE, then call wakeup_proc(parent) to ask the parent to reclaim it.
//   3. call the scheduler to switch to another process
int
do_exit(int error_code) {
    if (current == idleproc) {
        panic("idleproc exit.\n");
    }
    if (current == initproc) {
        panic("initproc exit.\n");
    }
    struct mm_struct *mm = current->mm;
    if (mm != NULL) {
        lcr3(boot_cr3);
        if (mm_count_dec(mm) == 0) {
            exit_mmap(mm);
            put_pgdir(mm);
            mm_destroy(mm);
        }
        current->mm = NULL;
    }
    current->state = PROC_ZOMBIE;
    current->exit_code = error_code;
    bool intr_flag;
    struct proc_struct *proc;
    local_intr_save(intr_flag);
    {
        proc = current->parent;
        if (proc->wait_state == WT_CHILD) {
            wakeup_proc(proc);
        }
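        // Hand every remaining child of the dying process over to initproc;
        // if a child is already a zombie, initproc may be blocked in do_wait
        // (wait_state == WT_CHILD) and must be woken so it can reclaim it.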
        while (current->cptr != NULL) {
            proc = current->cptr;
            current->cptr = proc->optr;
            proc->yptr = NULL;
            if ((proc->optr = initproc->cptr) != NULL) {
                initproc->cptr->yptr = proc;
            }
            proc->parent = initproc;
            initproc->cptr = proc;
            if (proc->state == PROC_ZOMBIE) {
                if (initproc->wait_state == WT_CHILD) {
                    wakeup_proc(initproc);
                }
            }
        }
    }
    local_intr_restore(intr_flag);
    schedule();
    panic("do_exit will not return!! %d.\n", current->pid);
}

/* load_icode - load the content of a binary program (ELF format) as the new content of the current process
 * @binary: the memory address of the content of the binary program
 * @size:   the size of the content of the binary program
 */
static int
load_icode(unsigned char *binary, size_t size) {
    if (current->mm != NULL) {
        panic("load_icode: current->mm must be empty.\n");
    }
    int ret = -E_NO_MEM;
    struct mm_struct *mm;
    //(1) create a new mm for the current process
    if ((mm = mm_create()) == NULL) {
        goto bad_mm;
    }
    //(2) create a new PDT, and set mm->pgdir = kernel virtual addr of the PDT
    if (setup_pgdir(mm) != 0) {
        goto bad_pgdir_cleanup_mm;
    }
    //(3) copy the TEXT/DATA sections and build the BSS parts of the binary into the memory space of the process
    struct Page *page;
    //(3.1) get the file header of the binary program (ELF format)
    struct elfhdr *elf = (struct elfhdr *)binary;
    //(3.2) get the program headers of the binary program (ELF format)
    struct proghdr *ph = (struct proghdr *)(binary + elf->e_phoff);
    //(3.3) is this a valid ELF program?
    if (elf->e_magic != ELF_MAGIC) {
        ret = -E_INVAL_ELF;
        goto bad_elf_cleanup_pgdir;
    }
    uint32_t vm_flags, perm;
    struct proghdr *ph_end = ph + elf->e_phnum;
    for (; ph < ph_end; ph ++) {
        //(3.4) iterate over the program headers
        if (ph->p_type != ELF_PT_LOAD) {
            continue ;
        }
        if (ph->p_filesz > ph->p_memsz) {
            ret = -E_INVAL_ELF;
            goto bad_cleanup_mmap;
        }
        if (ph->p_filesz == 0) {
            continue ;
        }
        //(3.5) call mm_map to set up the new vma (ph->p_va, ph->p_memsz)
        vm_flags = 0, perm = PTE_U;
        if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
        if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
        if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
        if (vm_flags & VM_WRITE) perm |= PTE_W;
        if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
            goto bad_cleanup_mmap;
        }
        unsigned char *from = binary + ph->p_offset;
        size_t off, size;
        uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);
        ret = -E_NO_MEM;
        //(3.6) alloc memory, and copy the contents of every program section (from, from + end) to the process's memory (la, la + end)
        end = ph->p_va + ph->p_filesz;
        //(3.6.1) copy the TEXT/DATA sections of the binary program
        while (start < end) {
            if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
                goto bad_cleanup_mmap;
            }
            off = start - la, size = PGSIZE - off, la += PGSIZE;
            if (end < la) {
                size -= la - end;
            }
            memcpy(page2kva(page) + off, from, size);
            start += size, from += size;
        }
        //(3.6.2) build the BSS section of the binary program
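        // Two phases: first zero the tail of the last page that received file
        // contents (still referenced by "page"), then allocate and zero whole
        // pages until the region reaches ph->p_va + ph->p_memsz.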
        end = ph->p_va + ph->p_memsz;
        if (start < la) {
            /* ph->p_memsz == ph->p_filesz */
            if (start == end) {
                continue ;
            }
            off = start + PGSIZE - la, size = PGSIZE - off;
            if (end < la) {
                size -= la - end;
            }
            memset(page2kva(page) + off, 0, size);
            start += size;
            assert((end < la && start == end) || (end >= la && start == la));
        }
        while (start < end) {
            if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
                goto bad_cleanup_mmap;
            }
            off = start - la, size = PGSIZE - off, la += PGSIZE;
            if (end < la) {
                size -= la - end;
            }
            memset(page2kva(page) + off, 0, size);
            start += size;
        }
    }
    //(4) build the user stack memory
    vm_flags = VM_READ | VM_WRITE | VM_STACK;
    if ((ret = mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags, NULL)) != 0) {
        goto bad_cleanup_mmap;
    }
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - PGSIZE, PTE_USER) != NULL);
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - 2 * PGSIZE, PTE_USER) != NULL);
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - 3 * PGSIZE, PTE_USER) != NULL);
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - 4 * PGSIZE, PTE_USER) != NULL);
    //(5) set the current process's mm and cr3, and set the CR3 register = physical addr of the Page Directory
    mm_count_inc(mm);
    current->mm = mm;
    current->cr3 = PADDR(mm->pgdir);
    lcr3(PADDR(mm->pgdir));
    //(6) setup the trapframe for the user environment
    struct trapframe *tf = current->tf;
    memset(tf, 0, sizeof(struct trapframe));
    /* LAB5:EXERCISE1 YOUR CODE
     * should set tf_cs, tf_ds, tf_es, tf_ss, tf_esp, tf_eip, tf_eflags
     * NOTICE: If we set the trapframe correctly, then the user level process can return to USER MODE from the kernel. So
     *         tf_cs should be the USER_CS segment (see memlayout.h)
     *         tf_ds = tf_es = tf_ss should be the USER_DS segment
     *         tf_esp should be the top addr of the user stack (USTACKTOP)
     *         tf_eip should be the entry point of this binary program (elf->e_entry)
     *         tf_eflags should be set to enable the CPU to deliver interrupts
     */
    tf->tf_cs = USER_CS;
    tf->tf_ds = tf->tf_es = tf->tf_ss = USER_DS;
    tf->tf_esp = USTACKTOP;
    tf->tf_eip = elf->e_entry;
    tf->tf_eflags = FL_IF;
    ret = 0;
out:
    return ret;
bad_cleanup_mmap:
    exit_mmap(mm);
bad_elf_cleanup_pgdir:
    put_pgdir(mm);
bad_pgdir_cleanup_mm:
    mm_destroy(mm);
bad_mm:
    goto out;
}

// do_execve - call exit_mmap(mm) & put_pgdir(mm) to reclaim the memory space of the current process
//           - call load_icode to set up the new memory space according to the binary program
int
do_execve(const char *name, size_t len, unsigned char *binary, size_t size) {
    struct mm_struct *mm = current->mm;
    if (!user_mem_check(mm, (uintptr_t)name, len, 0)) {
        return -E_INVAL;
    }
    if (len > PROC_NAME_LEN) {
        len = PROC_NAME_LEN;
    }
    char local_name[PROC_NAME_LEN + 1];
    memset(local_name, 0, sizeof(local_name));
    memcpy(local_name, name, len);
    if (mm != NULL) {
        lcr3(boot_cr3);
        if (mm_count_dec(mm) == 0) {
            exit_mmap(mm);
            put_pgdir(mm);
            mm_destroy(mm);
        }
        current->mm = NULL;
    }
    int ret;
    if ((ret = load_icode(binary, size)) != 0) {
        goto execve_exit;
    }
    set_proc_name(current, local_name);
    return 0;
execve_exit:
    do_exit(ret);
    panic("already exit: %e.\n", ret);
}

// do_yield - ask the scheduler to reschedule
int
do_yield(void) {
    current->need_resched = 1;
    return 0;
}

// do_wait - wait for one child (pid != 0) or any child (pid == 0) to reach the PROC_ZOMBIE state,
//         - then free the kernel stack and the proc struct of that child.
// NOTE: only after do_wait are all resources of the child process freed.
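// If a matching child exists but none is a zombie yet, the caller sleeps with
// wait_state = WT_CHILD; a child's do_exit() wakes it up and the loop re-checks.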
int
do_wait(int pid, int *code_store) {
    struct mm_struct *mm = current->mm;
    if (code_store != NULL) {
        if (!user_mem_check(mm, (uintptr_t)code_store, sizeof(int), 1)) {
            return -E_INVAL;
        }
    }
    struct proc_struct *proc;
    bool intr_flag, haskid;
repeat:
    haskid = 0;
    if (pid != 0) {
        proc = find_proc(pid);
        if (proc != NULL && proc->parent == current) {
            haskid = 1;
            if (proc->state == PROC_ZOMBIE) {
                goto found;
            }
        }
    }
    else {
        proc = current->cptr;
        for (; proc != NULL; proc = proc->optr) {
            haskid = 1;
            if (proc->state == PROC_ZOMBIE) {
                goto found;
            }
        }
    }
    if (haskid) {
        current->state = PROC_SLEEPING;
        current->wait_state = WT_CHILD;
        schedule();
        if (current->flags & PF_EXITING) {
            do_exit(-E_KILLED);
        }
        goto repeat;
    }
    return -E_BAD_PROC;
found:
    if (proc == idleproc || proc == initproc) {
        panic("wait idleproc or initproc.\n");
    }
    if (code_store != NULL) {
        *code_store = proc->exit_code;
    }
    local_intr_save(intr_flag);
    {
        unhash_proc(proc);
        remove_links(proc);
    }
    local_intr_restore(intr_flag);
    put_kstack(proc);
    kfree(proc);
    return 0;
}

// do_kill - kill the process with the given pid by setting its flags with PF_EXITING
int
do_kill(int pid) {
    struct proc_struct *proc;
    if ((proc = find_proc(pid)) != NULL) {
        if (!(proc->flags & PF_EXITING)) {
            proc->flags |= PF_EXITING;
            if (proc->wait_state & WT_INTERRUPTED) {
                wakeup_proc(proc);
            }
            return 0;
        }
        return -E_KILLED;
    }
    return -E_INVAL;
}

// kernel_execve - do the SYS_exec syscall to exec a user program, called by the user_main kernel thread
static int
kernel_execve(const char *name, unsigned char *binary, size_t size) {
    int ret, len = strlen(name);
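    // Constraint bindings for the inline asm below: "=a"(ret) returns in eax,
    // "0"(SYS_exec) preloads that same register with the syscall number, and
    // the arguments travel in edx (name), ecx (len), ebx (binary), edi (size)
    // before "int T_SYSCALL" traps into the kernel.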
    asm volatile (
        "int %1;"
        : "=a" (ret)
        : "i" (T_SYSCALL), "0" (SYS_exec), "d" (name), "c" (len), "b" (binary), "D" (size)
        : "memory");
    return ret;
}

#define __KERNEL_EXECVE(name, binary, size) ({                          \
            cprintf("kernel_execve: pid = %d, name = \"%s\".\n",        \
                    current->pid, name);                                \
            kernel_execve(name, binary, (size_t)(size));                \
        })

#define KERNEL_EXECVE(x) ({                                             \
            extern unsigned char _binary_obj___user_##x##_out_start[],  \
                _binary_obj___user_##x##_out_size[];                    \
            __KERNEL_EXECVE(#x, _binary_obj___user_##x##_out_start,     \
                            _binary_obj___user_##x##_out_size);         \
        })

#define __KERNEL_EXECVE2(x, xstart, xsize) ({                           \
            extern unsigned char xstart[], xsize[];                     \
            __KERNEL_EXECVE(#x, xstart, (size_t)xsize);                 \
        })

#define KERNEL_EXECVE2(x, xstart, xsize)        __KERNEL_EXECVE2(x, xstart, xsize)
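/*
 * Example expansion (illustrative): KERNEL_EXECVE(exit) prints a banner and calls
 *     kernel_execve("exit", _binary_obj___user_exit_out_start,
 *                   (size_t)_binary_obj___user_exit_out_size);
 * where the _binary_obj___user_exit_out_* symbols are generated by the linker
 * when the user program image obj/__user_exit.out is embedded into the kernel.
 */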
// user_main - kernel thread used to exec a user program
static int
user_main(void *arg) {
#ifdef TEST
    KERNEL_EXECVE2(TEST, TESTSTART, TESTSIZE);
#else
    KERNEL_EXECVE(exit);
#endif
    panic("user_main execve failed.\n");
}

// init_main - the second kernel thread, used to create the user_main kernel thread
static int
init_main(void *arg) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t kernel_allocated_store = kallocated();
    int pid = kernel_thread(user_main, NULL, 0);
    if (pid <= 0) {
        panic("create user_main failed.\n");
    }
    extern void check_sync(void);
    check_sync();                // check the dining-philosophers synchronization problem
    while (do_wait(0, NULL) == 0) {
        schedule();
    }
    cprintf("all user-mode processes have quit.\n");
    assert(initproc->cptr == NULL && initproc->yptr == NULL && initproc->optr == NULL);
    assert(nr_process == 2);
    assert(list_next(&proc_list) == &(initproc->list_link));
    assert(list_prev(&proc_list) == &(initproc->list_link));
    assert(nr_free_pages_store == nr_free_pages());
    assert(kernel_allocated_store == kallocated());
    cprintf("init check memory pass.\n");
    return 0;
}

// proc_init - set up the first kernel thread idleproc ("idle") by hand and
//           - create the second kernel thread: init_main
void
proc_init(void) {
    int i;
    list_init(&proc_list);
    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        list_init(hash_list + i);
    }
    if ((idleproc = alloc_proc()) == NULL) {
        panic("cannot alloc idleproc.\n");
    }
    idleproc->pid = 0;
    idleproc->state = PROC_RUNNABLE;
    idleproc->kstack = (uintptr_t)bootstack;
    idleproc->need_resched = 1;
    set_proc_name(idleproc, "idle");
    nr_process ++;
    current = idleproc;
    int pid = kernel_thread(init_main, NULL, 0);
    if (pid <= 0) {
        panic("create init_main failed.\n");
    }
    initproc = find_proc(pid);
    set_proc_name(initproc, "init");
    assert(idleproc != NULL && idleproc->pid == 0);
    assert(initproc != NULL && initproc->pid == 1);
}

// cpu_idle - at the end of kern_init, the first kernel thread idleproc does the work below
void
cpu_idle(void) {
    while (1) {
        if (current->need_resched) {
            schedule();
        }
    }
}

//FOR LAB6: set the process's priority (a bigger value gets more CPU time)
void
lab6_set_priority(uint32_t priority)
{
    if (priority == 0)
        current->lab6_priority = 1;
    else current->lab6_priority = priority;
}

// do_sleep - set the current process's state to sleeping and add a timer with "time",
//          - then call the scheduler. if the process runs again, delete the timer first.
int
do_sleep(unsigned int time) {
    if (time == 0) {
        return 0;
    }
    bool intr_flag;
    local_intr_save(intr_flag);
    timer_t __timer, *timer = timer_init(&__timer, current, time);
    current->state = PROC_SLEEPING;
    current->wait_state = WT_TIMER;
    add_timer(timer);
    local_intr_restore(intr_flag);
    schedule();
    del_timer(timer);
    return 0;
}