Lab code for the Operating Systems (《操作系统》) course.
#include <proc.h>
#include <kmalloc.h>
#include <string.h>
#include <sync.h>
#include <pmm.h>
#include <error.h>
#include <sched.h>
#include <elf.h>
#include <vmm.h>
#include <trap.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
/* ------------- process/thread mechanism design & implementation -------------
(a simplified Linux process/thread mechanism)
introduction:
  ucore implements a simple process/thread mechanism. A process contains an independent memory space, at least one thread
of execution, kernel data (for management), processor state (for context switches), files (in lab6), etc. ucore needs to
manage all these details efficiently. In ucore, a thread is just a special kind of process (it shares its process's memory).
------------------------------
process state       :     meaning                  -- reason
    PROC_UNINIT     :   uninitialized              -- alloc_proc
    PROC_SLEEPING   :   sleeping                   -- try_free_pages, do_wait, do_sleep
    PROC_RUNNABLE   :   runnable (maybe running)   -- proc_init, wakeup_proc
    PROC_ZOMBIE     :   almost dead                -- do_exit
-----------------------------
process state changing:

  alloc_proc                                 RUNNING
      +                                   +--<----<--+
      +                                   + proc_run +
      V                                   +-->---->--+
PROC_UNINIT -- proc_init/wakeup_proc --> PROC_RUNNABLE -- try_free_pages/do_wait/do_sleep --> PROC_SLEEPING --
                                           A      +                                                           +
                                           |      +--- do_exit --> PROC_ZOMBIE                                +
                                           +                                                                  +
                                           -----------------------wakeup_proc----------------------------------
-----------------------------
process relations
parent:           proc->parent  (proc is the child)
children:         proc->cptr    (proc is the parent)
older sibling:    proc->optr    (proc is the younger sibling)
younger sibling:  proc->yptr    (proc is the older sibling)
-----------------------------
related syscalls for processes:
SYS_exit        : process exits                             -->do_exit
SYS_fork        : create a child process, dup mm            -->do_fork-->wakeup_proc
SYS_wait        : wait for a child process                  -->do_wait
SYS_exec        : after fork, the process executes a program -->load a program and refresh the mm
SYS_clone       : create a child thread                     -->do_fork-->wakeup_proc
SYS_yield       : the process flags itself as needing rescheduling -- proc->need_sched = 1, then the scheduler will reschedule this process
SYS_sleep       : process sleeps                            -->do_sleep
SYS_kill        : kill a process                            -->do_kill-->proc->flags |= PF_EXITING
                                                                  -->wakeup_proc-->do_wait-->do_exit
SYS_getpid      : get the process's pid
*/
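
/*
 * Added note (not part of the original file): the parent/children/sibling links
 * above form a per-parent singly linked list of children. A parent reaches all
 * of its children by starting at its cptr and following optr, which is exactly
 * how do_wait and do_exit below walk them:
 *
 *     struct proc_struct *child;
 *     for (child = parent->cptr; child != NULL; child = child->optr) {
 *         // child->parent == parent; child->yptr points back to the
 *         // previously visited (younger) sibling, or NULL for the newest child
 *     }
 */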
// the process set's list
list_entry_t proc_list;

#define HASH_SHIFT          10
#define HASH_LIST_SIZE      (1 << HASH_SHIFT)
#define pid_hashfn(x)       (hash32(x, HASH_SHIFT))

// hash list for the process set, indexed by pid
static list_entry_t hash_list[HASH_LIST_SIZE];

// idle proc
struct proc_struct *idleproc = NULL;
// init proc
struct proc_struct *initproc = NULL;
// current proc
struct proc_struct *current = NULL;

static int nr_process = 0;

void kernel_thread_entry(void);
void forkrets(struct trapframe *tf);
void switch_to(struct context *from, struct context *to);
// alloc_proc - alloc a proc_struct and init all fields of proc_struct
static struct proc_struct *
alloc_proc(void) {
    struct proc_struct *proc = kmalloc(sizeof(struct proc_struct));
    if (proc != NULL) {
        //LAB4:EXERCISE1 YOUR CODE
        /*
         * below fields in proc_struct need to be initialized
         *     enum proc_state state;          // Process state
         *     int pid;                        // Process ID
         *     int runs;                       // the number of times the process has run
         *     uintptr_t kstack;               // Process kernel stack
         *     volatile bool need_resched;     // bool value: need to be rescheduled to release CPU?
         *     struct proc_struct *parent;     // the parent process
         *     struct mm_struct *mm;           // Process's memory management field
         *     struct context context;         // Switch here to run process
         *     struct trapframe *tf;           // Trap frame for current interrupt
         *     uintptr_t cr3;                  // CR3 register: the base addr of the Page Directory Table (PDT)
         *     uint32_t flags;                 // Process flag
         *     char name[PROC_NAME_LEN + 1];   // Process name
         */
        proc->state = PROC_UNINIT;
        proc->pid = -1;
        proc->runs = 0;
        proc->kstack = 0;
        proc->need_resched = 0;
        proc->parent = NULL;
        proc->mm = NULL;
        memset(&(proc->context), 0, sizeof(struct context));
        proc->tf = NULL;
        proc->cr3 = boot_cr3;
        proc->flags = 0;
        memset(proc->name, 0, PROC_NAME_LEN);
        proc->wait_state = 0;
        proc->cptr = proc->optr = proc->yptr = NULL;
    }
    return proc;
}
// set_proc_name - set the name of proc
char *
set_proc_name(struct proc_struct *proc, const char *name) {
    memset(proc->name, 0, sizeof(proc->name));
    return memcpy(proc->name, name, PROC_NAME_LEN);
}

// get_proc_name - get the name of proc
char *
get_proc_name(struct proc_struct *proc) {
    static char name[PROC_NAME_LEN + 1];
    memset(name, 0, sizeof(name));
    return memcpy(name, proc->name, PROC_NAME_LEN);
}
// set_links - set the relation links of process
static void
set_links(struct proc_struct *proc) {
    list_add(&proc_list, &(proc->list_link));
    proc->yptr = NULL;
    if ((proc->optr = proc->parent->cptr) != NULL) {
        proc->optr->yptr = proc;
    }
    proc->parent->cptr = proc;
    nr_process ++;
}

// remove_links - clean the relation links of process
static void
remove_links(struct proc_struct *proc) {
    list_del(&(proc->list_link));
    if (proc->optr != NULL) {
        proc->optr->yptr = proc->yptr;
    }
    if (proc->yptr != NULL) {
        proc->yptr->optr = proc->optr;
    }
    else {
        proc->parent->cptr = proc->optr;
    }
    nr_process --;
}
// get_pid - alloc a unique pid for a process
static int
get_pid(void) {
    static_assert(MAX_PID > MAX_PROCESS);
    struct proc_struct *proc;
    list_entry_t *list = &proc_list, *le;
    // last_pid is the most recently allocated pid; no pid strictly between
    // last_pid and next_safe was in use at the time of the last scan
    static int next_safe = MAX_PID, last_pid = MAX_PID;
    if (++ last_pid >= MAX_PID) {
        last_pid = 1;
        goto inside;
    }
    if (last_pid >= next_safe) {
    inside:
        next_safe = MAX_PID;
    repeat:
        // scan the whole process list to re-establish the invariant above
        le = list;
        while ((le = list_next(le)) != list) {
            proc = le2proc(le, list_link);
            if (proc->pid == last_pid) {
                if (++ last_pid >= next_safe) {
                    if (last_pid >= MAX_PID) {
                        last_pid = 1;
                    }
                    next_safe = MAX_PID;
                    goto repeat;
                }
            }
            else if (proc->pid > last_pid && next_safe > proc->pid) {
                next_safe = proc->pid;
            }
        }
    }
    return last_pid;
}
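
/*
 * Added illustration (not in the original file): suppose pids 1 and 2 are in use
 * and nothing above them. A call to get_pid scans proc_list, bumps last_pid past
 * 1 and 2, and returns 3 with next_safe left at MAX_PID (or at the smallest used
 * pid above 3, if any). Later calls can then hand out 4, 5, ... without scanning
 * again until last_pid reaches next_safe or wraps around at MAX_PID.
 */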
// proc_run - make process "proc" run on the cpu
// NOTE: before calling switch_to, load the base addr of "proc"'s new PDT
void
proc_run(struct proc_struct *proc) {
    if (proc != current) {
        bool intr_flag;
        struct proc_struct *prev = current, *next = proc;
        local_intr_save(intr_flag);
        {
            current = proc;
            load_esp0(next->kstack + KSTACKSIZE);
            lcr3(next->cr3);
            switch_to(&(prev->context), &(next->context));
        }
        local_intr_restore(intr_flag);
    }
}
// forkret -- the first kernel entry point of a new thread/process
// NOTE: the addr of forkret is set in the copy_thread function
//       after switch_to, the current proc will execute here.
static void
forkret(void) {
    forkrets(current->tf);
}

// hash_proc - add proc into the proc hash_list
static void
hash_proc(struct proc_struct *proc) {
    list_add(hash_list + pid_hashfn(proc->pid), &(proc->hash_link));
}

// unhash_proc - delete proc from the proc hash_list
static void
unhash_proc(struct proc_struct *proc) {
    list_del(&(proc->hash_link));
}
// find_proc - find proc from the proc hash_list according to pid
struct proc_struct *
find_proc(int pid) {
    if (0 < pid && pid < MAX_PID) {
        list_entry_t *list = hash_list + pid_hashfn(pid), *le = list;
        while ((le = list_next(le)) != list) {
            struct proc_struct *proc = le2proc(le, hash_link);
            if (proc->pid == pid) {
                return proc;
            }
        }
    }
    return NULL;
}
// kernel_thread - create a kernel thread using the "fn" function
// NOTE: the contents of the temp trapframe tf will be copied to
//       proc->tf in the do_fork-->copy_thread function
int
kernel_thread(int (*fn)(void *), void *arg, uint32_t clone_flags) {
    struct trapframe tf;
    memset(&tf, 0, sizeof(struct trapframe));
    tf.tf_cs = KERNEL_CS;
    tf.tf_ds = tf.tf_es = tf.tf_ss = KERNEL_DS;
    tf.tf_regs.reg_ebx = (uint32_t)fn;
    tf.tf_regs.reg_edx = (uint32_t)arg;
    tf.tf_eip = (uint32_t)kernel_thread_entry;
    return do_fork(clone_flags | CLONE_VM, 0, &tf);
}
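
/*
 * Added note (a sketch, not part of the original file): kernel threads are started
 * exactly the way proc_init and init_main do below, e.g.
 *
 *     int pid = kernel_thread(init_main, NULL, 0);
 *     if (pid <= 0) {
 *         panic("create init_main failed.\n");
 *     }
 *
 * The temporary trapframe parks fn in ebx and arg in edx; kernel_thread_entry
 * (assembly, declared above) is expected to push arg and call fn(arg) once the
 * new thread is scheduled for the first time.
 */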
// setup_kstack - alloc pages with size KSTACKPAGE as the process kernel stack
static int
setup_kstack(struct proc_struct *proc) {
    struct Page *page = alloc_pages(KSTACKPAGE);
    if (page != NULL) {
        proc->kstack = (uintptr_t)page2kva(page);
        return 0;
    }
    return -E_NO_MEM;
}

// put_kstack - free the memory space of the process kernel stack
static void
put_kstack(struct proc_struct *proc) {
    free_pages(kva2page((void *)(proc->kstack)), KSTACKPAGE);
}

// setup_pgdir - alloc one page as the PDT
static int
setup_pgdir(struct mm_struct *mm) {
    struct Page *page;
    if ((page = alloc_page()) == NULL) {
        return -E_NO_MEM;
    }
    pde_t *pgdir = page2kva(page);
    memcpy(pgdir, boot_pgdir, PGSIZE);
    // recursive self-mapping: make the new PDT reachable through the VPT window
    pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W;
    mm->pgdir = pgdir;
    return 0;
}

// put_pgdir - free the memory space of the PDT
static void
put_pgdir(struct mm_struct *mm) {
    free_page(kva2page(mm->pgdir));
}
// copy_mm - process "proc" duplicates OR shares process "current"'s mm according to clone_flags
//         - if clone_flags & CLONE_VM, then "share"; else "duplicate"
static int
copy_mm(uint32_t clone_flags, struct proc_struct *proc) {
    struct mm_struct *mm, *oldmm = current->mm;

    /* current is a kernel thread */
    if (oldmm == NULL) {
        return 0;
    }
    if (clone_flags & CLONE_VM) {
        mm = oldmm;
        goto good_mm;
    }

    int ret = -E_NO_MEM;
    if ((mm = mm_create()) == NULL) {
        goto bad_mm;
    }
    if (setup_pgdir(mm) != 0) {
        goto bad_pgdir_cleanup_mm;
    }

    lock_mm(oldmm);
    {
        ret = dup_mmap(mm, oldmm);
    }
    unlock_mm(oldmm);

    if (ret != 0) {
        goto bad_dup_cleanup_mmap;
    }

good_mm:
    mm_count_inc(mm);
    proc->mm = mm;
    proc->cr3 = PADDR(mm->pgdir);
    return 0;
bad_dup_cleanup_mmap:
    exit_mmap(mm);
    put_pgdir(mm);
bad_pgdir_cleanup_mm:
    mm_destroy(mm);
bad_mm:
    return ret;
}
// copy_thread - set up the trapframe at the top of the process's kernel stack, and
//             - set up the kernel entry point and stack of the process
static void
copy_thread(struct proc_struct *proc, uintptr_t esp, struct trapframe *tf) {
    proc->tf = (struct trapframe *)(proc->kstack + KSTACKSIZE) - 1;
    *(proc->tf) = *tf;
    proc->tf->tf_regs.reg_eax = 0;
    proc->tf->tf_esp = esp;
    proc->tf->tf_eflags |= FL_IF;
    proc->context.eip = (uintptr_t)forkret;
    proc->context.esp = (uintptr_t)(proc->tf);
}
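
/*
 * Added note (a sketch, not part of the original file): copy_thread is what lets a
 * forked child start running correctly. reg_eax is forced to 0 so that, on the
 * fork path, the child observes a 0 return value while the parent receives the
 * child's pid from do_fork. Because context.eip = forkret and context.esp points
 * at the copied trapframe, the first time the scheduler switches to this process
 * the flow is roughly:
 *
 *     switch_to(&prev->context, &next->context);  // resumes at next->context.eip == forkret
 *     forkret();                                  // calls forkrets(current->tf)
 *     // forkrets (trap entry asm) restores the trapframe and returns into the new thread
 */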
/* do_fork -     the parent process creates a new child process
 * @clone_flags: used to guide how to clone the child process
 * @stack:       the parent's user stack pointer. if stack == 0, it means to fork a kernel thread.
 * @tf:          the trapframe info, which will be copied to the child process's proc->tf
 */
int
do_fork(uint32_t clone_flags, uintptr_t stack, struct trapframe *tf) {
    int ret = -E_NO_FREE_PROC;
    struct proc_struct *proc;
    if (nr_process >= MAX_PROCESS) {
        goto fork_out;
    }
    ret = -E_NO_MEM;
    //LAB4:EXERCISE2 YOUR CODE
    /*
     * Some useful MACROs, functions and DEFINEs; you can use them in the implementation below.
     * MACROs or Functions:
     *   alloc_proc:   create a proc struct and init its fields (lab4:exercise1)
     *   setup_kstack: alloc pages with size KSTACKPAGE as the process kernel stack
     *   copy_mm:      process "proc" duplicates OR shares process "current"'s mm according to clone_flags
     *                 if clone_flags & CLONE_VM, then "share"; else "duplicate"
     *   copy_thread:  set up the trapframe at the top of the process's kernel stack, and
     *                 set up the kernel entry point and stack of the process
     *   hash_proc:    add proc into the proc hash_list
     *   get_pid:      alloc a unique pid for the process
     *   wakeup_proc:  set proc->state = PROC_RUNNABLE
     * VARIABLES:
     *   proc_list:    the process set's list
     *   nr_process:   the number of processes in the set
     */
    //    1. call alloc_proc to allocate a proc_struct
    //    2. call setup_kstack to allocate a kernel stack for the child process
    //    3. call copy_mm to dup OR share mm according to clone_flags
    //    4. call copy_thread to set up tf & context in proc_struct
    //    5. insert proc_struct into hash_list && proc_list
    //    6. call wakeup_proc to make the new child process RUNNABLE
    //    7. set ret value using the child proc's pid
    if ((proc = alloc_proc()) == NULL) {
        goto fork_out;
    }

    proc->parent = current;
    assert(current->wait_state == 0);

    if (setup_kstack(proc) != 0) {
        goto bad_fork_cleanup_proc;
    }
    if (copy_mm(clone_flags, proc) != 0) {
        goto bad_fork_cleanup_kstack;
    }
    copy_thread(proc, stack, tf);

    bool intr_flag;
    local_intr_save(intr_flag);
    {
        proc->pid = get_pid();
        hash_proc(proc);
        set_links(proc);
    }
    local_intr_restore(intr_flag);

    wakeup_proc(proc);

    ret = proc->pid;
fork_out:
    return ret;

bad_fork_cleanup_kstack:
    put_kstack(proc);
bad_fork_cleanup_proc:
    kfree(proc);
    goto fork_out;
}
// do_exit - called by sys_exit
//   1. call exit_mmap & put_pgdir & mm_destroy to free almost all the memory space of the process
//   2. set the process's state to PROC_ZOMBIE, then call wakeup_proc(parent) to ask the parent to reclaim it
//   3. call the scheduler to switch to another process
int
do_exit(int error_code) {
    if (current == idleproc) {
        panic("idleproc exit.\n");
    }
    if (current == initproc) {
        panic("initproc exit.\n");
    }

    struct mm_struct *mm = current->mm;
    if (mm != NULL) {
        lcr3(boot_cr3);
        if (mm_count_dec(mm) == 0) {
            exit_mmap(mm);
            put_pgdir(mm);
            mm_destroy(mm);
        }
        current->mm = NULL;
    }
    current->state = PROC_ZOMBIE;
    current->exit_code = error_code;

    bool intr_flag;
    struct proc_struct *proc;
    local_intr_save(intr_flag);
    {
        proc = current->parent;
        if (proc->wait_state == WT_CHILD) {
            wakeup_proc(proc);
        }
        // re-parent all children to initproc; if a child is already a zombie,
        // wake initproc so it can reap that child
        while (current->cptr != NULL) {
            proc = current->cptr;
            current->cptr = proc->optr;

            proc->yptr = NULL;
            if ((proc->optr = initproc->cptr) != NULL) {
                initproc->cptr->yptr = proc;
            }
            proc->parent = initproc;
            initproc->cptr = proc;
            if (proc->state == PROC_ZOMBIE) {
                if (initproc->wait_state == WT_CHILD) {
                    wakeup_proc(initproc);
                }
            }
        }
    }
    local_intr_restore(intr_flag);

    schedule();

    panic("do_exit will not return!! %d.\n", current->pid);
}
/* load_icode - load the content of a binary program (ELF format) as the new content of the current process
 * @binary: the memory addr of the content of the binary program
 * @size:   the size of the content of the binary program
 */
static int
load_icode(unsigned char *binary, size_t size) {
    if (current->mm != NULL) {
        panic("load_icode: current->mm must be empty.\n");
    }

    int ret = -E_NO_MEM;
    struct mm_struct *mm;
    //(1) create a new mm for the current process
    if ((mm = mm_create()) == NULL) {
        goto bad_mm;
    }
    //(2) create a new PDT, and mm->pgdir = kernel virtual addr of the PDT
    if (setup_pgdir(mm) != 0) {
        goto bad_pgdir_cleanup_mm;
    }
    //(3) copy TEXT/DATA sections and build BSS parts of the binary into the memory space of the process
    struct Page *page;
    //(3.1) get the file header of the binary program (ELF format)
    struct elfhdr *elf = (struct elfhdr *)binary;
    //(3.2) get the entry of the program section headers of the binary program (ELF format)
    struct proghdr *ph = (struct proghdr *)(binary + elf->e_phoff);
    //(3.3) is this program valid?
    if (elf->e_magic != ELF_MAGIC) {
        ret = -E_INVAL_ELF;
        goto bad_elf_cleanup_pgdir;
    }

    uint32_t vm_flags, perm;
    struct proghdr *ph_end = ph + elf->e_phnum;
    for (; ph < ph_end; ph ++) {
        //(3.4) find every loadable program section header
        if (ph->p_type != ELF_PT_LOAD) {
            continue ;
        }
        if (ph->p_filesz > ph->p_memsz) {
            ret = -E_INVAL_ELF;
            goto bad_cleanup_mmap;
        }
        if (ph->p_filesz == 0) {
            continue ;
        }
        //(3.5) call mm_map to set up the new vma (ph->p_va, ph->p_memsz)
        vm_flags = 0, perm = PTE_U;
        if (ph->p_flags & ELF_PF_X) vm_flags |= VM_EXEC;
        if (ph->p_flags & ELF_PF_W) vm_flags |= VM_WRITE;
        if (ph->p_flags & ELF_PF_R) vm_flags |= VM_READ;
        if (vm_flags & VM_WRITE) perm |= PTE_W;
        if ((ret = mm_map(mm, ph->p_va, ph->p_memsz, vm_flags, NULL)) != 0) {
            goto bad_cleanup_mmap;
        }
        unsigned char *from = binary + ph->p_offset;
        size_t off, size;
        uintptr_t start = ph->p_va, end, la = ROUNDDOWN(start, PGSIZE);

        ret = -E_NO_MEM;

        //(3.6) alloc memory, and copy the contents of every program section (from, from+end) to the process's memory (la, la+end)
        end = ph->p_va + ph->p_filesz;
        //(3.6.1) copy the TEXT/DATA section of the binary program
        while (start < end) {
            if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
                goto bad_cleanup_mmap;
            }
            off = start - la, size = PGSIZE - off, la += PGSIZE;
            if (end < la) {
                size -= la - end;
            }
            memcpy(page2kva(page) + off, from, size);
            start += size, from += size;
        }
        //(3.6.2) build the BSS section of the binary program
        end = ph->p_va + ph->p_memsz;
        if (start < la) {
            /* ph->p_memsz == ph->p_filesz */
            if (start == end) {
                continue ;
            }
            off = start + PGSIZE - la, size = PGSIZE - off;
            if (end < la) {
                size -= la - end;
            }
            memset(page2kva(page) + off, 0, size);
            start += size;
            assert((end < la && start == end) || (end >= la && start == la));
        }
        while (start < end) {
            if ((page = pgdir_alloc_page(mm->pgdir, la, perm)) == NULL) {
                goto bad_cleanup_mmap;
            }
            off = start - la, size = PGSIZE - off, la += PGSIZE;
            if (end < la) {
                size -= la - end;
            }
            memset(page2kva(page) + off, 0, size);
            start += size;
        }
    }
    //(4) build user stack memory
    vm_flags = VM_READ | VM_WRITE | VM_STACK;
    if ((ret = mm_map(mm, USTACKTOP - USTACKSIZE, USTACKSIZE, vm_flags, NULL)) != 0) {
        goto bad_cleanup_mmap;
    }
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - PGSIZE, PTE_USER) != NULL);
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - 2*PGSIZE, PTE_USER) != NULL);
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - 3*PGSIZE, PTE_USER) != NULL);
    assert(pgdir_alloc_page(mm->pgdir, USTACKTOP - 4*PGSIZE, PTE_USER) != NULL);

    //(5) set the current process's mm and cr3, and set the CR3 reg = physical addr of the Page Directory
    mm_count_inc(mm);
    current->mm = mm;
    current->cr3 = PADDR(mm->pgdir);
    lcr3(PADDR(mm->pgdir));

    //(6) set up the trapframe for the user environment
    struct trapframe *tf = current->tf;
    memset(tf, 0, sizeof(struct trapframe));
    /* LAB5:EXERCISE1 YOUR CODE
     * should set tf_cs, tf_ds, tf_es, tf_ss, tf_esp, tf_eip, tf_eflags
     * NOTICE: If we set the trapframe correctly, then the user-level process can return to USER MODE from the kernel. So
     *          tf_cs should be the USER_CS segment (see memlayout.h)
     *          tf_ds = tf_es = tf_ss should be the USER_DS segment
     *          tf_esp should be the top addr of the user stack (USTACKTOP)
     *          tf_eip should be the entry point of this binary program (elf->e_entry)
     *          tf_eflags should be set to enable the CPU to deliver interrupts
     */
    tf->tf_cs = USER_CS;
    tf->tf_ds = tf->tf_es = tf->tf_ss = USER_DS;
    tf->tf_esp = USTACKTOP;
    tf->tf_eip = elf->e_entry;
    tf->tf_eflags = FL_IF;
    ret = 0;
out:
    return ret;
bad_cleanup_mmap:
    exit_mmap(mm);
bad_elf_cleanup_pgdir:
    put_pgdir(mm);
bad_pgdir_cleanup_mm:
    mm_destroy(mm);
bad_mm:
    goto out;
}
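
/*
 * Added note (a sketch, not part of the original file): after load_icode succeeds,
 * the user address space built above looks roughly like
 *
 *     [USTACKTOP - USTACKSIZE, USTACKTOP)   user stack vma (its top 4 pages pre-mapped)
 *     [ph->p_va, ph->p_va + ph->p_memsz)    one vma per ELF_PT_LOAD segment, with the
 *                                           first p_filesz bytes copied from the binary
 *                                           and the remainder zero-filled (BSS)
 *
 * and current->tf has been rewritten so that returning from the SYS_exec trap
 * enters user mode at elf->e_entry with esp == USTACKTOP.
 */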
// do_execve - call exit_mmap(mm) & put_pgdir(mm) to reclaim the memory space of the current process
//           - call load_icode to set up the new memory space according to the binary program
int
do_execve(const char *name, size_t len, unsigned char *binary, size_t size) {
    struct mm_struct *mm = current->mm;
    if (!user_mem_check(mm, (uintptr_t)name, len, 0)) {
        return -E_INVAL;
    }
    if (len > PROC_NAME_LEN) {
        len = PROC_NAME_LEN;
    }
    char local_name[PROC_NAME_LEN + 1];
    memset(local_name, 0, sizeof(local_name));
    memcpy(local_name, name, len);

    if (mm != NULL) {
        lcr3(boot_cr3);
        if (mm_count_dec(mm) == 0) {
            exit_mmap(mm);
            put_pgdir(mm);
            mm_destroy(mm);
        }
        current->mm = NULL;
    }
    int ret;
    if ((ret = load_icode(binary, size)) != 0) {
        goto execve_exit;
    }
    set_proc_name(current, local_name);
    return 0;

execve_exit:
    do_exit(ret);
    panic("already exit: %e.\n", ret);
}
// do_yield - ask the scheduler to reschedule
int
do_yield(void) {
    current->need_resched = 1;
    return 0;
}
// do_wait - wait for one child (pid != 0) or any child (pid == 0) in PROC_ZOMBIE state, and
//           free the kernel stack memory space and proc struct of this child.
// NOTE: only after do_wait are all resources of the child process freed.
int
do_wait(int pid, int *code_store) {
    struct mm_struct *mm = current->mm;
    if (code_store != NULL) {
        if (!user_mem_check(mm, (uintptr_t)code_store, sizeof(int), 1)) {
            return -E_INVAL;
        }
    }

    struct proc_struct *proc;
    bool intr_flag, haskid;
repeat:
    haskid = 0;
    if (pid != 0) {
        proc = find_proc(pid);
        if (proc != NULL && proc->parent == current) {
            haskid = 1;
            if (proc->state == PROC_ZOMBIE) {
                goto found;
            }
        }
    }
    else {
        proc = current->cptr;
        for (; proc != NULL; proc = proc->optr) {
            haskid = 1;
            if (proc->state == PROC_ZOMBIE) {
                goto found;
            }
        }
    }
    if (haskid) {
        // there are children but none is a zombie yet: sleep until one exits
        current->state = PROC_SLEEPING;
        current->wait_state = WT_CHILD;
        schedule();
        if (current->flags & PF_EXITING) {
            do_exit(-E_KILLED);
        }
        goto repeat;
    }
    return -E_BAD_PROC;

found:
    if (proc == idleproc || proc == initproc) {
        panic("wait idleproc or initproc.\n");
    }
    if (code_store != NULL) {
        *code_store = proc->exit_code;
    }
    local_intr_save(intr_flag);
    {
        unhash_proc(proc);
        remove_links(proc);
    }
    local_intr_restore(intr_flag);
    put_kstack(proc);
    kfree(proc);
    return 0;
}
// do_kill - kill the process with this pid by setting the process's flags with PF_EXITING
int
do_kill(int pid) {
    struct proc_struct *proc;
    if ((proc = find_proc(pid)) != NULL) {
        if (!(proc->flags & PF_EXITING)) {
            proc->flags |= PF_EXITING;
            if (proc->wait_state & WT_INTERRUPTED) {
                wakeup_proc(proc);
            }
            return 0;
        }
        return -E_KILLED;
    }
    return -E_INVAL;
}
// kernel_execve - do the SYS_exec syscall to exec a user program, called by the user_main kernel_thread
static int
kernel_execve(const char *name, unsigned char *binary, size_t size) {
    int ret, len = strlen(name);
    // issue the system call directly: eax = SYS_exec ("0" reuses the "=a" slot),
    // edx = name, ecx = len, ebx = binary, edi = size; the return value comes back in eax
    asm volatile (
        "int %1;"
        : "=a" (ret)
        : "i" (T_SYSCALL), "0" (SYS_exec), "d" (name), "c" (len), "b" (binary), "D" (size)
        : "memory");
    return ret;
}
#define __KERNEL_EXECVE(name, binary, size) ({                          \
            cprintf("kernel_execve: pid = %d, name = \"%s\".\n",        \
                    current->pid, name);                                \
            kernel_execve(name, binary, (size_t)(size));                \
        })

#define KERNEL_EXECVE(x) ({                                             \
            extern unsigned char _binary_obj___user_##x##_out_start[],  \
                _binary_obj___user_##x##_out_size[];                    \
            __KERNEL_EXECVE(#x, _binary_obj___user_##x##_out_start,     \
                            _binary_obj___user_##x##_out_size);         \
        })

#define __KERNEL_EXECVE2(x, xstart, xsize) ({                           \
            extern unsigned char xstart[], xsize[];                     \
            __KERNEL_EXECVE(#x, xstart, (size_t)xsize);                 \
        })

#define KERNEL_EXECVE2(x, xstart, xsize)        __KERNEL_EXECVE2(x, xstart, xsize)
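
/*
 * Added note (not part of the original file): KERNEL_EXECVE(exit), as used in
 * user_main below, expands roughly to
 *
 *     extern unsigned char _binary_obj___user_exit_out_start[],
 *         _binary_obj___user_exit_out_size[];
 *     cprintf("kernel_execve: pid = %d, name = \"%s\".\n", current->pid, "exit");
 *     kernel_execve("exit", _binary_obj___user_exit_out_start,
 *                   (size_t)_binary_obj___user_exit_out_size);
 *
 * i.e. it picks up the start/size symbols the build is expected to create when
 * it embeds the user program obj/__user_exit.out into the kernel image.
 */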
// user_main - kernel thread used to exec a user program
static int
user_main(void *arg) {
#ifdef TEST
    KERNEL_EXECVE2(TEST, TESTSTART, TESTSIZE);
#else
    KERNEL_EXECVE(exit);
#endif
    panic("user_main execve failed.\n");
}
// init_main - the second kernel thread, used to create the user_main kernel thread
static int
init_main(void *arg) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t kernel_allocated_store = kallocated();

    int pid = kernel_thread(user_main, NULL, 0);
    if (pid <= 0) {
        panic("create user_main failed.\n");
    }

    while (do_wait(0, NULL) == 0) {
        schedule();
    }

    cprintf("all user-mode processes have quit.\n");
    assert(initproc->cptr == NULL && initproc->yptr == NULL && initproc->optr == NULL);
    assert(nr_process == 2);
    assert(list_next(&proc_list) == &(initproc->list_link));
    assert(list_prev(&proc_list) == &(initproc->list_link));
    assert(nr_free_pages_store == nr_free_pages());
    assert(kernel_allocated_store == kallocated());
    cprintf("init check memory pass.\n");
    return 0;
}
// proc_init - set up the first kernel thread idleproc ("idle") by hand and
//           - create the second kernel thread: init_main
void
proc_init(void) {
    int i;

    list_init(&proc_list);
    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        list_init(hash_list + i);
    }

    if ((idleproc = alloc_proc()) == NULL) {
        panic("cannot alloc idleproc.\n");
    }

    idleproc->pid = 0;
    idleproc->state = PROC_RUNNABLE;
    idleproc->kstack = (uintptr_t)bootstack;
    idleproc->need_resched = 1;
    set_proc_name(idleproc, "idle");
    nr_process ++;

    current = idleproc;

    int pid = kernel_thread(init_main, NULL, 0);
    if (pid <= 0) {
        panic("create init_main failed.\n");
    }

    initproc = find_proc(pid);
    set_proc_name(initproc, "init");

    assert(idleproc != NULL && idleproc->pid == 0);
    assert(initproc != NULL && initproc->pid == 1);
}
// cpu_idle - at the end of kern_init, the first kernel thread idleproc runs this loop:
//            whenever the current process needs rescheduling, call schedule()
void
cpu_idle(void) {
    while (1) {
        if (current->need_resched) {
            schedule();
        }
    }
}