
change slab based kmalloc to slob based kmalloc

main · chyyuu, 11 years ago
commit 8a6deabc73
39 changed files with 1290 additions and 5784 deletions
  1. code/lab4/kern/libs/rb_tree.c (+0, -528)
  2. code/lab4/kern/libs/rb_tree.h (+0, -32)
  3. code/lab4/kern/mm/kmalloc.c (+253, -578)
  4. code/lab4/kern/mm/kmalloc.h (+3, -3)
  5. code/lab4/kern/mm/memlayout.h (+0, -5)
  6. code/lab4/kern/mm/vmm.c (+0, -4)
  7. code/lab4/tools/grade.sh (+2, -2)
  8. code/lab5/kern/libs/rb_tree.c (+0, -528)
  9. code/lab5/kern/libs/rb_tree.h (+0, -32)
  10. code/lab5/kern/mm/kmalloc.c (+253, -583)
  11. code/lab5/kern/mm/kmalloc.h (+3, -3)
  12. code/lab5/kern/mm/memlayout.h (+0, -5)
  13. code/lab5/kern/mm/vmm.c (+0, -4)
  14. code/lab5/kern/process/proc.c (+1, -2)
  15. code/lab5/tools/grade.sh (+1, -1)
  16. code/lab6/kern/libs/rb_tree.c (+0, -528)
  17. code/lab6/kern/libs/rb_tree.h (+0, -32)
  18. code/lab6/kern/mm/kmalloc.c (+253, -583)
  19. code/lab6/kern/mm/kmalloc.h (+3, -3)
  20. code/lab6/kern/mm/memlayout.h (+0, -5)
  21. code/lab6/kern/mm/vmm.c (+0, -4)
  22. code/lab6/kern/process/proc.c (+1, -2)
  23. code/lab6/tools/grade.sh (+1, -1)
  24. code/lab7/kern/libs/rb_tree.c (+0, -528)
  25. code/lab7/kern/libs/rb_tree.h (+0, -32)
  26. code/lab7/kern/mm/kmalloc.c (+253, -583)
  27. code/lab7/kern/mm/kmalloc.h (+3, -3)
  28. code/lab7/kern/mm/memlayout.h (+0, -5)
  29. code/lab7/kern/mm/vmm.c (+0, -4)
  30. code/lab7/kern/process/proc.c (+1, -2)
  31. code/lab7/tools/grade.sh (+1, -1)
  32. code/lab8/kern/libs/rb_tree.c (+0, -528)
  33. code/lab8/kern/libs/rb_tree.h (+0, -32)
  34. code/lab8/kern/mm/kmalloc.c (+253, -583)
  35. code/lab8/kern/mm/kmalloc.h (+3, -3)
  36. code/lab8/kern/mm/memlayout.h (+0, -5)
  37. code/lab8/kern/mm/vmm.c (+0, -4)
  38. code/lab8/kern/process/proc.c (+1, -2)
  39. code/lab8/tools/grade.sh (+1, -1)

code/lab4/kern/libs/rb_tree.c (+0, -528)

@@ -1,528 +0,0 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <kmalloc.h>
#include <rb_tree.h>
#include <assert.h>
/* rb_node_create - create a new rb_node */
static inline rb_node *
rb_node_create(void) {
return kmalloc(sizeof(rb_node));
}
/* rb_tree_empty - tests if tree is empty */
static inline bool
rb_tree_empty(rb_tree *tree) {
rb_node *nil = tree->nil, *root = tree->root;
return root->left == nil;
}
/* *
* rb_tree_create - creates a new red-black tree; the 'compare' function
* is required. Returns 'NULL' on failure.
*
* Note that, root->left should always point to the node that is the root
* of the tree. And nil points to a 'NULL' node which should always be
* black and may have arbitrary children and parent node.
* */
rb_tree *
rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2)) {
assert(compare != NULL);
rb_tree *tree;
rb_node *nil, *root;
if ((tree = kmalloc(sizeof(rb_tree))) == NULL) {
goto bad_tree;
}
tree->compare = compare;
if ((nil = rb_node_create()) == NULL) {
goto bad_node_cleanup_tree;
}
nil->parent = nil->left = nil->right = nil;
nil->red = 0;
tree->nil = nil;
if ((root = rb_node_create()) == NULL) {
goto bad_node_cleanup_nil;
}
root->parent = root->left = root->right = nil;
root->red = 0;
tree->root = root;
return tree;
bad_node_cleanup_nil:
kfree(nil);
bad_node_cleanup_tree:
kfree(tree);
bad_tree:
return NULL;
}
/* *
* FUNC_ROTATE - rotates as described in "Introduction to Algorithms".
*
* For example, FUNC_ROTATE(rb_left_rotate, left, right) can be expanded to a
* left-rotate function, which requires a red-black 'tree' and a node 'x'
* to be rotated on. Basically, this function, named rb_left_rotate, makes 'x'
* the left child of its original right child 'y', lets 'y' take the place of
* 'x' in the tree, and finally fixes the other links accordingly.
*
* FUNC_ROTATE(xx, left, right) means left-rotate,
* and FUNC_ROTATE(xx, right, left) means right-rotate.
* */
#define FUNC_ROTATE(func_name, _left, _right) \
static void \
func_name(rb_tree *tree, rb_node *x) { \
rb_node *nil = tree->nil, *y = x->_right; \
assert(x != tree->root && x != nil && y != nil); \
x->_right = y->_left; \
if (y->_left != nil) { \
y->_left->parent = x; \
} \
y->parent = x->parent; \
if (x == x->parent->_left) { \
x->parent->_left = y; \
} \
else { \
x->parent->_right = y; \
} \
y->_left = x; \
x->parent = y; \
assert(!(nil->red)); \
}
FUNC_ROTATE(rb_left_rotate, left, right);
FUNC_ROTATE(rb_right_rotate, right, left);
#undef FUNC_ROTATE
#define COMPARE(tree, node1, node2) \
((tree))->compare((node1), (node2))
/* *
* rb_insert_binary - insert @node to red-black @tree as if it were
* a regular binary tree. This function is only intended to be called
* by function rb_insert.
* */
static inline void
rb_insert_binary(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node, *nil = tree->nil, *root = tree->root;
z->left = z->right = nil;
y = root, x = y->left;
while (x != nil) {
y = x;
x = (COMPARE(tree, x, node) > 0) ? x->left : x->right;
}
z->parent = y;
if (y == root || COMPARE(tree, y, z) > 0) {
y->left = z;
}
else {
y->right = z;
}
}
/* rb_insert - insert a node to red-black tree */
void
rb_insert(rb_tree *tree, rb_node *node) {
rb_insert_binary(tree, node);
node->red = 1;
rb_node *x = node, *y;
#define RB_INSERT_SUB(_left, _right) \
do { \
y = x->parent->parent->_right; \
if (y->red) { \
x->parent->red = 0; \
y->red = 0; \
x->parent->parent->red = 1; \
x = x->parent->parent; \
} \
else { \
if (x == x->parent->_right) { \
x = x->parent; \
rb_##_left##_rotate(tree, x); \
} \
x->parent->red = 0; \
x->parent->parent->red = 1; \
rb_##_right##_rotate(tree, x->parent->parent); \
} \
} while (0)
while (x->parent->red) {
if (x->parent == x->parent->parent->left) {
RB_INSERT_SUB(left, right);
}
else {
RB_INSERT_SUB(right, left);
}
}
tree->root->left->red = 0;
assert(!(tree->nil->red) && !(tree->root->red));
#undef RB_INSERT_SUB
}
/* *
* rb_tree_successor - returns the successor of @node, or nil
* if no successor exists. Make sure that @node belongs to @tree;
* this function is only intended to be called by rb_node_next and rb_delete.
* */
static inline rb_node *
rb_tree_successor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->right) != nil) {
while (y->left != nil) {
y = y->left;
}
return y;
}
else {
y = x->parent;
while (x == y->right) {
x = y, y = y->parent;
}
if (y == tree->root) {
return nil;
}
return y;
}
}
/* *
* rb_tree_predecessor - returns the predecessor of @node, or nil
* if no predecessor exists, like rb_tree_successor.
* */
static inline rb_node *
rb_tree_predecessor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->left) != nil) {
while (y->right != nil) {
y = y->right;
}
return y;
}
else {
y = x->parent;
while (x == y->left) {
if (y == tree->root) {
return nil;
}
x = y, y = y->parent;
}
return y;
}
}
/* *
* rb_search - returns a node with value 'equal' to @key (according to
* function @compare). If there're multiple nodes with value 'equal' to @key,
* the function returns the one highest in the tree.
* */
rb_node *
rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key) {
rb_node *nil = tree->nil, *node = tree->root->left;
int r;
while (node != nil && (r = compare(node, key)) != 0) {
node = (r > 0) ? node->left : node->right;
}
return (node != nil) ? node : NULL;
}
/* *
* rb_delete_fixup - performs rotations and changes colors to restore
* red-black properties after a node is deleted.
* */
static void
rb_delete_fixup(rb_tree *tree, rb_node *node) {
rb_node *x = node, *w, *root = tree->root->left;
#define RB_DELETE_FIXUP_SUB(_left, _right) \
do { \
w = x->parent->_right; \
if (w->red) { \
w->red = 0; \
x->parent->red = 1; \
rb_##_left##_rotate(tree, x->parent); \
w = x->parent->_right; \
} \
if (!w->_left->red && !w->_right->red) { \
w->red = 1; \
x = x->parent; \
} \
else { \
if (!w->_right->red) { \
w->_left->red = 0; \
w->red = 1; \
rb_##_right##_rotate(tree, w); \
w = x->parent->_right; \
} \
w->red = x->parent->red; \
x->parent->red = 0; \
w->_right->red = 0; \
rb_##_left##_rotate(tree, x->parent); \
x = root; \
} \
} while (0)
while (x != root && !x->red) {
if (x == x->parent->left) {
RB_DELETE_FIXUP_SUB(left, right);
}
else {
RB_DELETE_FIXUP_SUB(right, left);
}
}
x->red = 0;
#undef RB_DELETE_FIXUP_SUB
}
/* *
* rb_delete - deletes @node from @tree, and calls rb_delete_fixup to
* restore red-black properties.
* */
void
rb_delete(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node;
rb_node *nil = tree->nil, *root = tree->root;
y = (z->left == nil || z->right == nil) ? z : rb_tree_successor(tree, z);
x = (y->left != nil) ? y->left : y->right;
assert(y != root && y != nil);
x->parent = y->parent;
if (y == y->parent->left) {
y->parent->left = x;
}
else {
y->parent->right = x;
}
bool need_fixup = !(y->red);
if (y != z) {
if (z == z->parent->left) {
z->parent->left = y;
}
else {
z->parent->right = y;
}
z->left->parent = z->right->parent = y;
*y = *z;
}
if (need_fixup) {
rb_delete_fixup(tree, x);
}
}
/* rb_tree_destroy - destroy a tree and free memory */
void
rb_tree_destroy(rb_tree *tree) {
kfree(tree->root);
kfree(tree->nil);
kfree(tree);
}
/* *
* rb_node_prev - returns the predecessor node of @node in @tree,
* or 'NULL' if no predecessor exists.
* */
rb_node *
rb_node_prev(rb_tree *tree, rb_node *node) {
rb_node *prev = rb_tree_predecessor(tree, node);
return (prev != tree->nil) ? prev : NULL;
}
/* *
* rb_node_next - returns the successor node of @node in @tree,
* or 'NULL' if no successor exists.
* */
rb_node *
rb_node_next(rb_tree *tree, rb_node *node) {
rb_node *next = rb_tree_successor(tree, node);
return (next != tree->nil) ? next : NULL;
}
/* rb_node_root - returns the root node of a @tree, or 'NULL' if tree is empty */
rb_node *
rb_node_root(rb_tree *tree) {
rb_node *node = tree->root->left;
return (node != tree->nil) ? node : NULL;
}
/* rb_node_left - gets the left child of @node, or 'NULL' if no such node */
rb_node *
rb_node_left(rb_tree *tree, rb_node *node) {
rb_node *left = node->left;
return (left != tree->nil) ? left : NULL;
}
/* rb_node_right - gets the right child of @node, or 'NULL' if no such node */
rb_node *
rb_node_right(rb_tree *tree, rb_node *node) {
rb_node *right = node->right;
return (right != tree->nil) ? right : NULL;
}
int
check_tree(rb_tree *tree, rb_node *node) {
rb_node *nil = tree->nil;
if (node == nil) {
assert(!node->red);
return 1;
}
if (node->left != nil) {
assert(COMPARE(tree, node, node->left) >= 0);
assert(node->left->parent == node);
}
if (node->right != nil) {
assert(COMPARE(tree, node, node->right) <= 0);
assert(node->right->parent == node);
}
if (node->red) {
assert(!node->left->red && !node->right->red);
}
int hb_left = check_tree(tree, node->left);
int hb_right = check_tree(tree, node->right);
assert(hb_left == hb_right);
int hb = hb_left;
if (!node->red) {
hb ++;
}
return hb;
}
static void *
check_safe_kmalloc(size_t size) {
void *ret = kmalloc(size);
assert(ret != NULL);
return ret;
}
struct check_data {
long data;
rb_node rb_link;
};
#define rbn2data(node) \
(to_struct(node, struct check_data, rb_link))
static inline int
check_compare1(rb_node *node1, rb_node *node2) {
return rbn2data(node1)->data - rbn2data(node2)->data;
}
static inline int
check_compare2(rb_node *node, void *key) {
return rbn2data(node)->data - (long)key;
}
void
check_rb_tree(void) {
rb_tree *tree = rb_tree_create(check_compare1);
assert(tree != NULL);
rb_node *nil = tree->nil, *root = tree->root;
assert(!nil->red && root->left == nil);
int total = 1000;
struct check_data **all = check_safe_kmalloc(sizeof(struct check_data *) * total);
long i;
for (i = 0; i < total; i ++) {
all[i] = check_safe_kmalloc(sizeof(struct check_data));
all[i]->data = i;
}
int *mark = check_safe_kmalloc(sizeof(int) * total);
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
int j = (rand() % (total - i)) + i;
struct check_data *z = all[i];
all[i] = all[j];
all[j] = z;
}
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_node *node;
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)(all[i]->data));
assert(node != NULL && node == &(all[i]->rb_link));
}
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)i);
assert(node != NULL && rbn2data(node)->data == i);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(!nil->red && root->left == nil);
long max = 32;
if (max > total) {
max = total;
}
for (i = 0; i < max; i ++) {
all[i]->data = max;
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
for (i = 0; i < max; i ++) {
node = rb_search(tree, check_compare2, (void *)max);
assert(node != NULL && rbn2data(node)->data == max);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(rb_tree_empty(tree));
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_tree_destroy(tree);
for (i = 0; i < total; i ++) {
kfree(all[i]);
}
kfree(mark);
kfree(all);
}

code/lab4/kern/libs/rb_tree.h (+0, -32)

@@ -1,32 +0,0 @@
#ifndef __KERN_LIBS_RB_TREE_H__
#define __KERN_LIBS_RB_TREE_H__
#include <defs.h>
typedef struct rb_node {
bool red; // if red = 0, it's a black node
struct rb_node *parent;
struct rb_node *left, *right;
} rb_node;
typedef struct rb_tree {
// compare function should return -1 if *node1 < *node2, 1 if *node1 > *node2, and 0 otherwise
int (*compare)(rb_node *node1, rb_node *node2);
struct rb_node *nil, *root;
} rb_tree;
rb_tree *rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2));
void rb_tree_destroy(rb_tree *tree);
void rb_insert(rb_tree *tree, rb_node *node);
void rb_delete(rb_tree *tree, rb_node *node);
rb_node *rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key);
rb_node *rb_node_prev(rb_tree *tree, rb_node *node);
rb_node *rb_node_next(rb_tree *tree, rb_node *node);
rb_node *rb_node_root(rb_tree *tree);
rb_node *rb_node_left(rb_tree *tree, rb_node *node);
rb_node *rb_node_right(rb_tree *tree, rb_node *node);
void check_rb_tree(void);
#endif /* !__KERN_LIBS_RBTREE_H__ */

code/lab4/kern/mm/kmalloc.c (+253, -578)

@@ -6,630 +6,305 @@
#include <sync.h>
#include <pmm.h>
#include <stdio.h>
#include <rb_tree.h>
/* The slab allocator used in ucore is based on an algorithm first introduced by
Jeff Bonwick for the SunOS operating system. The paper can be downloaded from
http://citeseer.ist.psu.edu/bonwick94slab.html
An implementation of the Slab Allocator as described in outline in:
UNIX Internals: The New Frontiers by Uresh Vahalia
Pub: Prentice Hall ISBN 0-13-101908-2
Within a kernel, a considerable amount of memory is allocated for a finite set
of objects such as file descriptors and other common structures. Jeff found that
the amount of time required to initialize a regular object in the kernel exceeded
the amount of time required to allocate and deallocate it. His conclusion was
that instead of freeing the memory back to a global pool, he would have the memory
remain initialized for its intended purpose.
In our simple slab implementation, the high-level organization of the slab
structures is simplified. At the highest level is an array slab_cache[SLAB_CACHE_NUM],
and each array element is a slab_cache which has slab chains. Each slab_cache has
two lists: one chains the fully allocated slabs, and the other chains the not-full
(possibly empty) slabs. Each slab has a fixed number (2^n) of pages, and in each
slab there are many objects with the same fixed size (32B ~ 128KB).
+----------------------------------+
| slab_cache[0] for 0~32B obj |
+----------------------------------+
| slab_cache[1] for 33B~64B obj |-->lists for slabs
+----------------------------------+ |
| slab_cache[2] for 65B~128B obj | |
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+----------------------------------+ |
| slab_cache[12]for 64KB~128KB obj | |
+----------------------------------+ |
|
slabs_full/slabs_not +---------------------+
-<-----------<----------<-+
| | |
slab1 slab2 slab3...
|
|-------|-------|
pages1 pages2 pages3...
|
|
|
slab_t+n*bufctl_t+obj1-obj2-obj3...objn (the size of obj is small)
|
OR
|
obj1-obj2-obj3...objn WITH slab_t+n*bufctl_t in another slab (the size of obj is BIG)
The important functions are:
kmem_cache_grow(kmem_cache_t *cachep)
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp)
kmalloc(size_t size): used by outside functions that need to dynamically allocate memory
kfree(void *objp): used by outside functions that need to dynamically release memory
*/
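/* Illustrative sketch (added for exposition, not part of the original file):
 * how a request maps onto the size-class caches described above, using the
 * getorder()/kmalloc() code further down in this file.
 *
 *   kmalloc(100)  -> getorder(100)  == 7   -> slab_cache[7 - MIN_SIZE_ORDER]  == slab_cache[2]  (65B~128B objs)
 *   kmalloc(4000) -> getorder(4000) == 12  -> slab_cache[12 - MIN_SIZE_ORDER] == slab_cache[7]  (2KB~4KB objs)
 *
 * i.e. every request is rounded up to a power of two between 2^5 (32B) and
 * 2^17 (128KB) and served from the matching kmem_cache_t.
 */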
#define BUFCTL_END 0xFFFFFFFFL // the signature of the last bufctl
#define SLAB_LIMIT 0xFFFFFFFEL // the max value of obj number
typedef size_t kmem_bufctl_t; //the index of obj in slab
typedef struct slab_s {
list_entry_t slab_link; // the list entry linked to kmem_cache list
void *s_mem; // the kernel virtual address of the first obj in slab
size_t inuse; // the number of allocated objs
size_t offset; // the first obj's offset value in slab
kmem_bufctl_t free; // the first free obj's index in slab
} slab_t;
// get the slab address according to the link element (see list.h)
#define le2slab(le, member) \
to_struct((le), slab_t, member)
typedef struct kmem_cache_s kmem_cache_t;
struct kmem_cache_s {
list_entry_t slabs_full; // list for fully allocated slabs
list_entry_t slabs_notfull; // list for not-fully allocated slabs
size_t objsize; // the fixed size of obj
size_t num; // number of objs per slab
size_t offset; // this first obj's offset in slab
bool off_slab; // the control part of slab in slab or not.
/* order of pages per slab (2^n) */
size_t page_order;
kmem_cache_t *slab_cachep;
};
#define MIN_SIZE_ORDER 5 // 32
#define MAX_SIZE_ORDER 17 // 128k
#define SLAB_CACHE_NUM (MAX_SIZE_ORDER - MIN_SIZE_ORDER + 1)
/*
* SLOB Allocator: Simple List Of Blocks
*
* Matt Mackall <mpm@selenic.com> 12/30/03
*
* How SLOB works:
*
* The core of SLOB is a traditional K&R style heap allocator, with
* support for returning aligned objects. The granularity of this
* allocator is 8 bytes on x86, though it's perhaps possible to reduce
* this to 4 if it's deemed worth the effort. The slob heap is a
* singly-linked list of pages from __get_free_page, grown on demand
* and allocation from the heap is currently first-fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are 8-byte aligned and prepended with an 8-byte header.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* __get_free_pages directly so that it can return page-aligned blocks
* and keeps a linked list of such pages and their orders. These
* objects are detected in kfree() by their page alignment.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
* the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
* their size, no separate size bookkeeping is necessary and there is
* essentially no allocation space overhead.
*/
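/* Illustrative sketch (added for exposition, not part of the original file):
 * the arithmetic behind the 8-byte header mentioned above, using the
 * slob_t/SLOB_UNIT/SLOB_UNITS definitions that follow. A small allocation is
 * rounded up to whole slob_t units plus one extra unit for the header, which
 * is how kfree() later recovers the block:
 *
 *   slob_t *m = slob_alloc(size + SLOB_UNIT, gfp, 0);   // header + payload
 *   return (void *)(m + 1);                             // caller only sees the payload
 *   ...
 *   slob_free((slob_t *)block - 1, 0);                  // kfree() steps back to the header
 *
 * e.g. with SLOB_UNIT == 8, kmalloc(20) reserves SLOB_UNITS(20 + 8) == 4 units
 * (32 bytes): 8 bytes of header and 24 bytes available to the caller.
 */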
//some helper
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)
typedef unsigned int gfp_t;
#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif
#ifndef ALIGN
#define ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
#endif
struct slob_block {
int units;
struct slob_block *next;
};
typedef struct slob_block slob_t;
static kmem_cache_t slab_cache[SLAB_CACHE_NUM];
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
static void init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align);
static void check_slab(void);
struct bigblock {
int order;
void *pages;
struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
//slab_init - call init_kmem_cache function to reset the slab_cache array
static void
slab_init(void) {
size_t i;
//the align bit for obj in slab. 2^n could be better for performance
size_t align = 16;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
init_kmem_cache(slab_cache + i, 1 << (i + MIN_SIZE_ORDER), align);
}
check_slab();
}
inline void
kmalloc_init(void) {
slab_init();
cprintf("kmalloc_init() succeeded!\n");
}
//slab_allocated - sum up the total size of allocated objs
static size_t
slab_allocated(void) {
size_t total = 0;
int i;
bool intr_flag;
local_intr_save(intr_flag);
{
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
list_entry_t *list, *le;
list = le = &(cachep->slabs_full);
while ((le = list_next(le)) != list) {
total += cachep->num * cachep->objsize;
}
list = le = &(cachep->slabs_notfull);
while ((le = list_next(le)) != list) {
slab_t *slabp = le2slab(le, slab_link);
total += slabp->inuse * cachep->objsize;
}
}
}
local_intr_restore(intr_flag);
return total;
static void* __slob_get_free_pages(gfp_t gfp, int order)
{
struct Page * page = alloc_pages(1 << order);
if(!page)
return NULL;
return page2kva(page);
}
// slab_mgmt_size - get the size of slab control area (slab_t+num*kmem_bufctl_t)
static size_t
slab_mgmt_size(size_t num, size_t align) {
return ROUNDUP(sizeof(slab_t) + num * sizeof(kmem_bufctl_t), align);
}
#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)
// cache_estimate - estimate the number of objs in a slab
static void
cache_estimate(size_t order, size_t objsize, size_t align, bool off_slab, size_t *remainder, size_t *num) {
size_t nr_objs, mgmt_size;
size_t slab_size = (PGSIZE << order);
if (off_slab) {
mgmt_size = 0;
nr_objs = slab_size / objsize;
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
}
else {
nr_objs = (slab_size - sizeof(slab_t)) / (objsize + sizeof(kmem_bufctl_t));
while (slab_mgmt_size(nr_objs, align) + nr_objs * objsize > slab_size) {
nr_objs --;
}
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
*remainder = slab_size - nr_objs * objsize - mgmt_size;
static inline void __slob_free_pages(unsigned long kva, int order)
{
free_pages(kva2page(kva), 1 << order);
}
// calculate_slab_order - estimate the size(4K~4M) of slab
// parameters:
// cachep: the slab_cache
// objsize: the size of obj
// align: align bit for objs
// off_slab: the control part of slab in slab or not
// left_over: the size of the unusable area left in the slab
static void
calculate_slab_order(kmem_cache_t *cachep, size_t objsize, size_t align, bool off_slab, size_t *left_over) {
size_t order;
for (order = 0; order <= KMALLOC_MAX_ORDER; order ++) {
size_t num, remainder;
cache_estimate(order, objsize, align, off_slab, &remainder, &num);
if (num != 0) {
if (off_slab) {
size_t off_slab_limit = objsize - sizeof(slab_t);
off_slab_limit /= sizeof(kmem_bufctl_t);
if (num > off_slab_limit) {
panic("off_slab: objsize = %d, num = %d.", objsize, num);
}
}
if (remainder * 8 <= (PGSIZE << order)) {
cachep->num = num;
cachep->page_order = order;
if (left_over != NULL) {
*left_over = remainder;
}
return ;
}
}
}
panic("calculate_slab_over: failed.");
static void slob_free(void *b, int size);
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
assert( (size + SLOB_UNIT) < PAGE_SIZE );
slob_t *prev, *cur, *aligned = 0;
int delta = 0, units = SLOB_UNITS(size);
unsigned long flags;
spin_lock_irqsave(&slob_lock, flags);
prev = slobfree;
for (cur = prev->next; ; prev = cur, cur = cur->next) {
if (align) {
aligned = (slob_t *)ALIGN((unsigned long)cur, align);
delta = aligned - cur;
}
if (cur->units >= units + delta) { /* room enough? */
if (delta) { /* need to fragment head to align? */
aligned->units = cur->units - delta;
aligned->next = cur->next;
cur->next = aligned;
cur->units = delta;
prev = cur;
cur = aligned;
}
if (cur->units == units) /* exact fit? */
prev->next = cur->next; /* unlink */
else { /* fragment */
prev->next = cur + units;
prev->next->units = cur->units - units;
prev->next->next = cur->next;
cur->units = units;
}
slobfree = prev;
spin_unlock_irqrestore(&slob_lock, flags);
return cur;
}
if (cur == slobfree) {
spin_unlock_irqrestore(&slob_lock, flags);
if (size == PAGE_SIZE) /* trying to shrink arena? */
return 0;
cur = (slob_t *)__slob_get_free_page(gfp);
if (!cur)
return 0;
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
}
}
}
// getorder - find the smallest order such that n <= 2^order
static inline size_t
getorder(size_t n) {
size_t order = MIN_SIZE_ORDER, order_size = (1 << order);
for (; order <= MAX_SIZE_ORDER; order ++, order_size <<= 1) {
if (n <= order_size) {
return order;
}
}
panic("getorder failed. %d\n", n);
}
static void slob_free(void *block, int size)
{
slob_t *cur, *b = (slob_t *)block;
unsigned long flags;
// init_kmem_cache - initial a slab_cache cachep according to the obj with the size = objsize
static void
init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align) {
list_init(&(cachep->slabs_full));
list_init(&(cachep->slabs_notfull));
if (!block)
return;
objsize = ROUNDUP(objsize, align);
cachep->objsize = objsize;
cachep->off_slab = (objsize >= (PGSIZE >> 3));
if (size)
b->units = SLOB_UNITS(size);
size_t left_over;
calculate_slab_order(cachep, objsize, align, cachep->off_slab, &left_over);
/* Find reinsertion point */
spin_lock_irqsave(&slob_lock, flags);
for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
if (cur >= cur->next && (b > cur || b < cur->next))
break;
assert(cachep->num > 0);
if (b + b->units == cur->next) {
b->units += cur->next->units;
b->next = cur->next->next;
} else
b->next = cur->next;
size_t mgmt_size = slab_mgmt_size(cachep->num, align);
if (cur + cur->units == b) {
cur->units += b->units;
cur->next = b->next;
} else
cur->next = b;
if (cachep->off_slab && left_over >= mgmt_size) {
cachep->off_slab = 0;
}
slobfree = cur;
if (cachep->off_slab) {
cachep->offset = 0;
cachep->slab_cachep = slab_cache + (getorder(mgmt_size) - MIN_SIZE_ORDER);
}
else {
cachep->offset = mgmt_size;
}
spin_unlock_irqrestore(&slob_lock, flags);
}
static void *kmem_cache_alloc(kmem_cache_t *cachep);
#define slab_bufctl(slabp) \
((kmem_bufctl_t*)(((slab_t *)(slabp)) + 1))
// kmem_cache_slabmgmt - get the address of a slab according to page
// - and initialize the slab according to cachep
static slab_t *
kmem_cache_slabmgmt(kmem_cache_t *cachep, struct Page *page) {
void *objp = page2kva(page);
slab_t *slabp;
if (cachep->off_slab) {
if ((slabp = kmem_cache_alloc(cachep->slab_cachep)) == NULL) {
return NULL;
}
}
else {
slabp = page2kva(page);
}
slabp->inuse = 0;
slabp->offset = cachep->offset;
slabp->s_mem = objp + cachep->offset;
return slabp;
}
#define SET_PAGE_CACHE(page, cachep) \
do { \
struct Page *__page = (struct Page *)(page); \
kmem_cache_t **__cachepp = (kmem_cache_t **)&(__page->page_link.next); \
*__cachepp = (kmem_cache_t *)(cachep); \
} while (0)
#define SET_PAGE_SLAB(page, slabp) \
do { \
struct Page *__page = (struct Page *)(page); \
slab_t **__cachepp = (slab_t **)&(__page->page_link.prev); \
*__cachepp = (slab_t *)(slabp); \
} while (0)
// kmem_cache_grow - allocate a new slab by calling alloc_pages
// - set control area in the new slab
static bool
kmem_cache_grow(kmem_cache_t *cachep) {
struct Page *page = alloc_pages(1 << cachep->page_order);
if (page == NULL) {
goto failed;
}
slab_t *slabp;
if ((slabp = kmem_cache_slabmgmt(cachep, page)) == NULL) {
goto oops;
}
size_t order_size = (1 << cachep->page_order);
do {
//setup this page in the free list (see memlayout.h: struct page)???
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
//this page is used for slab
SetPageSlab(page);
page ++;
} while (-- order_size);
int i;
for (i = 0; i < cachep->num; i ++) {
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[cachep->num - 1] = BUFCTL_END;
slabp->free = 0;
bool intr_flag;
local_intr_save(intr_flag);
{
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
local_intr_restore(intr_flag);
return 1;
oops:
free_pages(page, 1 << cachep->page_order);
failed:
return 0;
}
// kmem_cache_alloc_one - allocate an obj in a slab
static void *
kmem_cache_alloc_one(kmem_cache_t *cachep, slab_t *slabp) {
slabp->inuse ++;
void *objp = slabp->s_mem + slabp->free * cachep->objsize;
slabp->free = slab_bufctl(slabp)[slabp->free];
if (slabp->free == BUFCTL_END) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_full), &(slabp->slab_link));
}
return objp;
void check_slob(void) {
cprintf("check_slob() success\n");
}
// kmem_cache_alloc - call the kmem_cache_alloc_one function to allocate an obj
// - if no free obj, try to allocate a slab
static void *
kmem_cache_alloc(kmem_cache_t *cachep) {
void *objp;
bool intr_flag;
try_again:
local_intr_save(intr_flag);
if (list_empty(&(cachep->slabs_notfull))) {
goto alloc_new_slab;
}
slab_t *slabp = le2slab(list_next(&(cachep->slabs_notfull)), slab_link);
objp = kmem_cache_alloc_one(cachep, slabp);
local_intr_restore(intr_flag);
return objp;
alloc_new_slab:
local_intr_restore(intr_flag);
if (kmem_cache_grow(cachep)) {
goto try_again;
}
return NULL;
void
slob_init(void) {
cprintf("use SLOB allocator\n");
check_slob();
}
// kmalloc - simple interface used by outside functions
// - to allocate free memory using the kmem_cache_alloc function
void *
kmalloc(size_t size) {
assert(size > 0);
size_t order = getorder(size);
if (order > MAX_SIZE_ORDER) {
return NULL;
}
return kmem_cache_alloc(slab_cache + (order - MIN_SIZE_ORDER));
inline void
kmalloc_init(void) {
slob_init();
cprintf("kmalloc_init() succeeded!\n");
}
static void kmem_cache_free(kmem_cache_t *cachep, void *obj);
// kmem_slab_destroy - call free_pages & kmem_cache_free to free a slab
static void
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp) {
struct Page *page = kva2page(slabp->s_mem - slabp->offset);
struct Page *p = page;
size_t order_size = (1 << cachep->page_order);
do {
assert(PageSlab(p));
ClearPageSlab(p);
p ++;
} while (-- order_size);
free_pages(page, 1 << cachep->page_order);
if (cachep->off_slab) {
kmem_cache_free(cachep->slab_cachep, slabp);
}
size_t
slob_allocated(void) {
return 0;
}
// kmem_cache_free_one - free an obj in a slab
// - if slab->inuse==0, then free the slab
static void
kmem_cache_free_one(kmem_cache_t *cachep, slab_t *slabp, void *objp) {
//should not use divide operator ???
size_t objnr = (objp - slabp->s_mem) / cachep->objsize;
slab_bufctl(slabp)[objnr] = slabp->free;
slabp->free = objnr;
slabp->inuse --;
if (slabp->inuse == 0) {
list_del(&(slabp->slab_link));
kmem_slab_destroy(cachep, slabp);
}
else if (slabp->inuse == cachep->num -1 ) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
size_t
kallocated(void) {
return slob_allocated();
}
#define GET_PAGE_CACHE(page) \
(kmem_cache_t *)((page)->page_link.next)
#define GET_PAGE_SLAB(page) \
(slab_t *)((page)->page_link.prev)
// kmem_cache_free - call kmem_cache_free_one function to free an obj
static void
kmem_cache_free(kmem_cache_t *cachep, void *objp) {
bool intr_flag;
struct Page *page = kva2page(objp);
if (!PageSlab(page)) {
panic("not a slab page %08x\n", objp);
}
local_intr_save(intr_flag);
{
kmem_cache_free_one(cachep, GET_PAGE_SLAB(page), objp);
}
local_intr_restore(intr_flag);
static int find_order(int size)
{
int order = 0;
for ( ; size > 4096 ; size >>=1)
order++;
return order;
}
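/* Illustrative note (added for exposition, not part of the original file):
 * find_order() above picks the page order for a big allocation by halving
 * size until it fits within one 4096-byte page, e.g.
 *   find_order(4096)  == 0  (one page)
 *   find_order(8192)  == 1  (two pages)
 *   find_order(20000) == 3  (eight pages, 32KB)
 */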
// kfree - simple interface used by outside functions to free an obj
void
kfree(void *objp) {
kmem_cache_free(GET_PAGE_CACHE(kva2page(objp)), objp);
static void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
bigblock_t *bb;
unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
return m ? (void *)(m + 1) : 0;
}
bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
if (!bb)
return 0;
bb->order = find_order(size);
bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);
if (bb->pages) {
spin_lock_irqsave(&block_lock, flags);
bb->next = bigblocks;
bigblocks = bb;
spin_unlock_irqrestore(&block_lock, flags);
return bb->pages;
}
slob_free(bb, sizeof(bigblock_t));
return 0;
}
static inline void
check_slab_empty(void) {
int i;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
assert(list_empty(&(cachep->slabs_full)));
assert(list_empty(&(cachep->slabs_notfull)));
}
void *
kmalloc(size_t size)
{
return __kmalloc(size, 0);
}
void
check_slab(void) {
int i;
void *v0, *v1;
size_t nr_free_pages_store = nr_free_pages();
size_t kernel_allocated_store = slab_allocated();
/* slab must be empty now */
check_slab_empty();
assert(slab_allocated() == 0);
kmem_cache_t *cachep0, *cachep1;
cachep0 = slab_cache;
assert(cachep0->objsize == 32 && cachep0->num > 1 && !cachep0->off_slab);
assert((v0 = kmalloc(16)) != NULL);
slab_t *slabp0, *slabp1;
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
assert(slabp0->inuse == 1 && list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
struct Page *p0, *p1;
size_t order_size;
p0 = kva2page(slabp0->s_mem - slabp0->offset), p1 = p0;
order_size = (1 << cachep0->page_order);
for (i = 0; i < cachep0->page_order; i ++, p1 ++) {
assert(PageSlab(p1));
assert(GET_PAGE_CACHE(p1) == cachep0 && GET_PAGE_SLAB(p1) == slabp0);
}
assert(v0 == slabp0->s_mem);
assert((v1 = kmalloc(16)) != NULL && v1 == v0 + 32);
kfree(v0);
assert(slabp0->free == 0);
kfree(v1);
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->page_order; i ++, p0 ++) {
assert(!PageSlab(p0));
}
void kfree(void *block)
{
bigblock_t *bb, **last = &bigblocks;
unsigned long flags;
if (!block)
return;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
/* might be on the big block list */
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
if (bb->pages == block) {
*last = bb->next;
spin_unlock_irqrestore(&block_lock, flags);
__slob_free_pages((unsigned long)block, bb->order);
slob_free(bb, sizeof(bigblock_t));
return;
}
}
spin_unlock_irqrestore(&block_lock, flags);
}
slob_free((slob_t *)block - 1, 0);
return;
}
v0 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
for (i = 0; i < cachep0->num - 1; i ++) {
kmalloc(16);
}
assert(slabp0->inuse == cachep0->num);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
v1 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
kfree(v0);
assert(list_empty(&(cachep0->slabs_full)));
assert(list_next(&(slabp0->slab_link)) == &(slabp1->slab_link)
|| list_next(&(slabp1->slab_link)) == &(slabp0->slab_link));
kfree(v1);
assert(!list_empty(&(cachep0->slabs_notfull)));
assert(list_next(&(cachep0->slabs_notfull)) == &(slabp0->slab_link));
assert(list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
v1 = kmalloc(16);
assert(v1 == v0);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->num; i ++) {
kfree(v1 + i * cachep0->objsize);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
cachep0 = slab_cache;
bool has_off_slab = 0;
for (i = 0; i < SLAB_CACHE_NUM; i ++, cachep0 ++) {
if (cachep0->off_slab) {
has_off_slab = 1;
cachep1 = cachep0->slab_cachep;
if (!cachep1->off_slab) {
break;
}
}
}
unsigned int ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
if (!has_off_slab) {
goto check_pass;
}
if (!block)
return 0;
assert(cachep0->off_slab && !cachep1->off_slab);
assert(cachep1 < cachep0);
if (!((unsigned long)block & (PAGE_SIZE-1))) {
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; bb = bb->next)
if (bb->pages == block) {
spin_unlock_irqrestore(&slob_lock, flags);
return PAGE_SIZE << bb->order;
}
spin_unlock_irqrestore(&block_lock, flags);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}
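/* Illustrative note (added for exposition, not part of the original file):
 * for a small block, ksize() above reports ((slob_t *)block - 1)->units * SLOB_UNIT,
 * e.g. 4 * 8 == 32 bytes for the kmalloc(20) case sketched earlier; for a
 * page-aligned big block it reports PAGE_SIZE << order from its bigblock entry.
 */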
assert(list_empty(&(cachep1->slabs_full)));
assert(list_empty(&(cachep1->slabs_notfull)));
v0 = kmalloc(cachep0->objsize);
p0 = kva2page(v0);
assert(page2kva(p0) == v0);
if (cachep0->num == 1) {
assert(!list_empty(&(cachep0->slabs_full)));
slabp0 = le2slab(list_next(&(cachep0->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
}
assert(slabp0 != NULL);
if (cachep1->num == 1) {
assert(!list_empty(&(cachep1->slabs_full)));
slabp1 = le2slab(list_next(&(cachep1->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep1->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep1->slabs_notfull)), slab_link);
}
assert(slabp1 != NULL);
order_size = (1 << cachep0->page_order);
for (i = 0; i < order_size; i ++, p0 ++) {
assert(PageSlab(p0));
assert(GET_PAGE_CACHE(p0) == cachep0 && GET_PAGE_SLAB(p0) == slabp0);
}
kfree(v0);
check_pass:
check_rb_tree();
check_slab_empty();
assert(slab_allocated() == 0);
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == slab_allocated());
cprintf("check_slab() succeeded!\n");
}

code/lab4/kern/mm/kmalloc.h (+3, -3)

@@ -1,5 +1,5 @@
#ifndef __KERN_MM_SLAB_H__
#define __KERN_MM_SLAB_H__
#ifndef __KERN_MM_KMALLOC_H__
#define __KERN_MM_KMALLOC_H__
#include <defs.h>
@@ -10,5 +10,5 @@ void kmalloc_init(void);
void *kmalloc(size_t n);
void kfree(void *objp);
#endif /* !__KERN_MM_SLAB_H__ */
#endif /* !__KERN_MM_KMALLOC_H__ */

code/lab4/kern/mm/memlayout.h (+0, -5)

@@ -127,11 +127,6 @@ typedef struct {
unsigned int nr_free; // # of free pages in this free list
} free_area_t;
/* for slab style kmalloc */
#define PG_slab 2 // page frame is included in a slab
#define SetPageSlab(page) set_bit(PG_slab, &((page)->flags))
#define ClearPageSlab(page) clear_bit(PG_slab, &((page)->flags))
#define PageSlab(page) test_bit(PG_slab, &((page)->flags))
#endif /* !__ASSEMBLER__ */

code/lab4/kern/mm/vmm.c (+0, -4)

@@ -167,8 +167,6 @@ check_vmm(void) {
check_vma_struct();
check_pgfault();
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vmm() succeeded.\n");
}
@@ -215,8 +213,6 @@ check_vma_struct(void) {
mm_destroy(mm);
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vma_struct() succeeded!\n");
}

code/lab4/tools/grade.sh (+2, -2)

@@ -333,8 +333,8 @@ quick_check 'check page table' \
' |-- PTE(00001) fafeb000-fafec000 00001000 -rw'
pts=10
quick_check 'check slab' \
'check_slab() succeeded!'
quick_check 'check slob' \
'check_slob() succeeded!'
pts=25
quick_check 'check vmm' \

code/lab5/kern/libs/rb_tree.c (+0, -528)

@@ -1,528 +0,0 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <kmalloc.h>
#include <rb_tree.h>
#include <assert.h>
/* rb_node_create - create a new rb_node */
static inline rb_node *
rb_node_create(void) {
return kmalloc(sizeof(rb_node));
}
/* rb_tree_empty - tests if tree is empty */
static inline bool
rb_tree_empty(rb_tree *tree) {
rb_node *nil = tree->nil, *root = tree->root;
return root->left == nil;
}
/* *
* rb_tree_create - creates a new red-black tree; the 'compare' function
* is required. Returns 'NULL' on failure.
*
* Note that, root->left should always point to the node that is the root
* of the tree. And nil points to a 'NULL' node which should always be
* black and may have arbitrary children and parent node.
* */
rb_tree *
rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2)) {
assert(compare != NULL);
rb_tree *tree;
rb_node *nil, *root;
if ((tree = kmalloc(sizeof(rb_tree))) == NULL) {
goto bad_tree;
}
tree->compare = compare;
if ((nil = rb_node_create()) == NULL) {
goto bad_node_cleanup_tree;
}
nil->parent = nil->left = nil->right = nil;
nil->red = 0;
tree->nil = nil;
if ((root = rb_node_create()) == NULL) {
goto bad_node_cleanup_nil;
}
root->parent = root->left = root->right = nil;
root->red = 0;
tree->root = root;
return tree;
bad_node_cleanup_nil:
kfree(nil);
bad_node_cleanup_tree:
kfree(tree);
bad_tree:
return NULL;
}
/* *
* FUNC_ROTATE - rotates as described in "Introduction to Algorithms".
*
* For example, FUNC_ROTATE(rb_left_rotate, left, right) can be expanded to a
* left-rotate function, which requires a red-black 'tree' and a node 'x'
* to be rotated on. Basically, this function, named rb_left_rotate, makes 'x'
* the left child of its original right child 'y', lets 'y' take the place of
* 'x' in the tree, and finally fixes the other links accordingly.
*
* FUNC_ROTATE(xx, left, right) means left-rotate,
* and FUNC_ROTATE(xx, right, left) means right-rotate.
* */
#define FUNC_ROTATE(func_name, _left, _right) \
static void \
func_name(rb_tree *tree, rb_node *x) { \
rb_node *nil = tree->nil, *y = x->_right; \
assert(x != tree->root && x != nil && y != nil); \
x->_right = y->_left; \
if (y->_left != nil) { \
y->_left->parent = x; \
} \
y->parent = x->parent; \
if (x == x->parent->_left) { \
x->parent->_left = y; \
} \
else { \
x->parent->_right = y; \
} \
y->_left = x; \
x->parent = y; \
assert(!(nil->red)); \
}
FUNC_ROTATE(rb_left_rotate, left, right);
FUNC_ROTATE(rb_right_rotate, right, left);
#undef FUNC_ROTATE
#define COMPARE(tree, node1, node2) \
((tree))->compare((node1), (node2))
/* *
* rb_insert_binary - insert @node to red-black @tree as if it were
* a regular binary tree. This function is only intended to be called
* by function rb_insert.
* */
static inline void
rb_insert_binary(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node, *nil = tree->nil, *root = tree->root;
z->left = z->right = nil;
y = root, x = y->left;
while (x != nil) {
y = x;
x = (COMPARE(tree, x, node) > 0) ? x->left : x->right;
}
z->parent = y;
if (y == root || COMPARE(tree, y, z) > 0) {
y->left = z;
}
else {
y->right = z;
}
}
/* rb_insert - insert a node to red-black tree */
void
rb_insert(rb_tree *tree, rb_node *node) {
rb_insert_binary(tree, node);
node->red = 1;
rb_node *x = node, *y;
#define RB_INSERT_SUB(_left, _right) \
do { \
y = x->parent->parent->_right; \
if (y->red) { \
x->parent->red = 0; \
y->red = 0; \
x->parent->parent->red = 1; \
x = x->parent->parent; \
} \
else { \
if (x == x->parent->_right) { \
x = x->parent; \
rb_##_left##_rotate(tree, x); \
} \
x->parent->red = 0; \
x->parent->parent->red = 1; \
rb_##_right##_rotate(tree, x->parent->parent); \
} \
} while (0)
while (x->parent->red) {
if (x->parent == x->parent->parent->left) {
RB_INSERT_SUB(left, right);
}
else {
RB_INSERT_SUB(right, left);
}
}
tree->root->left->red = 0;
assert(!(tree->nil->red) && !(tree->root->red));
#undef RB_INSERT_SUB
}
/* *
* rb_tree_successor - returns the successor of @node, or nil
* if no successor exists. Make sure that @node belongs to @tree;
* this function is only intended to be called by rb_node_next and rb_delete.
* */
static inline rb_node *
rb_tree_successor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->right) != nil) {
while (y->left != nil) {
y = y->left;
}
return y;
}
else {
y = x->parent;
while (x == y->right) {
x = y, y = y->parent;
}
if (y == tree->root) {
return nil;
}
return y;
}
}
/* *
* rb_tree_predecessor - returns the predecessor of @node, or nil
* if no predecessor exists, like rb_tree_successor.
* */
static inline rb_node *
rb_tree_predecessor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->left) != nil) {
while (y->right != nil) {
y = y->right;
}
return y;
}
else {
y = x->parent;
while (x == y->left) {
if (y == tree->root) {
return nil;
}
x = y, y = y->parent;
}
return y;
}
}
/* *
* rb_search - returns a node with value 'equal' to @key (according to
* function @compare). If there're multiple nodes with value 'equal' to @key,
* the function returns the one highest in the tree.
* */
rb_node *
rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key) {
rb_node *nil = tree->nil, *node = tree->root->left;
int r;
while (node != nil && (r = compare(node, key)) != 0) {
node = (r > 0) ? node->left : node->right;
}
return (node != nil) ? node : NULL;
}
/* *
* rb_delete_fixup - performs rotations and changes colors to restore
* red-black properties after a node is deleted.
* */
static void
rb_delete_fixup(rb_tree *tree, rb_node *node) {
rb_node *x = node, *w, *root = tree->root->left;
#define RB_DELETE_FIXUP_SUB(_left, _right) \
do { \
w = x->parent->_right; \
if (w->red) { \
w->red = 0; \
x->parent->red = 1; \
rb_##_left##_rotate(tree, x->parent); \
w = x->parent->_right; \
} \
if (!w->_left->red && !w->_right->red) { \
w->red = 1; \
x = x->parent; \
} \
else { \
if (!w->_right->red) { \
w->_left->red = 0; \
w->red = 1; \
rb_##_right##_rotate(tree, w); \
w = x->parent->_right; \
} \
w->red = x->parent->red; \
x->parent->red = 0; \
w->_right->red = 0; \
rb_##_left##_rotate(tree, x->parent); \
x = root; \
} \
} while (0)
while (x != root && !x->red) {
if (x == x->parent->left) {
RB_DELETE_FIXUP_SUB(left, right);
}
else {
RB_DELETE_FIXUP_SUB(right, left);
}
}
x->red = 0;
#undef RB_DELETE_FIXUP_SUB
}
/* *
* rb_delete - deletes @node from @tree, and calls rb_delete_fixup to
* restore red-black properties.
* */
void
rb_delete(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node;
rb_node *nil = tree->nil, *root = tree->root;
y = (z->left == nil || z->right == nil) ? z : rb_tree_successor(tree, z);
x = (y->left != nil) ? y->left : y->right;
assert(y != root && y != nil);
x->parent = y->parent;
if (y == y->parent->left) {
y->parent->left = x;
}
else {
y->parent->right = x;
}
bool need_fixup = !(y->red);
if (y != z) {
if (z == z->parent->left) {
z->parent->left = y;
}
else {
z->parent->right = y;
}
z->left->parent = z->right->parent = y;
*y = *z;
}
if (need_fixup) {
rb_delete_fixup(tree, x);
}
}
/* rb_tree_destroy - destroy a tree and free memory */
void
rb_tree_destroy(rb_tree *tree) {
kfree(tree->root);
kfree(tree->nil);
kfree(tree);
}
/* *
* rb_node_prev - returns the predecessor node of @node in @tree,
* or 'NULL' if no predecessor exists.
* */
rb_node *
rb_node_prev(rb_tree *tree, rb_node *node) {
rb_node *prev = rb_tree_predecessor(tree, node);
return (prev != tree->nil) ? prev : NULL;
}
/* *
* rb_node_next - returns the successor node of @node in @tree,
* or 'NULL' if no successor exists.
* */
rb_node *
rb_node_next(rb_tree *tree, rb_node *node) {
rb_node *next = rb_tree_successor(tree, node);
return (next != tree->nil) ? next : NULL;
}
/* rb_node_root - returns the root node of a @tree, or 'NULL' if tree is empty */
rb_node *
rb_node_root(rb_tree *tree) {
rb_node *node = tree->root->left;
return (node != tree->nil) ? node : NULL;
}
/* rb_node_left - gets the left child of @node, or 'NULL' if no such node */
rb_node *
rb_node_left(rb_tree *tree, rb_node *node) {
rb_node *left = node->left;
return (left != tree->nil) ? left : NULL;
}
/* rb_node_right - gets the right child of @node, or 'NULL' if no such node */
rb_node *
rb_node_right(rb_tree *tree, rb_node *node) {
rb_node *right = node->right;
return (right != tree->nil) ? right : NULL;
}
int
check_tree(rb_tree *tree, rb_node *node) {
rb_node *nil = tree->nil;
if (node == nil) {
assert(!node->red);
return 1;
}
if (node->left != nil) {
assert(COMPARE(tree, node, node->left) >= 0);
assert(node->left->parent == node);
}
if (node->right != nil) {
assert(COMPARE(tree, node, node->right) <= 0);
assert(node->right->parent == node);
}
if (node->red) {
assert(!node->left->red && !node->right->red);
}
int hb_left = check_tree(tree, node->left);
int hb_right = check_tree(tree, node->right);
assert(hb_left == hb_right);
int hb = hb_left;
if (!node->red) {
hb ++;
}
return hb;
}
static void *
check_safe_kmalloc(size_t size) {
void *ret = kmalloc(size);
assert(ret != NULL);
return ret;
}
struct check_data {
long data;
rb_node rb_link;
};
#define rbn2data(node) \
(to_struct(node, struct check_data, rb_link))
static inline int
check_compare1(rb_node *node1, rb_node *node2) {
return rbn2data(node1)->data - rbn2data(node2)->data;
}
static inline int
check_compare2(rb_node *node, void *key) {
return rbn2data(node)->data - (long)key;
}
void
check_rb_tree(void) {
rb_tree *tree = rb_tree_create(check_compare1);
assert(tree != NULL);
rb_node *nil = tree->nil, *root = tree->root;
assert(!nil->red && root->left == nil);
int total = 1000;
struct check_data **all = check_safe_kmalloc(sizeof(struct check_data *) * total);
long i;
for (i = 0; i < total; i ++) {
all[i] = check_safe_kmalloc(sizeof(struct check_data));
all[i]->data = i;
}
int *mark = check_safe_kmalloc(sizeof(int) * total);
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
int j = (rand() % (total - i)) + i;
struct check_data *z = all[i];
all[i] = all[j];
all[j] = z;
}
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_node *node;
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)(all[i]->data));
assert(node != NULL && node == &(all[i]->rb_link));
}
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)i);
assert(node != NULL && rbn2data(node)->data == i);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(!nil->red && root->left == nil);
long max = 32;
if (max > total) {
max = total;
}
for (i = 0; i < max; i ++) {
all[i]->data = max;
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
for (i = 0; i < max; i ++) {
node = rb_search(tree, check_compare2, (void *)max);
assert(node != NULL && rbn2data(node)->data == max);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(rb_tree_empty(tree));
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_tree_destroy(tree);
for (i = 0; i < total; i ++) {
kfree(all[i]);
}
kfree(mark);
kfree(all);
}

code/lab5/kern/libs/rb_tree.h (+0, -32)

@@ -1,32 +0,0 @@
#ifndef __KERN_LIBS_RB_TREE_H__
#define __KERN_LIBS_RB_TREE_H__
#include <defs.h>
typedef struct rb_node {
bool red; // if red = 0, it's a black node
struct rb_node *parent;
struct rb_node *left, *right;
} rb_node;
typedef struct rb_tree {
// compare function should return -1 if *node1 < *node2, 1 if *node1 > *node2, and 0 otherwise
int (*compare)(rb_node *node1, rb_node *node2);
struct rb_node *nil, *root;
} rb_tree;
rb_tree *rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2));
void rb_tree_destroy(rb_tree *tree);
void rb_insert(rb_tree *tree, rb_node *node);
void rb_delete(rb_tree *tree, rb_node *node);
rb_node *rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key);
rb_node *rb_node_prev(rb_tree *tree, rb_node *node);
rb_node *rb_node_next(rb_tree *tree, rb_node *node);
rb_node *rb_node_root(rb_tree *tree);
rb_node *rb_node_left(rb_tree *tree, rb_node *node);
rb_node *rb_node_right(rb_tree *tree, rb_node *node);
void check_rb_tree(void);
#endif /* !__KERN_LIBS_RBTREE_H__ */

code/lab5/kern/mm/kmalloc.c (+253, -583)

@@ -6,635 +6,305 @@
#include <sync.h>
#include <pmm.h>
#include <stdio.h>
#include <rb_tree.h>
/* The slab allocator used in ucore is based on an algorithm first introduced by
Jeff Bonwick for the SunOS operating system. The paper can be downloaded from
http://citeseer.ist.psu.edu/bonwick94slab.html
An implementation of the Slab Allocator as described in outline in:
UNIX Internals: The New Frontiers by Uresh Vahalia
Pub: Prentice Hall ISBN 0-13-101908-2
Within a kernel, a considerable amount of memory is allocated for a finite set
of objects such as file descriptors and other common structures. Jeff found that
the amount of time required to initialize a regular object in the kernel exceeded
the amount of time required to allocate and deallocate it. His conclusion was
that instead of freeing the memory back to a global pool, he would have the memory
remain initialized for its intended purpose.
In our simple slab implementation, the high-level organization of the slab
structures is simplified. At the highest level is an array slab_cache[SLAB_CACHE_NUM],
and each array element is a slab_cache which has slab chains. Each slab_cache has
two lists: one chains the fully allocated slabs, and the other chains the not-full
(possibly empty) slabs. Each slab has a fixed number (2^n) of pages, and in each
slab there are many objects with the same fixed size (32B ~ 128KB).
+----------------------------------+
| slab_cache[0] for 0~32B obj |
+----------------------------------+
| slab_cache[1] for 33B~64B obj |-->lists for slabs
+----------------------------------+ |
| slab_cache[2] for 65B~128B obj | |
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+----------------------------------+ |
| slab_cache[12]for 64KB~128KB obj | |
+----------------------------------+ |
|
slabs_full/slabs_not +---------------------+
-<-----------<----------<-+
| | |
slab1 slab2 slab3...
|
|-------|-------|
pages1 pages2 pages3...
|
|
|
slab_t+n*bufctl_t+obj1-obj2-obj3...objn (the size of obj is small)
|
OR
|
obj1-obj2-obj3...objn WITH slab_t+n*bufctl_t in another slab (the size of obj is BIG)
The important functions are:
kmem_cache_grow(kmem_cache_t *cachep)
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp)
kmalloc(size_t size): used by outside functions that need to dynamically allocate memory
kfree(void *objp): used by outside functions that need to dynamically release memory
*/
#define BUFCTL_END 0xFFFFFFFFL // the signature of the last bufctl
#define SLAB_LIMIT 0xFFFFFFFEL // the max value of obj number
typedef size_t kmem_bufctl_t; //the index of obj in slab
typedef struct slab_s {
list_entry_t slab_link; // the list entry linked to kmem_cache list
void *s_mem; // the kernel virtual address of the first obj in slab
size_t inuse; // the number of allocated objs
size_t offset; // the first obj's offset value in slab
kmem_bufctl_t free; // the first free obj's index in slab
} slab_t;
// get the slab address according to the link element (see list.h)
#define le2slab(le, member) \
to_struct((le), slab_t, member)
typedef struct kmem_cache_s kmem_cache_t;
struct kmem_cache_s {
list_entry_t slabs_full; // list for fully allocated slabs
list_entry_t slabs_notfull; // list for not-fully allocated slabs
size_t objsize; // the fixed size of obj
size_t num; // number of objs per slab
size_t offset; // this first obj's offset in slab
bool off_slab; // the control part of slab in slab or not.
/* order of pages per slab (2^n) */
size_t page_order;
kmem_cache_t *slab_cachep;
};
#define MIN_SIZE_ORDER 5 // 32
#define MAX_SIZE_ORDER 17 // 128k
#define SLAB_CACHE_NUM (MAX_SIZE_ORDER - MIN_SIZE_ORDER + 1)
/*
* SLOB Allocator: Simple List Of Blocks
*
* Matt Mackall <mpm@selenic.com> 12/30/03
*
* How SLOB works:
*
* The core of SLOB is a traditional K&R style heap allocator, with
* support for returning aligned objects. The granularity of this
* allocator is 8 bytes on x86, though it's perhaps possible to reduce
* this to 4 if it's deemed worth the effort. The slob heap is a
* singly-linked list of pages from __get_free_page, grown on demand
* and allocation from the heap is currently first-fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are 8-byte aligned and prepended with an 8-byte header.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* __get_free_pages directly so that it can return page-aligned blocks
* and keeps a linked list of such pages and their orders. These
* objects are detected in kfree() by their page alignment.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
* the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
* their size, no separate size bookkeeping is necessary and there is
* essentially no allocation space overhead.
*/
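/* A worked example of the header scheme described above (a sketch only,
 * assuming the 8-byte slob_t defined below, i.e. 32-bit x86): kmalloc(100)
 * stays under PAGE_SIZE - SLOB_UNIT, so it takes the slob_alloc path:
 *
 *     slob_t *m = slob_alloc(100 + SLOB_UNIT, 0, 0);  // reserves SLOB_UNITS(108) = 14 units
 *     void *p = (void *)(m + 1);                      // caller gets the area after the header
 *     // kfree(p) later finds the header at (slob_t *)p - 1 and gives back the 14 units
 */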
//some helper
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)
typedef unsigned int gfp_t;
#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif
#ifndef ALIGN
#define ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
#endif
struct slob_block {
int units;
struct slob_block *next;
};
typedef struct slob_block slob_t;
static kmem_cache_t slab_cache[SLAB_CACHE_NUM];
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
static void init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align);
static void check_slab(void);
struct bigblock {
int order;
void *pages;
struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
//slab_init - call init_kmem_cache function to reset the slab_cache array
static void
slab_init(void) {
size_t i;
//the align bit for obj in slab. 2^n could be better for performance
size_t align = 16;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
init_kmem_cache(slab_cache + i, 1 << (i + MIN_SIZE_ORDER), align);
}
check_slab();
}
inline void
kmalloc_init(void) {
slab_init();
cprintf("kmalloc_init() succeeded!\n");
}
//slab_allocated - sum up the total size of allocated objs
static size_t
slab_allocated(void) {
size_t total = 0;
int i;
bool intr_flag;
local_intr_save(intr_flag);
{
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
list_entry_t *list, *le;
list = le = &(cachep->slabs_full);
while ((le = list_next(le)) != list) {
total += cachep->num * cachep->objsize;
}
list = le = &(cachep->slabs_notfull);
while ((le = list_next(le)) != list) {
slab_t *slabp = le2slab(le, slab_link);
total += slabp->inuse * cachep->objsize;
}
}
}
local_intr_restore(intr_flag);
return total;
static void* __slob_get_free_pages(gfp_t gfp, int order)
{
struct Page * page = alloc_pages(1 << order);
if(!page)
return NULL;
return page2kva(page);
}
inline size_t
kallocated(void) {
return slab_allocated();
}
#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)
// slab_mgmt_size - get the size of slab control area (slab_t+num*kmem_bufctl_t)
static size_t
slab_mgmt_size(size_t num, size_t align) {
return ROUNDUP(sizeof(slab_t) + num * sizeof(kmem_bufctl_t), align);
static inline void __slob_free_pages(unsigned long kva, int order)
{
free_pages(kva2page(kva), 1 << order);
}
// cache_estimate - estimate the number of objs in a slab
static void
cache_estimate(size_t order, size_t objsize, size_t align, bool off_slab, size_t *remainder, size_t *num) {
size_t nr_objs, mgmt_size;
size_t slab_size = (PGSIZE << order);
if (off_slab) {
mgmt_size = 0;
nr_objs = slab_size / objsize;
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
}
else {
nr_objs = (slab_size - sizeof(slab_t)) / (objsize + sizeof(kmem_bufctl_t));
while (slab_mgmt_size(nr_objs, align) + nr_objs * objsize > slab_size) {
nr_objs --;
}
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
*remainder = slab_size - nr_objs * objsize - mgmt_size;
static void slob_free(void *b, int size);
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
assert( (size + SLOB_UNIT) < PAGE_SIZE );
slob_t *prev, *cur, *aligned = 0;
int delta = 0, units = SLOB_UNITS(size);
unsigned long flags;
spin_lock_irqsave(&slob_lock, flags);
prev = slobfree;
for (cur = prev->next; ; prev = cur, cur = cur->next) {
if (align) {
aligned = (slob_t *)ALIGN((unsigned long)cur, align);
delta = aligned - cur;
}
if (cur->units >= units + delta) { /* room enough? */
if (delta) { /* need to fragment head to align? */
aligned->units = cur->units - delta;
aligned->next = cur->next;
cur->next = aligned;
cur->units = delta;
prev = cur;
cur = aligned;
}
if (cur->units == units) /* exact fit? */
prev->next = cur->next; /* unlink */
else { /* fragment */
prev->next = cur + units;
prev->next->units = cur->units - units;
prev->next->next = cur->next;
cur->units = units;
}
slobfree = prev;
spin_unlock_irqrestore(&slob_lock, flags);
return cur;
}
if (cur == slobfree) {
spin_unlock_irqrestore(&slob_lock, flags);
if (size == PAGE_SIZE) /* trying to shrink arena? */
return 0;
cur = (slob_t *)__slob_get_free_page(gfp);
if (!cur)
return 0;
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
}
}
}
// calculate_slab_order - estimate the size (4K~4M) of a slab
// parameters:
// cachep: the slab_cache
// objsize: the size of obj
// align: align bit for objs
// off_slab: whether the control part of the slab is kept off the slab
// left_over: the size of the unusable (wasted) area in the slab
static void
calculate_slab_order(kmem_cache_t *cachep, size_t objsize, size_t align, bool off_slab, size_t *left_over) {
size_t order;
for (order = 0; order <= KMALLOC_MAX_ORDER; order ++) {
size_t num, remainder;
cache_estimate(order, objsize, align, off_slab, &remainder, &num);
if (num != 0) {
if (off_slab) {
size_t off_slab_limit = objsize - sizeof(slab_t);
off_slab_limit /= sizeof(kmem_bufctl_t);
if (num > off_slab_limit) {
panic("off_slab: objsize = %d, num = %d.", objsize, num);
}
}
if (remainder * 8 <= (PGSIZE << order)) {
cachep->num = num;
cachep->page_order = order;
if (left_over != NULL) {
*left_over = remainder;
}
return ;
}
}
}
panic("calculate_slab_over: failed.");
}
static void slob_free(void *block, int size)
{
slob_t *cur, *b = (slob_t *)block;
unsigned long flags;
// getorder - find the smallest order such that n <= 2^order
static inline size_t
getorder(size_t n) {
size_t order = MIN_SIZE_ORDER, order_size = (1 << order);
for (; order <= MAX_SIZE_ORDER; order ++, order_size <<= 1) {
if (n <= order_size) {
return order;
}
}
panic("getorder failed. %d\n", n);
}
if (!block)
return;
// init_kmem_cache - initialize a slab_cache cachep for objs of size objsize
static void
init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align) {
list_init(&(cachep->slabs_full));
list_init(&(cachep->slabs_notfull));
if (size)
b->units = SLOB_UNITS(size);
objsize = ROUNDUP(objsize, align);
cachep->objsize = objsize;
cachep->off_slab = (objsize >= (PGSIZE >> 3));
/* Find reinsertion point */
spin_lock_irqsave(&slob_lock, flags);
for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
if (cur >= cur->next && (b > cur || b < cur->next))
break;
size_t left_over;
calculate_slab_order(cachep, objsize, align, cachep->off_slab, &left_over);
if (b + b->units == cur->next) {
b->units += cur->next->units;
b->next = cur->next->next;
} else
b->next = cur->next;
assert(cachep->num > 0);
if (cur + cur->units == b) {
cur->units += b->units;
cur->next = b->next;
} else
cur->next = b;
size_t mgmt_size = slab_mgmt_size(cachep->num, align);
slobfree = cur;
if (cachep->off_slab && left_over >= mgmt_size) {
cachep->off_slab = 0;
}
if (cachep->off_slab) {
cachep->offset = 0;
cachep->slab_cachep = slab_cache + (getorder(mgmt_size) - MIN_SIZE_ORDER);
}
else {
cachep->offset = mgmt_size;
}
spin_unlock_irqrestore(&slob_lock, flags);
}
static void *kmem_cache_alloc(kmem_cache_t *cachep);
#define slab_bufctl(slabp) \
((kmem_bufctl_t*)(((slab_t *)(slabp)) + 1))
// kmem_cache_slabmgmt - get the address of a slab according to page
// - and initialize the slab according to cachep
static slab_t *
kmem_cache_slabmgmt(kmem_cache_t *cachep, struct Page *page) {
void *objp = page2kva(page);
slab_t *slabp;
if (cachep->off_slab) {
if ((slabp = kmem_cache_alloc(cachep->slab_cachep)) == NULL) {
return NULL;
}
}
else {
slabp = page2kva(page);
}
slabp->inuse = 0;
slabp->offset = cachep->offset;
slabp->s_mem = objp + cachep->offset;
return slabp;
}
#define SET_PAGE_CACHE(page, cachep) \
do { \
struct Page *__page = (struct Page *)(page); \
kmem_cache_t **__cachepp = (kmem_cache_t **)&(__page->page_link.next); \
*__cachepp = (kmem_cache_t *)(cachep); \
} while (0)
#define SET_PAGE_SLAB(page, slabp) \
do { \
struct Page *__page = (struct Page *)(page); \
slab_t **__cachepp = (slab_t **)&(__page->page_link.prev); \
*__cachepp = (slab_t *)(slabp); \
} while (0)
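/* The two macros above stash the owning cache and slab into the page_link
 * pointers of each page in the slab (unused while the page is allocated);
 * GET_PAGE_CACHE / GET_PAGE_SLAB further down read them back, e.g. in kfree:
 *
 *     struct Page *page = kva2page(objp);
 *     kmem_cache_t *cachep = GET_PAGE_CACHE(page);  // recovered from page_link.next
 *     slab_t *slabp = GET_PAGE_SLAB(page);          // recovered from page_link.prev
 */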
// kmem_cache_grow - allocate a new slab by calling alloc_pages
// - set control area in the new slab
static bool
kmem_cache_grow(kmem_cache_t *cachep) {
struct Page *page = alloc_pages(1 << cachep->page_order);
if (page == NULL) {
goto failed;
}
slab_t *slabp;
if ((slabp = kmem_cache_slabmgmt(cachep, page)) == NULL) {
goto oops;
}
size_t order_size = (1 << cachep->page_order);
do {
//setup this page in the free list (see memlayout.h: struct page)???
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
//this page is used for slab
SetPageSlab(page);
page ++;
} while (-- order_size);
int i;
for (i = 0; i < cachep->num; i ++) {
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[cachep->num - 1] = BUFCTL_END;
slabp->free = 0;
bool intr_flag;
local_intr_save(intr_flag);
{
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
local_intr_restore(intr_flag);
return 1;
oops:
free_pages(page, 1 << cachep->page_order);
failed:
return 0;
}
// kmem_cache_alloc_one - allocate an obj in a slab
static void *
kmem_cache_alloc_one(kmem_cache_t *cachep, slab_t *slabp) {
slabp->inuse ++;
void *objp = slabp->s_mem + slabp->free * cachep->objsize;
slabp->free = slab_bufctl(slabp)[slabp->free];
if (slabp->free == BUFCTL_END) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_full), &(slabp->slab_link));
}
return objp;
void check_slob(void) {
cprintf("check_slob() success\n");
}
// kmem_cache_alloc - call the kmem_cache_alloc_one function to allocate an obj
// - if there is no free obj, try to allocate a new slab
static void *
kmem_cache_alloc(kmem_cache_t *cachep) {
void *objp;
bool intr_flag;
try_again:
local_intr_save(intr_flag);
if (list_empty(&(cachep->slabs_notfull))) {
goto alloc_new_slab;
}
slab_t *slabp = le2slab(list_next(&(cachep->slabs_notfull)), slab_link);
objp = kmem_cache_alloc_one(cachep, slabp);
local_intr_restore(intr_flag);
return objp;
alloc_new_slab:
local_intr_restore(intr_flag);
if (kmem_cache_grow(cachep)) {
goto try_again;
}
return NULL;
void
slob_init(void) {
cprintf("use SLOB allocator\n");
check_slob();
}
// kmalloc - simple interface used by outside functions
// - to allocate free memory using the kmem_cache_alloc function
void *
kmalloc(size_t size) {
assert(size > 0);
size_t order = getorder(size);
if (order > MAX_SIZE_ORDER) {
return NULL;
}
return kmem_cache_alloc(slab_cache + (order - MIN_SIZE_ORDER));
inline void
kmalloc_init(void) {
slob_init();
cprintf("kmalloc_init() succeeded!\n");
}
static void kmem_cache_free(kmem_cache_t *cachep, void *obj);
// kmem_slab_destroy - call free_pages & kmem_cache_free to free a slab
static void
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp) {
struct Page *page = kva2page(slabp->s_mem - slabp->offset);
struct Page *p = page;
size_t order_size = (1 << cachep->page_order);
do {
assert(PageSlab(p));
ClearPageSlab(p);
p ++;
} while (-- order_size);
free_pages(page, 1 << cachep->page_order);
if (cachep->off_slab) {
kmem_cache_free(cachep->slab_cachep, slabp);
}
size_t
slob_allocated(void) {
return 0;
}
// kmem_cache_free_one - free an obj in a slab
// - if slab->inuse==0, then free the slab
static void
kmem_cache_free_one(kmem_cache_t *cachep, slab_t *slabp, void *objp) {
//should not use divide operator ???
size_t objnr = (objp - slabp->s_mem) / cachep->objsize;
slab_bufctl(slabp)[objnr] = slabp->free;
slabp->free = objnr;
slabp->inuse --;
if (slabp->inuse == 0) {
list_del(&(slabp->slab_link));
kmem_slab_destroy(cachep, slabp);
}
else if (slabp->inuse == cachep->num - 1) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
size_t
kallocated(void) {
return slob_allocated();
}
#define GET_PAGE_CACHE(page) \
(kmem_cache_t *)((page)->page_link.next)
#define GET_PAGE_SLAB(page) \
(slab_t *)((page)->page_link.prev)
// kmem_cache_free - call kmem_cache_free_one function to free an obj
static void
kmem_cache_free(kmem_cache_t *cachep, void *objp) {
bool intr_flag;
struct Page *page = kva2page(objp);
if (!PageSlab(page)) {
panic("not a slab page %08x\n", objp);
}
local_intr_save(intr_flag);
{
kmem_cache_free_one(cachep, GET_PAGE_SLAB(page), objp);
}
local_intr_restore(intr_flag);
static int find_order(int size)
{
int order = 0;
for ( ; size > 4096 ; size >>=1)
order++;
return order;
}
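/* For the big-block path below (a hedged sketch, assuming PAGE_SIZE = 4096):
 * a request of 100000 bytes is not below PAGE_SIZE - SLOB_UNIT, so __kmalloc
 * records it in a bigblock_t instead of carving it out of the slob arena:
 *
 *     int order = find_order(100000);  // 5, since 100000 fits in 2^5 pages (128KB)
 *     // __slob_get_free_pages(gfp, 5) returns 32 page-aligned pages, and kfree()
 *     // later recognizes the pointer by its page alignment.
 */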
// kfree - simple interface used by outside functions to free an obj
void
kfree(void *objp) {
kmem_cache_free(GET_PAGE_CACHE(kva2page(objp)), objp);
static void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
bigblock_t *bb;
unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
return m ? (void *)(m + 1) : 0;
}
bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
if (!bb)
return 0;
bb->order = find_order(size);
bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);
if (bb->pages) {
spin_lock_irqsave(&block_lock, flags);
bb->next = bigblocks;
bigblocks = bb;
spin_unlock_irqrestore(&block_lock, flags);
return bb->pages;
}
slob_free(bb, sizeof(bigblock_t));
return 0;
}
static inline void
check_slab_empty(void) {
int i;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
assert(list_empty(&(cachep->slabs_full)));
assert(list_empty(&(cachep->slabs_notfull)));
}
void *
kmalloc(size_t size)
{
return __kmalloc(size, 0);
}
void
check_slab(void) {
int i;
void *v0, *v1;
size_t nr_free_pages_store = nr_free_pages();
size_t kernel_allocated_store = slab_allocated();
/* slab must be empty now */
check_slab_empty();
assert(slab_allocated() == 0);
kmem_cache_t *cachep0, *cachep1;
cachep0 = slab_cache;
assert(cachep0->objsize == 32 && cachep0->num > 1 && !cachep0->off_slab);
assert((v0 = kmalloc(16)) != NULL);
slab_t *slabp0, *slabp1;
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
assert(slabp0->inuse == 1 && list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
struct Page *p0, *p1;
size_t order_size;
p0 = kva2page(slabp0->s_mem - slabp0->offset), p1 = p0;
order_size = (1 << cachep0->page_order);
for (i = 0; i < cachep0->page_order; i ++, p1 ++) {
assert(PageSlab(p1));
assert(GET_PAGE_CACHE(p1) == cachep0 && GET_PAGE_SLAB(p1) == slabp0);
}
assert(v0 == slabp0->s_mem);
assert((v1 = kmalloc(16)) != NULL && v1 == v0 + 32);
kfree(v0);
assert(slabp0->free == 0);
kfree(v1);
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->page_order; i ++, p0 ++) {
assert(!PageSlab(p0));
}
v0 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
for (i = 0; i < cachep0->num - 1; i ++) {
kmalloc(16);
}
assert(slabp0->inuse == cachep0->num);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
v1 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
kfree(v0);
assert(list_empty(&(cachep0->slabs_full)));
assert(list_next(&(slabp0->slab_link)) == &(slabp1->slab_link)
|| list_next(&(slabp1->slab_link)) == &(slabp0->slab_link));
kfree(v1);
assert(!list_empty(&(cachep0->slabs_notfull)));
assert(list_next(&(cachep0->slabs_notfull)) == &(slabp0->slab_link));
assert(list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
v1 = kmalloc(16);
assert(v1 == v0);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->num; i ++) {
kfree(v1 + i * cachep0->objsize);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
void kfree(void *block)
{
bigblock_t *bb, **last = &bigblocks;
unsigned long flags;
if (!block)
return;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
/* might be on the big block list */
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
if (bb->pages == block) {
*last = bb->next;
spin_unlock_irqrestore(&block_lock, flags);
__slob_free_pages((unsigned long)block, bb->order);
slob_free(bb, sizeof(bigblock_t));
return;
}
}
spin_unlock_irqrestore(&block_lock, flags);
}
slob_free((slob_t *)block - 1, 0);
return;
}
cachep0 = slab_cache;
bool has_off_slab = 0;
for (i = 0; i < SLAB_CACHE_NUM; i ++, cachep0 ++) {
if (cachep0->off_slab) {
has_off_slab = 1;
cachep1 = cachep0->slab_cachep;
if (!cachep1->off_slab) {
break;
}
}
}
unsigned int ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
if (!has_off_slab) {
goto check_pass;
}
if (!block)
return 0;
assert(cachep0->off_slab && !cachep1->off_slab);
assert(cachep1 < cachep0);
if (!((unsigned long)block & (PAGE_SIZE-1))) {
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; bb = bb->next)
if (bb->pages == block) {
spin_unlock_irqrestore(&slob_lock, flags);
return PAGE_SIZE << bb->order;
}
spin_unlock_irqrestore(&block_lock, flags);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}
assert(list_empty(&(cachep1->slabs_full)));
assert(list_empty(&(cachep1->slabs_notfull)));
v0 = kmalloc(cachep0->objsize);
p0 = kva2page(v0);
assert(page2kva(p0) == v0);
if (cachep0->num == 1) {
assert(!list_empty(&(cachep0->slabs_full)));
slabp0 = le2slab(list_next(&(cachep0->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
}
assert(slabp0 != NULL);
if (cachep1->num == 1) {
assert(!list_empty(&(cachep1->slabs_full)));
slabp1 = le2slab(list_next(&(cachep1->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep1->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep1->slabs_notfull)), slab_link);
}
assert(slabp1 != NULL);
order_size = (1 << cachep0->page_order);
for (i = 0; i < order_size; i ++, p0 ++) {
assert(PageSlab(p0));
assert(GET_PAGE_CACHE(p0) == cachep0 && GET_PAGE_SLAB(p0) == slabp0);
}
kfree(v0);
check_pass:
check_rb_tree();
check_slab_empty();
assert(slab_allocated() == 0);
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == slab_allocated());
cprintf("check_slab() succeeded!\n");
}

+ 3
- 3
code/lab5/kern/mm/kmalloc.h View File

@ -1,5 +1,5 @@
#ifndef __KERN_MM_SLAB_H__
#define __KERN_MM_SLAB_H__
#ifndef __KERN_MM_KMALLOC_H__
#define __KERN_MM_KMALLOC_H__
#include <defs.h>
@ -12,5 +12,5 @@ void kfree(void *objp);
size_t kallocated(void);
#endif /* !__KERN_MM_SLAB_H__ */
#endif /* !__KERN_MM_KMALLOC_H__ */

+ 0
- 5
code/lab5/kern/mm/memlayout.h View File

@ -156,11 +156,6 @@ typedef struct {
unsigned int nr_free; // # of free pages in this free list
} free_area_t;
/* for slab style kmalloc */
#define PG_slab 2 // page frame is included in a slab
#define SetPageSlab(page) set_bit(PG_slab, &((page)->flags))
#define ClearPageSlab(page) clear_bit(PG_slab, &((page)->flags))
#define PageSlab(page) test_bit(PG_slab, &((page)->flags))
#endif /* !__ASSEMBLER__ */

+ 0
- 4
code/lab5/kern/mm/vmm.c View File

@ -257,8 +257,6 @@ check_vmm(void) {
check_vma_struct();
check_pgfault();
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vmm() succeeded.\n");
}
@ -305,8 +303,6 @@ check_vma_struct(void) {
mm_destroy(mm);
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vma_struct() succeeded!\n");
}

+ 1
- 2
code/lab5/kern/process/proc.c View File

@ -802,8 +802,7 @@ init_main(void *arg) {
assert(nr_process == 2);
assert(list_next(&proc_list) == &(initproc->list_link));
assert(list_prev(&proc_list) == &(initproc->list_link));
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == kallocated());
cprintf("init check memory pass.\n");
return 0;
}

+ 1
- 1
code/lab5/tools/grade.sh View File

@ -338,7 +338,7 @@ default_check() {
'PDE(001) fac00000-fb000000 00400000 -rw' \
' |-- PTE(000e0) faf00000-fafe0000 000e0000 urw' \
' |-- PTE(00001) fafeb000-fafec000 00001000 -rw' \
'check_slab() succeeded!' \
'check_slob() succeeded!' \
'check_vma_struct() succeeded!' \
'page fault at 0x00000100: K/W [no page found].' \
'check_pgfault() succeeded!' \

+ 0
- 528
code/lab6/kern/libs/rb_tree.c View File

@ -1,528 +0,0 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <kmalloc.h>
#include <rb_tree.h>
#include <assert.h>
/* rb_node_create - create a new rb_node */
static inline rb_node *
rb_node_create(void) {
return kmalloc(sizeof(rb_node));
}
/* rb_tree_empty - tests if tree is empty */
static inline bool
rb_tree_empty(rb_tree *tree) {
rb_node *nil = tree->nil, *root = tree->root;
return root->left == nil;
}
/* *
* rb_tree_create - creates a new red-black tree; the 'compare' function
* is required. Returns 'NULL' on failure.
*
* Note that, root->left should always point to the node that is the root
* of the tree. And nil points to a 'NULL' node which should always be
* black and may have arbitrary children and parent node.
* */
rb_tree *
rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2)) {
assert(compare != NULL);
rb_tree *tree;
rb_node *nil, *root;
if ((tree = kmalloc(sizeof(rb_tree))) == NULL) {
goto bad_tree;
}
tree->compare = compare;
if ((nil = rb_node_create()) == NULL) {
goto bad_node_cleanup_tree;
}
nil->parent = nil->left = nil->right = nil;
nil->red = 0;
tree->nil = nil;
if ((root = rb_node_create()) == NULL) {
goto bad_node_cleanup_nil;
}
root->parent = root->left = root->right = nil;
root->red = 0;
tree->root = root;
return tree;
bad_node_cleanup_nil:
kfree(nil);
bad_node_cleanup_tree:
kfree(tree);
bad_tree:
return NULL;
}
/* *
* FUNC_ROTATE - rotates as described in "Introduction to Algorithms".
*
* For example, FUNC_ROTATE(rb_left_rotate, left, right) can be expanded to a
* left-rotate function, which requires a red-black 'tree' and a node 'x'
* to be rotated on. Basically, this function, named rb_left_rotate, makes the
* parent of 'x' be the left child of 'x', 'x' the parent of its parent before
* rotation and finally fixes other nodes accordingly.
*
* FUNC_ROTATE(xx, left, right) means left-rotate,
* and FUNC_ROTATE(xx, right, left) means right-rotate.
* */
#define FUNC_ROTATE(func_name, _left, _right) \
static void \
func_name(rb_tree *tree, rb_node *x) { \
rb_node *nil = tree->nil, *y = x->_right; \
assert(x != tree->root && x != nil && y != nil); \
x->_right = y->_left; \
if (y->_left != nil) { \
y->_left->parent = x; \
} \
y->parent = x->parent; \
if (x == x->parent->_left) { \
x->parent->_left = y; \
} \
else { \
x->parent->_right = y; \
} \
y->_left = x; \
x->parent = y; \
assert(!(nil->red)); \
}
FUNC_ROTATE(rb_left_rotate, left, right);
FUNC_ROTATE(rb_right_rotate, right, left);
#undef FUNC_ROTATE
#define COMPARE(tree, node1, node2) \
((tree))->compare((node1), (node2))
/* *
* rb_insert_binary - insert @node to red-black @tree as if it were
* a regular binary tree. This function is only intended to be called
* by function rb_insert.
* */
static inline void
rb_insert_binary(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node, *nil = tree->nil, *root = tree->root;
z->left = z->right = nil;
y = root, x = y->left;
while (x != nil) {
y = x;
x = (COMPARE(tree, x, node) > 0) ? x->left : x->right;
}
z->parent = y;
if (y == root || COMPARE(tree, y, z) > 0) {
y->left = z;
}
else {
y->right = z;
}
}
/* rb_insert - insert a node to red-black tree */
void
rb_insert(rb_tree *tree, rb_node *node) {
rb_insert_binary(tree, node);
node->red = 1;
rb_node *x = node, *y;
#define RB_INSERT_SUB(_left, _right) \
do { \
y = x->parent->parent->_right; \
if (y->red) { \
x->parent->red = 0; \
y->red = 0; \
x->parent->parent->red = 1; \
x = x->parent->parent; \
} \
else { \
if (x == x->parent->_right) { \
x = x->parent; \
rb_##_left##_rotate(tree, x); \
} \
x->parent->red = 0; \
x->parent->parent->red = 1; \
rb_##_right##_rotate(tree, x->parent->parent); \
} \
} while (0)
while (x->parent->red) {
if (x->parent == x->parent->parent->left) {
RB_INSERT_SUB(left, right);
}
else {
RB_INSERT_SUB(right, left);
}
}
tree->root->left->red = 0;
assert(!(tree->nil->red) && !(tree->root->red));
#undef RB_INSERT_SUB
}
/* *
* rb_tree_successor - returns the successor of @node, or nil
* if no successor exists. Note that @node must belong to @tree,
* and this function should only be called by rb_node_next.
* */
static inline rb_node *
rb_tree_successor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->right) != nil) {
while (y->left != nil) {
y = y->left;
}
return y;
}
else {
y = x->parent;
while (x == y->right) {
x = y, y = y->parent;
}
if (y == tree->root) {
return nil;
}
return y;
}
}
/* *
* rb_tree_predecessor - returns the predecessor of @node, or nil
* if no predecessor exists, like rb_tree_successor.
* */
static inline rb_node *
rb_tree_predecessor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->left) != nil) {
while (y->right != nil) {
y = y->right;
}
return y;
}
else {
y = x->parent;
while (x == y->left) {
if (y == tree->root) {
return nil;
}
x = y, y = y->parent;
}
return y;
}
}
/* *
* rb_search - returns a node with value 'equal' to @key (according to
* function @compare). If there are multiple nodes with value 'equal' to @key,
* the function returns the one highest in the tree.
* */
rb_node *
rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key) {
rb_node *nil = tree->nil, *node = tree->root->left;
int r;
while (node != nil && (r = compare(node, key)) != 0) {
node = (r > 0) ? node->left : node->right;
}
return (node != nil) ? node : NULL;
}
/* *
* rb_delete_fixup - performs rotations and changes colors to restore
* red-black properties after a node is deleted.
* */
static void
rb_delete_fixup(rb_tree *tree, rb_node *node) {
rb_node *x = node, *w, *root = tree->root->left;
#define RB_DELETE_FIXUP_SUB(_left, _right) \
do { \
w = x->parent->_right; \
if (w->red) { \
w->red = 0; \
x->parent->red = 1; \
rb_##_left##_rotate(tree, x->parent); \
w = x->parent->_right; \
} \
if (!w->_left->red && !w->_right->red) { \
w->red = 1; \
x = x->parent; \
} \
else { \
if (!w->_right->red) { \
w->_left->red = 0; \
w->red = 1; \
rb_##_right##_rotate(tree, w); \
w = x->parent->_right; \
} \
w->red = x->parent->red; \
x->parent->red = 0; \
w->_right->red = 0; \
rb_##_left##_rotate(tree, x->parent); \
x = root; \
} \
} while (0)
while (x != root && !x->red) {
if (x == x->parent->left) {
RB_DELETE_FIXUP_SUB(left, right);
}
else {
RB_DELETE_FIXUP_SUB(right, left);
}
}
x->red = 0;
#undef RB_DELETE_FIXUP_SUB
}
/* *
* rb_delete - deletes @node from @tree, and calls rb_delete_fixup to
* restore red-black properties.
* */
void
rb_delete(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node;
rb_node *nil = tree->nil, *root = tree->root;
y = (z->left == nil || z->right == nil) ? z : rb_tree_successor(tree, z);
x = (y->left != nil) ? y->left : y->right;
assert(y != root && y != nil);
x->parent = y->parent;
if (y == y->parent->left) {
y->parent->left = x;
}
else {
y->parent->right = x;
}
bool need_fixup = !(y->red);
if (y != z) {
if (z == z->parent->left) {
z->parent->left = y;
}
else {
z->parent->right = y;
}
z->left->parent = z->right->parent = y;
*y = *z;
}
if (need_fixup) {
rb_delete_fixup(tree, x);
}
}
/* rb_tree_destroy - destroy a tree and free memory */
void
rb_tree_destroy(rb_tree *tree) {
kfree(tree->root);
kfree(tree->nil);
kfree(tree);
}
/* *
* rb_node_prev - returns the predecessor node of @node in @tree,
* or 'NULL' if no predecessor exists.
* */
rb_node *
rb_node_prev(rb_tree *tree, rb_node *node) {
rb_node *prev = rb_tree_predecessor(tree, node);
return (prev != tree->nil) ? prev : NULL;
}
/* *
* rb_node_next - returns the successor node of @node in @tree,
* or 'NULL' if no successor exists.
* */
rb_node *
rb_node_next(rb_tree *tree, rb_node *node) {
rb_node *next = rb_tree_successor(tree, node);
return (next != tree->nil) ? next : NULL;
}
/* rb_node_root - returns the root node of a @tree, or 'NULL' if tree is empty */
rb_node *
rb_node_root(rb_tree *tree) {
rb_node *node = tree->root->left;
return (node != tree->nil) ? node : NULL;
}
/* rb_node_left - gets the left child of @node, or 'NULL' if no such node */
rb_node *
rb_node_left(rb_tree *tree, rb_node *node) {
rb_node *left = node->left;
return (left != tree->nil) ? left : NULL;
}
/* rb_node_right - gets the right child of @node, or 'NULL' if no such node */
rb_node *
rb_node_right(rb_tree *tree, rb_node *node) {
rb_node *right = node->right;
return (right != tree->nil) ? right : NULL;
}
int
check_tree(rb_tree *tree, rb_node *node) {
rb_node *nil = tree->nil;
if (node == nil) {
assert(!node->red);
return 1;
}
if (node->left != nil) {
assert(COMPARE(tree, node, node->left) >= 0);
assert(node->left->parent == node);
}
if (node->right != nil) {
assert(COMPARE(tree, node, node->right) <= 0);
assert(node->right->parent == node);
}
if (node->red) {
assert(!node->left->red && !node->right->red);
}
int hb_left = check_tree(tree, node->left);
int hb_right = check_tree(tree, node->right);
assert(hb_left == hb_right);
int hb = hb_left;
if (!node->red) {
hb ++;
}
return hb;
}
static void *
check_safe_kmalloc(size_t size) {
void *ret = kmalloc(size);
assert(ret != NULL);
return ret;
}
struct check_data {
long data;
rb_node rb_link;
};
#define rbn2data(node) \
(to_struct(node, struct check_data, rb_link))
static inline int
check_compare1(rb_node *node1, rb_node *node2) {
return rbn2data(node1)->data - rbn2data(node2)->data;
}
static inline int
check_compare2(rb_node *node, void *key) {
return rbn2data(node)->data - (long)key;
}
void
check_rb_tree(void) {
rb_tree *tree = rb_tree_create(check_compare1);
assert(tree != NULL);
rb_node *nil = tree->nil, *root = tree->root;
assert(!nil->red && root->left == nil);
int total = 1000;
struct check_data **all = check_safe_kmalloc(sizeof(struct check_data *) * total);
long i;
for (i = 0; i < total; i ++) {
all[i] = check_safe_kmalloc(sizeof(struct check_data));
all[i]->data = i;
}
int *mark = check_safe_kmalloc(sizeof(int) * total);
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
int j = (rand() % (total - i)) + i;
struct check_data *z = all[i];
all[i] = all[j];
all[j] = z;
}
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_node *node;
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)(all[i]->data));
assert(node != NULL && node == &(all[i]->rb_link));
}
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)i);
assert(node != NULL && rbn2data(node)->data == i);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(!nil->red && root->left == nil);
long max = 32;
if (max > total) {
max = total;
}
for (i = 0; i < max; i ++) {
all[i]->data = max;
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
for (i = 0; i < max; i ++) {
node = rb_search(tree, check_compare2, (void *)max);
assert(node != NULL && rbn2data(node)->data == max);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(rb_tree_empty(tree));
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_tree_destroy(tree);
for (i = 0; i < total; i ++) {
kfree(all[i]);
}
kfree(mark);
kfree(all);
}

+ 0
- 32
code/lab6/kern/libs/rb_tree.h View File

@ -1,32 +0,0 @@
#ifndef __KERN_LIBS_RB_TREE_H__
#define __KERN_LIBS_RB_TREE_H__
#include <defs.h>
typedef struct rb_node {
bool red; // if red = 0, it's a black node
struct rb_node *parent;
struct rb_node *left, *right;
} rb_node;
typedef struct rb_tree {
// compare function should return -1 if *node1 < *node2, 1 if *node1 > *node2, and 0 otherwise
int (*compare)(rb_node *node1, rb_node *node2);
struct rb_node *nil, *root;
} rb_tree;
rb_tree *rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2));
void rb_tree_destroy(rb_tree *tree);
void rb_insert(rb_tree *tree, rb_node *node);
void rb_delete(rb_tree *tree, rb_node *node);
rb_node *rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key);
rb_node *rb_node_prev(rb_tree *tree, rb_node *node);
rb_node *rb_node_next(rb_tree *tree, rb_node *node);
rb_node *rb_node_root(rb_tree *tree);
rb_node *rb_node_left(rb_tree *tree, rb_node *node);
rb_node *rb_node_right(rb_tree *tree, rb_node *node);
void check_rb_tree(void);
#endif /* !__KERN_LIBS_RBTREE_H__ */

+ 253
- 583
code/lab6/kern/mm/kmalloc.c View File

@ -6,635 +6,305 @@
#include <sync.h>
#include <pmm.h>
#include <stdio.h>
#include <rb_tree.h>
/* The slab allocator used in ucore is based on an algorithm first introduced by
Jeff Bonwick for the SunOS operating system. The paper can be download from
http://citeseer.ist.psu.edu/bonwick94slab.html
An implementation of the Slab Allocator as described in outline in;
UNIX Internals: The New Frontiers by Uresh Vahalia
Pub: Prentice Hall ISBN 0-13-101908-2
Within a kernel, a considerable amount of memory is allocated for a finite set
of objects such as file descriptors and other common structures. Jeff found that
the amount of time required to initialize a regular object in the kernel exceeded
the amount of time required to allocate and deallocate it. His conclusion was
that instead of freeing the memory back to a global pool, he would have the memory
remain initialized for its intended purpose.
In our simple slab implementation, the high-level organization of the slab
structures is simplified. At the highest level is an array slab_cache[SLAB_CACHE_NUM],
and each array element is a slab_cache which has slab chains. Each slab_cache has
two lists: one list chains the fully allocated slabs, and the other chains the not-fully
allocated (maybe empty) slabs. Each slab has a fixed number (2^n) of pages, and in each
slab there are many objects of the same fixed size (32B ~ 128KB).
+----------------------------------+
| slab_cache[0] for 0~32B obj |
+----------------------------------+
| slab_cache[1] for 33B~64B obj |-->lists for slabs
+----------------------------------+ |
| slab_cache[2] for 65B~128B obj | |
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+----------------------------------+ |
| slab_cache[12]for 64KB~128KB obj | |
+----------------------------------+ |
|
slabs_full/slabs_not +---------------------+
-<-----------<----------<-+
| | |
slab1 slab2 slab3...
|
|-------|-------|
pages1 pages2 pages3...
|
|
|
slab_t+n*bufctl_t+obj1-obj2-obj3...objn (the size of obj is small)
|
OR
|
obj1-obj2-obj3...objn WITH slab_t+n*bufctl_t in another slab (the size of obj is BIG)
The important functions are:
kmem_cache_grow(kmem_cache_t *cachep)
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp)
kmalloc(size_t size): used by outside functions that need to dynamically allocate memory
kfree(void *objp): used by outside functions that need to dynamically release memory
*/
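/* The on-slab / off-slab split sketched above comes down to one size test in
 * init_kmem_cache below (illustrative numbers, assuming PGSIZE = 4096):
 *
 *     cachep->off_slab = (objsize >= (PGSIZE >> 3));  // objs of 512B and larger
 *
 * e.g. the 512B class starts out off-slab, keeping its slab_t and bufctl array
 * in a smaller cache (cachep->slab_cachep); init_kmem_cache may still fold the
 * control area back on-slab if enough left-over space remains.
 */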
#define BUFCTL_END 0xFFFFFFFFL // the signature of the last bufctl
#define SLAB_LIMIT 0xFFFFFFFEL // the max value of obj number
typedef size_t kmem_bufctl_t; //the index of obj in slab
typedef struct slab_s {
list_entry_t slab_link; // the list entry linked to kmem_cache list
void *s_mem; // the kernel virtual address of the first obj in slab
size_t inuse; // the number of allocated objs
size_t offset; // the first obj's offset value in slab
kmem_bufctl_t free; // the first free obj's index in slab
} slab_t;
// get the slab address according to the link element (see list.h)
#define le2slab(le, member) \
to_struct((le), slab_t, member)
typedef struct kmem_cache_s kmem_cache_t;
struct kmem_cache_s {
list_entry_t slabs_full; // list for fully allocated slabs
list_entry_t slabs_notfull; // list for not-fully allocated slabs
size_t objsize; // the fixed size of obj
size_t num; // number of objs per slab
size_t offset; // the first obj's offset in slab
bool off_slab; // whether the control part of the slab is kept off the slab (in another slab)
/* order of pages per slab (2^n) */
size_t page_order;
kmem_cache_t *slab_cachep;
};
#define MIN_SIZE_ORDER 5 // 32
#define MAX_SIZE_ORDER 17 // 128k
#define SLAB_CACHE_NUM (MAX_SIZE_ORDER - MIN_SIZE_ORDER + 1)
/*
* SLOB Allocator: Simple List Of Blocks
*
* Matt Mackall <mpm@selenic.com> 12/30/03
*
* How SLOB works:
*
* The core of SLOB is a traditional K&R style heap allocator, with
* support for returning aligned objects. The granularity of this
* allocator is 8 bytes on x86, though it's perhaps possible to reduce
* this to 4 if it's deemed worth the effort. The slob heap is a
* singly-linked list of pages from __get_free_page, grown on demand
* and allocation from the heap is currently first-fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are 8-byte aligned and prepended with a 8-byte header.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* __get_free_pages directly so that it can return page-aligned blocks
* and keeps a linked list of such pages and their orders. These
* objects are detected in kfree() by their page alignment.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
* the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
* their size, no separate size bookkeeping is necessary and there is
* essentially no allocation space overhead.
*/
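/* ksize() below reports what was actually reserved rather than what was asked
 * for (a sketch, assuming the 8-byte slob_t): after p = kmalloc(100), the
 * header stores 14 units, so ksize(p) returns 14 * SLOB_UNIT = 112 bytes
 * (the 8-byte header plus the rounded-up payload).
 */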
//some helper
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)
typedef unsigned int gfp_t;
#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif
#ifndef ALIGN
#define ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
#endif
struct slob_block {
int units;
struct slob_block *next;
};
typedef struct slob_block slob_t;
static kmem_cache_t slab_cache[SLAB_CACHE_NUM];
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
static void init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align);
static void check_slab(void);
struct bigblock {
int order;
void *pages;
struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
//slab_init - call init_kmem_cache function to reset the slab_cache array
static void
slab_init(void) {
size_t i;
//the align bit for obj in slab. 2^n could be better for performance
size_t align = 16;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
init_kmem_cache(slab_cache + i, 1 << (i + MIN_SIZE_ORDER), align);
}
check_slab();
}
inline void
kmalloc_init(void) {
slab_init();
cprintf("kmalloc_init() succeeded!\n");
}
//slab_allocated - sum up the total size of allocated objs
static size_t
slab_allocated(void) {
size_t total = 0;
int i;
bool intr_flag;
local_intr_save(intr_flag);
{
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
list_entry_t *list, *le;
list = le = &(cachep->slabs_full);
while ((le = list_next(le)) != list) {
total += cachep->num * cachep->objsize;
}
list = le = &(cachep->slabs_notfull);
while ((le = list_next(le)) != list) {
slab_t *slabp = le2slab(le, slab_link);
total += slabp->inuse * cachep->objsize;
}
}
}
local_intr_restore(intr_flag);
return total;
static void* __slob_get_free_pages(gfp_t gfp, int order)
{
struct Page * page = alloc_pages(1 << order);
if(!page)
return NULL;
return page2kva(page);
}
inline size_t
kallocated(void) {
return slab_allocated();
}
#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)
// slab_mgmt_size - get the size of slab control area (slab_t+num*kmem_bufctl_t)
static size_t
slab_mgmt_size(size_t num, size_t align) {
return ROUNDUP(sizeof(slab_t) + num * sizeof(kmem_bufctl_t), align);
static inline void __slob_free_pages(unsigned long kva, int order)
{
free_pages(kva2page(kva), 1 << order);
}
// cache_estimate - estimate the number of objs in a slab
static void
cache_estimate(size_t order, size_t objsize, size_t align, bool off_slab, size_t *remainder, size_t *num) {
size_t nr_objs, mgmt_size;
size_t slab_size = (PGSIZE << order);
if (off_slab) {
mgmt_size = 0;
nr_objs = slab_size / objsize;
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
}
else {
nr_objs = (slab_size - sizeof(slab_t)) / (objsize + sizeof(kmem_bufctl_t));
while (slab_mgmt_size(nr_objs, align) + nr_objs * objsize > slab_size) {
nr_objs --;
}
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
*remainder = slab_size - nr_objs * objsize - mgmt_size;
static void slob_free(void *b, int size);
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
assert( (size + SLOB_UNIT) < PAGE_SIZE );
slob_t *prev, *cur, *aligned = 0;
int delta = 0, units = SLOB_UNITS(size);
unsigned long flags;
spin_lock_irqsave(&slob_lock, flags);
prev = slobfree;
for (cur = prev->next; ; prev = cur, cur = cur->next) {
if (align) {
aligned = (slob_t *)ALIGN((unsigned long)cur, align);
delta = aligned - cur;
}
if (cur->units >= units + delta) { /* room enough? */
if (delta) { /* need to fragment head to align? */
aligned->units = cur->units - delta;
aligned->next = cur->next;
cur->next = aligned;
cur->units = delta;
prev = cur;
cur = aligned;
}
if (cur->units == units) /* exact fit? */
prev->next = cur->next; /* unlink */
else { /* fragment */
prev->next = cur + units;
prev->next->units = cur->units - units;
prev->next->next = cur->next;
cur->units = units;
}
slobfree = prev;
spin_unlock_irqrestore(&slob_lock, flags);
return cur;
}
if (cur == slobfree) {
spin_unlock_irqrestore(&slob_lock, flags);
if (size == PAGE_SIZE) /* trying to shrink arena? */
return 0;
cur = (slob_t *)__slob_get_free_page(gfp);
if (!cur)
return 0;
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
}
}
}
// calculate_slab_order - estimate the size (4K~4M) of a slab
// parameters:
// cachep: the slab_cache
// objsize: the size of obj
// align: align bit for objs
// off_slab: whether the control part of the slab is kept off the slab
// left_over: the size of the unusable (wasted) area in the slab
static void
calculate_slab_order(kmem_cache_t *cachep, size_t objsize, size_t align, bool off_slab, size_t *left_over) {
size_t order;
for (order = 0; order <= KMALLOC_MAX_ORDER; order ++) {
size_t num, remainder;
cache_estimate(order, objsize, align, off_slab, &remainder, &num);
if (num != 0) {
if (off_slab) {
size_t off_slab_limit = objsize - sizeof(slab_t);
off_slab_limit /= sizeof(kmem_bufctl_t);
if (num > off_slab_limit) {
panic("off_slab: objsize = %d, num = %d.", objsize, num);
}
}
if (remainder * 8 <= (PGSIZE << order)) {
cachep->num = num;
cachep->page_order = order;
if (left_over != NULL) {
*left_over = remainder;
}
return ;
}
}
}
panic("calculate_slab_over: failed.");
}
static void slob_free(void *block, int size)
{
slob_t *cur, *b = (slob_t *)block;
unsigned long flags;
// getorder - find the smallest order such that n <= 2^order
static inline size_t
getorder(size_t n) {
size_t order = MIN_SIZE_ORDER, order_size = (1 << order);
for (; order <= MAX_SIZE_ORDER; order ++, order_size <<= 1) {
if (n <= order_size) {
return order;
}
}
panic("getorder failed. %d\n", n);
}
if (!block)
return;
// init_kmem_cache - initialize a slab_cache cachep for objs of size objsize
static void
init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align) {
list_init(&(cachep->slabs_full));
list_init(&(cachep->slabs_notfull));
if (size)
b->units = SLOB_UNITS(size);
objsize = ROUNDUP(objsize, align);
cachep->objsize = objsize;
cachep->off_slab = (objsize >= (PGSIZE >> 3));
/* Find reinsertion point */
spin_lock_irqsave(&slob_lock, flags);
for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
if (cur >= cur->next && (b > cur || b < cur->next))
break;
size_t left_over;
calculate_slab_order(cachep, objsize, align, cachep->off_slab, &left_over);
if (b + b->units == cur->next) {
b->units += cur->next->units;
b->next = cur->next->next;
} else
b->next = cur->next;
assert(cachep->num > 0);
if (cur + cur->units == b) {
cur->units += b->units;
cur->next = b->next;
} else
cur->next = b;
size_t mgmt_size = slab_mgmt_size(cachep->num, align);
slobfree = cur;
if (cachep->off_slab && left_over >= mgmt_size) {
cachep->off_slab = 0;
}
if (cachep->off_slab) {
cachep->offset = 0;
cachep->slab_cachep = slab_cache + (getorder(mgmt_size) - MIN_SIZE_ORDER);
}
else {
cachep->offset = mgmt_size;
}
spin_unlock_irqrestore(&slob_lock, flags);
}
static void *kmem_cache_alloc(kmem_cache_t *cachep);
#define slab_bufctl(slabp) \
((kmem_bufctl_t*)(((slab_t *)(slabp)) + 1))
// kmem_cache_slabmgmt - get the address of a slab according to page
// - and initialize the slab according to cachep
static slab_t *
kmem_cache_slabmgmt(kmem_cache_t *cachep, struct Page *page) {
void *objp = page2kva(page);
slab_t *slabp;
if (cachep->off_slab) {
if ((slabp = kmem_cache_alloc(cachep->slab_cachep)) == NULL) {
return NULL;
}
}
else {
slabp = page2kva(page);
}
slabp->inuse = 0;
slabp->offset = cachep->offset;
slabp->s_mem = objp + cachep->offset;
return slabp;
}
#define SET_PAGE_CACHE(page, cachep) \
do { \
struct Page *__page = (struct Page *)(page); \
kmem_cache_t **__cachepp = (kmem_cache_t **)&(__page->page_link.next); \
*__cachepp = (kmem_cache_t *)(cachep); \
} while (0)
#define SET_PAGE_SLAB(page, slabp) \
do { \
struct Page *__page = (struct Page *)(page); \
slab_t **__cachepp = (slab_t **)&(__page->page_link.prev); \
*__cachepp = (slab_t *)(slabp); \
} while (0)
// kmem_cache_grow - allocate a new slab by calling alloc_pages
// - set control area in the new slab
static bool
kmem_cache_grow(kmem_cache_t *cachep) {
struct Page *page = alloc_pages(1 << cachep->page_order);
if (page == NULL) {
goto failed;
}
slab_t *slabp;
if ((slabp = kmem_cache_slabmgmt(cachep, page)) == NULL) {
goto oops;
}
size_t order_size = (1 << cachep->page_order);
do {
//setup this page in the free list (see memlayout.h: struct page)???
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
//this page is used for slab
SetPageSlab(page);
page ++;
} while (-- order_size);
int i;
for (i = 0; i < cachep->num; i ++) {
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[cachep->num - 1] = BUFCTL_END;
slabp->free = 0;
bool intr_flag;
local_intr_save(intr_flag);
{
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
local_intr_restore(intr_flag);
return 1;
oops:
free_pages(page, 1 << cachep->page_order);
failed:
return 0;
}
// kmem_cache_alloc_one - allocate an obj in a slab
static void *
kmem_cache_alloc_one(kmem_cache_t *cachep, slab_t *slabp) {
slabp->inuse ++;
void *objp = slabp->s_mem + slabp->free * cachep->objsize;
slabp->free = slab_bufctl(slabp)[slabp->free];
if (slabp->free == BUFCTL_END) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_full), &(slabp->slab_link));
}
return objp;
void check_slob(void) {
cprintf("check_slob() success\n");
}
// kmem_cache_alloc - call the kmem_cache_alloc_one function to allocate an obj
// - if there is no free obj, try to allocate a new slab
static void *
kmem_cache_alloc(kmem_cache_t *cachep) {
void *objp;
bool intr_flag;
try_again:
local_intr_save(intr_flag);
if (list_empty(&(cachep->slabs_notfull))) {
goto alloc_new_slab;
}
slab_t *slabp = le2slab(list_next(&(cachep->slabs_notfull)), slab_link);
objp = kmem_cache_alloc_one(cachep, slabp);
local_intr_restore(intr_flag);
return objp;
alloc_new_slab:
local_intr_restore(intr_flag);
if (kmem_cache_grow(cachep)) {
goto try_again;
}
return NULL;
void
slob_init(void) {
cprintf("use SLOB allocator\n");
check_slob();
}
// kmalloc - simple interface used by outside functions
// - to allocate free memory using the kmem_cache_alloc function
void *
kmalloc(size_t size) {
assert(size > 0);
size_t order = getorder(size);
if (order > MAX_SIZE_ORDER) {
return NULL;
}
return kmem_cache_alloc(slab_cache + (order - MIN_SIZE_ORDER));
inline void
kmalloc_init(void) {
slob_init();
cprintf("kmalloc_init() succeeded!\n");
}
static void kmem_cache_free(kmem_cache_t *cachep, void *obj);
// kmem_slab_destroy - call free_pages & kmem_cache_free to free a slab
static void
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp) {
struct Page *page = kva2page(slabp->s_mem - slabp->offset);
struct Page *p = page;
size_t order_size = (1 << cachep->page_order);
do {
assert(PageSlab(p));
ClearPageSlab(p);
p ++;
} while (-- order_size);
free_pages(page, 1 << cachep->page_order);
if (cachep->off_slab) {
kmem_cache_free(cachep->slab_cachep, slabp);
}
size_t
slob_allocated(void) {
return 0;
}
// kmem_cache_free_one - free an obj in a slab
// - if slab->inuse==0, then free the slab
static void
kmem_cache_free_one(kmem_cache_t *cachep, slab_t *slabp, void *objp) {
//should not use divide operator ???
size_t objnr = (objp - slabp->s_mem) / cachep->objsize;
slab_bufctl(slabp)[objnr] = slabp->free;
slabp->free = objnr;
slabp->inuse --;
if (slabp->inuse == 0) {
list_del(&(slabp->slab_link));
kmem_slab_destroy(cachep, slabp);
}
else if (slabp->inuse == cachep->num - 1) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
size_t
kallocated(void) {
return slob_allocated();
}
#define GET_PAGE_CACHE(page) \
(kmem_cache_t *)((page)->page_link.next)
#define GET_PAGE_SLAB(page) \
(slab_t *)((page)->page_link.prev)
// kmem_cache_free - call kmem_cache_free_one function to free an obj
static void
kmem_cache_free(kmem_cache_t *cachep, void *objp) {
bool intr_flag;
struct Page *page = kva2page(objp);
if (!PageSlab(page)) {
panic("not a slab page %08x\n", objp);
}
local_intr_save(intr_flag);
{
kmem_cache_free_one(cachep, GET_PAGE_SLAB(page), objp);
}
local_intr_restore(intr_flag);
static int find_order(int size)
{
int order = 0;
for ( ; size > 4096 ; size >>=1)
order++;
return order;
}
// kfree - simple interface used by outside functions to free an obj
void
kfree(void *objp) {
kmem_cache_free(GET_PAGE_CACHE(kva2page(objp)), objp);
static void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
bigblock_t *bb;
unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
return m ? (void *)(m + 1) : 0;
}
bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
if (!bb)
return 0;
bb->order = find_order(size);
bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);
if (bb->pages) {
spin_lock_irqsave(&block_lock, flags);
bb->next = bigblocks;
bigblocks = bb;
spin_unlock_irqrestore(&block_lock, flags);
return bb->pages;
}
slob_free(bb, sizeof(bigblock_t));
return 0;
}
static inline void
check_slab_empty(void) {
int i;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
assert(list_empty(&(cachep->slabs_full)));
assert(list_empty(&(cachep->slabs_notfull)));
}
void *
kmalloc(size_t size)
{
return __kmalloc(size, 0);
}
void
check_slab(void) {
int i;
void *v0, *v1;
size_t nr_free_pages_store = nr_free_pages();
size_t kernel_allocated_store = slab_allocated();
/* slab must be empty now */
check_slab_empty();
assert(slab_allocated() == 0);
kmem_cache_t *cachep0, *cachep1;
cachep0 = slab_cache;
assert(cachep0->objsize == 32 && cachep0->num > 1 && !cachep0->off_slab);
assert((v0 = kmalloc(16)) != NULL);
slab_t *slabp0, *slabp1;
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
assert(slabp0->inuse == 1 && list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
struct Page *p0, *p1;
size_t order_size;
p0 = kva2page(slabp0->s_mem - slabp0->offset), p1 = p0;
order_size = (1 << cachep0->page_order);
for (i = 0; i < cachep0->page_order; i ++, p1 ++) {
assert(PageSlab(p1));
assert(GET_PAGE_CACHE(p1) == cachep0 && GET_PAGE_SLAB(p1) == slabp0);
}
assert(v0 == slabp0->s_mem);
assert((v1 = kmalloc(16)) != NULL && v1 == v0 + 32);
kfree(v0);
assert(slabp0->free == 0);
kfree(v1);
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->page_order; i ++, p0 ++) {
assert(!PageSlab(p0));
}
v0 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
for (i = 0; i < cachep0->num - 1; i ++) {
kmalloc(16);
}
assert(slabp0->inuse == cachep0->num);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
v1 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
kfree(v0);
assert(list_empty(&(cachep0->slabs_full)));
assert(list_next(&(slabp0->slab_link)) == &(slabp1->slab_link)
|| list_next(&(slabp1->slab_link)) == &(slabp0->slab_link));
kfree(v1);
assert(!list_empty(&(cachep0->slabs_notfull)));
assert(list_next(&(cachep0->slabs_notfull)) == &(slabp0->slab_link));
assert(list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
v1 = kmalloc(16);
assert(v1 == v0);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->num; i ++) {
kfree(v1 + i * cachep0->objsize);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
void kfree(void *block)
{
bigblock_t *bb, **last = &bigblocks;
unsigned long flags;
if (!block)
return;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
/* might be on the big block list */
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
if (bb->pages == block) {
*last = bb->next;
spin_unlock_irqrestore(&block_lock, flags);
__slob_free_pages((unsigned long)block, bb->order);
slob_free(bb, sizeof(bigblock_t));
return;
}
}
spin_unlock_irqrestore(&block_lock, flags);
}
slob_free((slob_t *)block - 1, 0);
return;
}
cachep0 = slab_cache;
bool has_off_slab = 0;
for (i = 0; i < SLAB_CACHE_NUM; i ++, cachep0 ++) {
if (cachep0->off_slab) {
has_off_slab = 1;
cachep1 = cachep0->slab_cachep;
if (!cachep1->off_slab) {
break;
}
}
}
unsigned int ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
if (!has_off_slab) {
goto check_pass;
}
if (!block)
return 0;
assert(cachep0->off_slab && !cachep1->off_slab);
assert(cachep1 < cachep0);
if (!((unsigned long)block & (PAGE_SIZE-1))) {
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; bb = bb->next)
if (bb->pages == block) {
spin_unlock_irqrestore(&slob_lock, flags);
return PAGE_SIZE << bb->order;
}
spin_unlock_irqrestore(&block_lock, flags);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}
assert(list_empty(&(cachep1->slabs_full)));
assert(list_empty(&(cachep1->slabs_notfull)));
v0 = kmalloc(cachep0->objsize);
p0 = kva2page(v0);
assert(page2kva(p0) == v0);
if (cachep0->num == 1) {
assert(!list_empty(&(cachep0->slabs_full)));
slabp0 = le2slab(list_next(&(cachep0->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
}
assert(slabp0 != NULL);
if (cachep1->num == 1) {
assert(!list_empty(&(cachep1->slabs_full)));
slabp1 = le2slab(list_next(&(cachep1->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep1->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep1->slabs_notfull)), slab_link);
}
assert(slabp1 != NULL);
order_size = (1 << cachep0->page_order);
for (i = 0; i < order_size; i ++, p0 ++) {
assert(PageSlab(p0));
assert(GET_PAGE_CACHE(p0) == cachep0 && GET_PAGE_SLAB(p0) == slabp0);
}
kfree(v0);
check_pass:
check_rb_tree();
check_slab_empty();
assert(slab_allocated() == 0);
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == slab_allocated());
cprintf("check_slab() succeeded!\n");
}

+ 3
- 3
code/lab6/kern/mm/kmalloc.h View File

@ -1,5 +1,5 @@
#ifndef __KERN_MM_SLAB_H__
#define __KERN_MM_SLAB_H__
#ifndef __KERN_MM_KMALLOC_H__
#define __KERN_MM_KMALLOC_H__
#include <defs.h>
@ -12,5 +12,5 @@ void kfree(void *objp);
size_t kallocated(void);
#endif /* !__KERN_MM_SLAB_H__ */
#endif /* !__KERN_MM_KMALLOC_H__ */

+ 0
- 5
code/lab6/kern/mm/memlayout.h View File

@ -156,11 +156,6 @@ typedef struct {
unsigned int nr_free; // # of free pages in this free list
} free_area_t;
/* for slab style kmalloc */
#define PG_slab 2 // page frame is included in a slab
#define SetPageSlab(page) set_bit(PG_slab, &((page)->flags))
#define ClearPageSlab(page) clear_bit(PG_slab, &((page)->flags))
#define PageSlab(page) test_bit(PG_slab, &((page)->flags))
#endif /* !__ASSEMBLER__ */

+ 0
- 4
code/lab6/kern/mm/vmm.c View File

@ -257,8 +257,6 @@ check_vmm(void) {
check_vma_struct();
check_pgfault();
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vmm() succeeded.\n");
}
@ -305,8 +303,6 @@ check_vma_struct(void) {
mm_destroy(mm);
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vma_struct() succeeded!\n");
}

+ 1
- 2
code/lab6/kern/process/proc.c View File

@ -812,8 +812,7 @@ init_main(void *arg) {
assert(nr_process == 2);
assert(list_next(&proc_list) == &(initproc->list_link));
assert(list_prev(&proc_list) == &(initproc->list_link));
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == kallocated());
cprintf("init check memory pass.\n");
return 0;
}

+ 1
- 1
code/lab6/tools/grade.sh View File

@ -338,7 +338,7 @@ default_check() {
'PDE(001) fac00000-fb000000 00400000 -rw' \
' |-- PTE(000e0) faf00000-fafe0000 000e0000 urw' \
' |-- PTE(00001) fafeb000-fafec000 00001000 -rw' \
'check_slab() succeeded!' \
'check_slob() succeeded!' \
'check_vma_struct() succeeded!' \
'page fault at 0x00000100: K/W [no page found].' \
'check_pgfault() succeeded!' \

+ 0
- 528
code/lab7/kern/libs/rb_tree.c View File

@ -1,528 +0,0 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <kmalloc.h>
#include <rb_tree.h>
#include <assert.h>
/* rb_node_create - create a new rb_node */
static inline rb_node *
rb_node_create(void) {
return kmalloc(sizeof(rb_node));
}
/* rb_tree_empty - tests if tree is empty */
static inline bool
rb_tree_empty(rb_tree *tree) {
rb_node *nil = tree->nil, *root = tree->root;
return root->left == nil;
}
/* *
* rb_tree_create - creates a new red-black tree, the 'compare' function
* is required and returns 'NULL' if failed.
*
* Note that, root->left should always point to the node that is the root
* of the tree. And nil points to a 'NULL' node which should always be
* black and may have arbitrary children and parent node.
* */
rb_tree *
rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2)) {
assert(compare != NULL);
rb_tree *tree;
rb_node *nil, *root;
if ((tree = kmalloc(sizeof(rb_tree))) == NULL) {
goto bad_tree;
}
tree->compare = compare;
if ((nil = rb_node_create()) == NULL) {
goto bad_node_cleanup_tree;
}
nil->parent = nil->left = nil->right = nil;
nil->red = 0;
tree->nil = nil;
if ((root = rb_node_create()) == NULL) {
goto bad_node_cleanup_nil;
}
root->parent = root->left = root->right = nil;
root->red = 0;
tree->root = root;
return tree;
bad_node_cleanup_nil:
kfree(nil);
bad_node_cleanup_tree:
kfree(tree);
bad_tree:
return NULL;
}
/* *
* FUNC_ROTATE - rotates as described in "Introduction to Algorithms".
*
* For example, FUNC_ROTATE(rb_left_rotate, left, right) can be expanded to a
* left-rotate function, which requires a red-black 'tree' and a node 'x'
* to be rotated on. Basically, this function, named rb_left_rotate, makes the
* parent of 'x' be the left child of 'x', 'x' the parent of its parent before
* rotation and finally fixes other nodes accordingly.
*
* FUNC_ROTATE(xx, left, right) means left-rotate,
* and FUNC_ROTATE(xx, right, left) means right-rotate.
* */
#define FUNC_ROTATE(func_name, _left, _right) \
static void \
func_name(rb_tree *tree, rb_node *x) { \
rb_node *nil = tree->nil, *y = x->_right; \
assert(x != tree->root && x != nil && y != nil); \
x->_right = y->_left; \
if (y->_left != nil) { \
y->_left->parent = x; \
} \
y->parent = x->parent; \
if (x == x->parent->_left) { \
x->parent->_left = y; \
} \
else { \
x->parent->_right = y; \
} \
y->_left = x; \
x->parent = y; \
assert(!(nil->red)); \
}
FUNC_ROTATE(rb_left_rotate, left, right);
FUNC_ROTATE(rb_right_rotate, right, left);
#undef FUNC_ROTATE
#define COMPARE(tree, node1, node2) \
((tree))->compare((node1), (node2))
/* *
* rb_insert_binary - insert @node to red-black @tree as if it were
* a regular binary tree. This function is only intended to be called
* by function rb_insert.
* */
static inline void
rb_insert_binary(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node, *nil = tree->nil, *root = tree->root;
z->left = z->right = nil;
y = root, x = y->left;
while (x != nil) {
y = x;
x = (COMPARE(tree, x, node) > 0) ? x->left : x->right;
}
z->parent = y;
if (y == root || COMPARE(tree, y, z) > 0) {
y->left = z;
}
else {
y->right = z;
}
}
/* rb_insert - insert a node to red-black tree */
void
rb_insert(rb_tree *tree, rb_node *node) {
rb_insert_binary(tree, node);
node->red = 1;
rb_node *x = node, *y;
#define RB_INSERT_SUB(_left, _right) \
do { \
y = x->parent->parent->_right; \
if (y->red) { \
x->parent->red = 0; \
y->red = 0; \
x->parent->parent->red = 1; \
x = x->parent->parent; \
} \
else { \
if (x == x->parent->_right) { \
x = x->parent; \
rb_##_left##_rotate(tree, x); \
} \
x->parent->red = 0; \
x->parent->parent->red = 1; \
rb_##_right##_rotate(tree, x->parent->parent); \
} \
} while (0)
while (x->parent->red) {
if (x->parent == x->parent->parent->left) {
RB_INSERT_SUB(left, right);
}
else {
RB_INSERT_SUB(right, left);
}
}
tree->root->left->red = 0;
assert(!(tree->nil->red) && !(tree->root->red));
#undef RB_INSERT_SUB
}
/* *
* rb_tree_successor - returns the successor of @node, or nil
* if no successor exists. Note that @node must belong to @tree;
* this helper is called by rb_node_next and rb_delete.
* */
static inline rb_node *
rb_tree_successor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->right) != nil) {
while (y->left != nil) {
y = y->left;
}
return y;
}
else {
y = x->parent;
while (x == y->right) {
x = y, y = y->parent;
}
if (y == tree->root) {
return nil;
}
return y;
}
}
/* *
* rb_tree_predecessor - returns the predecessor of @node, or nil
* if no predecessor exists, like rb_tree_successor.
* */
static inline rb_node *
rb_tree_predecessor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->left) != nil) {
while (y->right != nil) {
y = y->right;
}
return y;
}
else {
y = x->parent;
while (x == y->left) {
if (y == tree->root) {
return nil;
}
x = y, y = y->parent;
}
return y;
}
}
/* *
* rb_search - returns a node with value 'equal' to @key (according to
* function @compare). If there're multiple nodes with value 'equal' to @key,
* the function returns the one highest in the tree.
* */
rb_node *
rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key) {
rb_node *nil = tree->nil, *node = tree->root->left;
int r;
while (node != nil && (r = compare(node, key)) != 0) {
node = (r > 0) ? node->left : node->right;
}
return (node != nil) ? node : NULL;
}
/* *
* rb_delete_fixup - performs rotations and changes colors to restore
* red-black properties after a node is deleted.
* */
static void
rb_delete_fixup(rb_tree *tree, rb_node *node) {
rb_node *x = node, *w, *root = tree->root->left;
#define RB_DELETE_FIXUP_SUB(_left, _right) \
do { \
w = x->parent->_right; \
if (w->red) { \
w->red = 0; \
x->parent->red = 1; \
rb_##_left##_rotate(tree, x->parent); \
w = x->parent->_right; \
} \
if (!w->_left->red && !w->_right->red) { \
w->red = 1; \
x = x->parent; \
} \
else { \
if (!w->_right->red) { \
w->_left->red = 0; \
w->red = 1; \
rb_##_right##_rotate(tree, w); \
w = x->parent->_right; \
} \
w->red = x->parent->red; \
x->parent->red = 0; \
w->_right->red = 0; \
rb_##_left##_rotate(tree, x->parent); \
x = root; \
} \
} while (0)
while (x != root && !x->red) {
if (x == x->parent->left) {
RB_DELETE_FIXUP_SUB(left, right);
}
else {
RB_DELETE_FIXUP_SUB(right, left);
}
}
x->red = 0;
#undef RB_DELETE_FIXUP_SUB
}
/* *
* rb_delete - deletes @node from @tree, and calls rb_delete_fixup to
* restore red-black properties.
* */
void
rb_delete(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node;
rb_node *nil = tree->nil, *root = tree->root;
y = (z->left == nil || z->right == nil) ? z : rb_tree_successor(tree, z);
x = (y->left != nil) ? y->left : y->right;
assert(y != root && y != nil);
x->parent = y->parent;
if (y == y->parent->left) {
y->parent->left = x;
}
else {
y->parent->right = x;
}
bool need_fixup = !(y->red);
if (y != z) {
if (z == z->parent->left) {
z->parent->left = y;
}
else {
z->parent->right = y;
}
z->left->parent = z->right->parent = y;
*y = *z;
}
if (need_fixup) {
rb_delete_fixup(tree, x);
}
}
/* rb_tree_destroy - destroy a tree and free memory */
void
rb_tree_destroy(rb_tree *tree) {
kfree(tree->root);
kfree(tree->nil);
kfree(tree);
}
/* *
* rb_node_prev - returns the predecessor node of @node in @tree,
* or 'NULL' if no predecessor exists.
* */
rb_node *
rb_node_prev(rb_tree *tree, rb_node *node) {
rb_node *prev = rb_tree_predecessor(tree, node);
return (prev != tree->nil) ? prev : NULL;
}
/* *
* rb_node_next - returns the successor node of @node in @tree,
* or 'NULL' if no successor exists.
* */
rb_node *
rb_node_next(rb_tree *tree, rb_node *node) {
rb_node *next = rb_tree_successor(tree, node);
return (next != tree->nil) ? next : NULL;
}
/* rb_node_root - returns the root node of a @tree, or 'NULL' if tree is empty */
rb_node *
rb_node_root(rb_tree *tree) {
rb_node *node = tree->root->left;
return (node != tree->nil) ? node : NULL;
}
/* rb_node_left - gets the left child of @node, or 'NULL' if no such node */
rb_node *
rb_node_left(rb_tree *tree, rb_node *node) {
rb_node *left = node->left;
return (left != tree->nil) ? left : NULL;
}
/* rb_node_right - gets the right child of @node, or 'NULL' if no such node */
rb_node *
rb_node_right(rb_tree *tree, rb_node *node) {
rb_node *right = node->right;
return (right != tree->nil) ? right : NULL;
}
int
check_tree(rb_tree *tree, rb_node *node) {
rb_node *nil = tree->nil;
if (node == nil) {
assert(!node->red);
return 1;
}
if (node->left != nil) {
assert(COMPARE(tree, node, node->left) >= 0);
assert(node->left->parent == node);
}
if (node->right != nil) {
assert(COMPARE(tree, node, node->right) <= 0);
assert(node->right->parent == node);
}
if (node->red) {
assert(!node->left->red && !node->right->red);
}
int hb_left = check_tree(tree, node->left);
int hb_right = check_tree(tree, node->right);
assert(hb_left == hb_right);
int hb = hb_left;
if (!node->red) {
hb ++;
}
return hb;
}
static void *
check_safe_kmalloc(size_t size) {
void *ret = kmalloc(size);
assert(ret != NULL);
return ret;
}
struct check_data {
long data;
rb_node rb_link;
};
#define rbn2data(node) \
(to_struct(node, struct check_data, rb_link))
static inline int
check_compare1(rb_node *node1, rb_node *node2) {
return rbn2data(node1)->data - rbn2data(node2)->data;
}
static inline int
check_compare2(rb_node *node, void *key) {
return rbn2data(node)->data - (long)key;
}
void
check_rb_tree(void) {
rb_tree *tree = rb_tree_create(check_compare1);
assert(tree != NULL);
rb_node *nil = tree->nil, *root = tree->root;
assert(!nil->red && root->left == nil);
int total = 1000;
struct check_data **all = check_safe_kmalloc(sizeof(struct check_data *) * total);
long i;
for (i = 0; i < total; i ++) {
all[i] = check_safe_kmalloc(sizeof(struct check_data));
all[i]->data = i;
}
int *mark = check_safe_kmalloc(sizeof(int) * total);
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
int j = (rand() % (total - i)) + i;
struct check_data *z = all[i];
all[i] = all[j];
all[j] = z;
}
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_node *node;
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)(all[i]->data));
assert(node != NULL && node == &(all[i]->rb_link));
}
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)i);
assert(node != NULL && rbn2data(node)->data == i);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(!nil->red && root->left == nil);
long max = 32;
if (max > total) {
max = total;
}
for (i = 0; i < max; i ++) {
all[i]->data = max;
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
for (i = 0; i < max; i ++) {
node = rb_search(tree, check_compare2, (void *)max);
assert(node != NULL && rbn2data(node)->data == max);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(rb_tree_empty(tree));
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_tree_destroy(tree);
for (i = 0; i < total; i ++) {
kfree(all[i]);
}
kfree(mark);
kfree(all);
}

+ 0
- 32
code/lab7/kern/libs/rb_tree.h View File

@ -1,32 +0,0 @@
#ifndef __KERN_LIBS_RB_TREE_H__
#define __KERN_LIBS_RB_TREE_H__
#include <defs.h>
typedef struct rb_node {
bool red; // if red = 0, it's a black node
struct rb_node *parent;
struct rb_node *left, *right;
} rb_node;
typedef struct rb_tree {
// compare function should return -1 if *node1 < *node2, 1 if *node1 > *node2, and 0 otherwise
int (*compare)(rb_node *node1, rb_node *node2);
struct rb_node *nil, *root;
} rb_tree;
rb_tree *rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2));
void rb_tree_destroy(rb_tree *tree);
void rb_insert(rb_tree *tree, rb_node *node);
void rb_delete(rb_tree *tree, rb_node *node);
rb_node *rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key);
rb_node *rb_node_prev(rb_tree *tree, rb_node *node);
rb_node *rb_node_next(rb_tree *tree, rb_node *node);
rb_node *rb_node_root(rb_tree *tree);
rb_node *rb_node_left(rb_tree *tree, rb_node *node);
rb_node *rb_node_right(rb_tree *tree, rb_node *node);
void check_rb_tree(void);
#endif /* !__KERN_LIBS_RB_TREE_H__ */

+ 253
- 583
code/lab7/kern/mm/kmalloc.c View File

@ -6,635 +6,305 @@
#include <sync.h>
#include <pmm.h>
#include <stdio.h>
#include <rb_tree.h>
/* The slab allocator used in ucore is based on an algorithm first introduced by
Jeff Bonwick for the SunOS operating system. The paper can be downloaded from
http://citeseer.ist.psu.edu/bonwick94slab.html
An implementation of the Slab Allocator as described in outline in:
UNIX Internals: The New Frontiers by Uresh Vahalia
Pub: Prentice Hall ISBN 0-13-101908-2
Within a kernel, a considerable amount of memory is allocated for a finite set
of objects such as file descriptors and other common structures. Jeff found that
the amount of time required to initialize a regular object in the kernel exceeded
the amount of time required to allocate and deallocate it. His conclusion was
that instead of freeing the memory back to a global pool, he would have the memory
remain initialized for its intended purpose.
In our simple slab implementation, the high-level organization of the slab
structures is simplified. At the highest level is an array slab_cache[SLAB_CACHE_NUM],
and each array element is a slab_cache which has slab chains. Each slab_cache has
two lists: one chains the fully allocated slabs, and the other chains the not-full
(possibly empty) slabs. Each slab has a fixed number (2^n) of pages. In each
slab there are many objects with the same fixed size (32B ~ 128KB).
+----------------------------------+
| slab_cache[0] for 0~32B obj |
+----------------------------------+
| slab_cache[1] for 33B~64B obj |-->lists for slabs
+----------------------------------+ |
| slab_cache[2] for 65B~128B obj | |
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+----------------------------------+ |
| slab_cache[12]for 64KB~128KB obj | |
+----------------------------------+ |
|
slabs_full/slabs_not +---------------------+
-<-----------<----------<-+
| | |
slab1 slab2 slab3...
|
|-------|-------|
pages1 pages2 pages3...
|
|
|
slab_t+n*bufctl_t+obj1-obj2-obj3...objn (the size of obj is small)
|
OR
|
obj1-obj2-obj3...objn WITH slab_t+n*bufctl_t in another slab (the size of obj is BIG)
The important functions are:
kmem_cache_grow(kmem_cache_t *cachep)
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp)
kmalloc(size_t size): used by outside functions that need to dynamically allocate memory
kfree(void *objp): used by outside functions that need to dynamically release memory
*/
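/*
 * A quick size-class example for the scheme sketched above (illustration
 * only): a kmalloc(100) request is mapped by getorder() to order 7
 * (2^7 = 128), so it is served from slab_cache[7 - MIN_SIZE_ORDER], i.e.
 * slab_cache[2], the cache for 65B~128B objects shown in the diagram.
 */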
#define BUFCTL_END 0xFFFFFFFFL // the signature of the last bufctl
#define SLAB_LIMIT 0xFFFFFFFEL // the max value of obj number
typedef size_t kmem_bufctl_t; //the index of obj in slab
typedef struct slab_s {
list_entry_t slab_link; // the list entry linked to kmem_cache list
void *s_mem; // the kernel virtual address of the first obj in slab
size_t inuse; // the number of allocated objs
size_t offset; // the first obj's offset value in slab
kmem_bufctl_t free; // the first free obj's index in slab
} slab_t;
// get the slab address according to the link element (see list.h)
#define le2slab(le, member) \
to_struct((le), slab_t, member)
typedef struct kmem_cache_s kmem_cache_t;
struct kmem_cache_s {
list_entry_t slabs_full; // list for fully allocated slabs
list_entry_t slabs_notfull; // list for not-fully allocated slabs
size_t objsize; // the fixed size of obj
size_t num; // number of objs per slab
size_t offset; // this first obj's offset in slab
bool off_slab; // the control part of slab in slab or not.
/* order of pages per slab (2^n) */
size_t page_order;
kmem_cache_t *slab_cachep;
};
#define MIN_SIZE_ORDER 5 // 32
#define MAX_SIZE_ORDER 17 // 128k
#define SLAB_CACHE_NUM (MAX_SIZE_ORDER - MIN_SIZE_ORDER + 1)
/*
* SLOB Allocator: Simple List Of Blocks
*
* Matt Mackall <mpm@selenic.com> 12/30/03
*
* How SLOB works:
*
* The core of SLOB is a traditional K&R style heap allocator, with
* support for returning aligned objects. The granularity of this
* allocator is 8 bytes on x86, though it's perhaps possible to reduce
* this to 4 if it's deemed worth the effort. The slob heap is a
* singly-linked list of pages from __get_free_page, grown on demand
* and allocation from the heap is currently first-fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are 8-byte aligned and prepended with an 8-byte header.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* __get_free_pages directly so that it can return page-aligned blocks
* and keeps a linked list of such pages and their orders. These
* objects are detected in kfree() by their page alignment.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
* the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
* their size, no separate size bookkeeping is necessary and there is
* essentially no allocation space overhead.
*/
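/*
 * A concrete sketch of the accounting described above (illustration only,
 * assuming SLOB_UNIT == sizeof(slob_t) == 8 on a 32-bit x86 build):
 *   kmalloc(100)  -> __kmalloc() -> slob_alloc(100 + SLOB_UNIT); the first-fit
 *     scan carves SLOB_UNITS(108) = 14 units = 112 bytes off the free list,
 *     keeps the leading slob_t as the size header and hands the caller the
 *     address right behind it.
 *   kmalloc(4096) -> too large for the small path (4096 >= PAGE_SIZE -
 *     SLOB_UNIT), so it takes the bigblock path and returns a page-aligned
 *     block obtained from __slob_get_free_pages().
 */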
//some helper
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)
typedef unsigned int gfp_t;
#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif
#ifndef ALIGN
#define ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
#endif
struct slob_block {
int units;
struct slob_block *next;
};
typedef struct slob_block slob_t;
static kmem_cache_t slab_cache[SLAB_CACHE_NUM];
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
static void init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align);
static void check_slab(void);
struct bigblock {
int order;
void *pages;
struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
//slab_init - call init_kmem_cache function to reset the slab_cache array
static void
slab_init(void) {
size_t i;
//the align bit for obj in slab. 2^n could be better for performance
size_t align = 16;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
init_kmem_cache(slab_cache + i, 1 << (i + MIN_SIZE_ORDER), align);
}
check_slab();
}
inline void
kmalloc_init(void) {
slab_init();
cprintf("kmalloc_init() succeeded!\n");
}
//slab_allocated - sum up the total size of allocated objs
static size_t
slab_allocated(void) {
size_t total = 0;
int i;
bool intr_flag;
local_intr_save(intr_flag);
{
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
list_entry_t *list, *le;
list = le = &(cachep->slabs_full);
while ((le = list_next(le)) != list) {
total += cachep->num * cachep->objsize;
}
list = le = &(cachep->slabs_notfull);
while ((le = list_next(le)) != list) {
slab_t *slabp = le2slab(le, slab_link);
total += slabp->inuse * cachep->objsize;
}
}
}
local_intr_restore(intr_flag);
return total;
static void* __slob_get_free_pages(gfp_t gfp, int order)
{
struct Page * page = alloc_pages(1 << order);
if(!page)
return NULL;
return page2kva(page);
}
inline size_t
kallocated(void) {
return slab_allocated();
}
#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)
// slab_mgmt_size - get the size of slab control area (slab_t+num*kmem_bufctl_t)
static size_t
slab_mgmt_size(size_t num, size_t align) {
return ROUNDUP(sizeof(slab_t) + num * sizeof(kmem_bufctl_t), align);
static inline void __slob_free_pages(unsigned long kva, int order)
{
free_pages(kva2page(kva), 1 << order);
}
// cache_estimate - estimate the number of objs in a slab
static void
cache_estimate(size_t order, size_t objsize, size_t align, bool off_slab, size_t *remainder, size_t *num) {
size_t nr_objs, mgmt_size;
size_t slab_size = (PGSIZE << order);
if (off_slab) {
mgmt_size = 0;
nr_objs = slab_size / objsize;
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
}
else {
nr_objs = (slab_size - sizeof(slab_t)) / (objsize + sizeof(kmem_bufctl_t));
while (slab_mgmt_size(nr_objs, align) + nr_objs * objsize > slab_size) {
nr_objs --;
}
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
*remainder = slab_size - nr_objs * objsize - mgmt_size;
static void slob_free(void *b, int size);
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
assert( (size + SLOB_UNIT) < PAGE_SIZE );
slob_t *prev, *cur, *aligned = 0;
int delta = 0, units = SLOB_UNITS(size);
unsigned long flags;
spin_lock_irqsave(&slob_lock, flags);
prev = slobfree;
for (cur = prev->next; ; prev = cur, cur = cur->next) {
if (align) {
aligned = (slob_t *)ALIGN((unsigned long)cur, align);
delta = aligned - cur;
}
if (cur->units >= units + delta) { /* room enough? */
if (delta) { /* need to fragment head to align? */
aligned->units = cur->units - delta;
aligned->next = cur->next;
cur->next = aligned;
cur->units = delta;
prev = cur;
cur = aligned;
}
if (cur->units == units) /* exact fit? */
prev->next = cur->next; /* unlink */
else { /* fragment */
prev->next = cur + units;
prev->next->units = cur->units - units;
prev->next->next = cur->next;
cur->units = units;
}
slobfree = prev;
spin_unlock_irqrestore(&slob_lock, flags);
return cur;
}
if (cur == slobfree) {
spin_unlock_irqrestore(&slob_lock, flags);
if (size == PAGE_SIZE) /* trying to shrink arena? */
return 0;
cur = (slob_t *)__slob_get_free_page(gfp);
if (!cur)
return 0;
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
}
}
}
// calculate_slab_order - estimate the size(4K~4M) of slab
// parameters:
// cachep: the slab_cache
// objsize: the size of obj
// align: align bit for objs
// off_slab: the control part of slab in slab or not
// left_over: the size of the unusable (leftover) area in the slab
static void
calculate_slab_order(kmem_cache_t *cachep, size_t objsize, size_t align, bool off_slab, size_t *left_over) {
size_t order;
for (order = 0; order <= KMALLOC_MAX_ORDER; order ++) {
size_t num, remainder;
cache_estimate(order, objsize, align, off_slab, &remainder, &num);
if (num != 0) {
if (off_slab) {
size_t off_slab_limit = objsize - sizeof(slab_t);
off_slab_limit /= sizeof(kmem_bufctl_t);
if (num > off_slab_limit) {
panic("off_slab: objsize = %d, num = %d.", objsize, num);
}
}
if (remainder * 8 <= (PGSIZE << order)) {
cachep->num = num;
cachep->page_order = order;
if (left_over != NULL) {
*left_over = remainder;
}
return ;
}
}
}
panic("calculate_slab_over: failed.");
}
static void slob_free(void *block, int size)
{
slob_t *cur, *b = (slob_t *)block;
unsigned long flags;
// getorder - find the smallest order such that n <= 2^order
static inline size_t
getorder(size_t n) {
size_t order = MIN_SIZE_ORDER, order_size = (1 << order);
for (; order <= MAX_SIZE_ORDER; order ++, order_size <<= 1) {
if (n <= order_size) {
return order;
}
}
panic("getorder failed. %d\n", n);
}
if (!block)
return;
// init_kmem_cache - initialize a slab_cache cachep according to the obj with size = objsize
static void
init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align) {
list_init(&(cachep->slabs_full));
list_init(&(cachep->slabs_notfull));
if (size)
b->units = SLOB_UNITS(size);
objsize = ROUNDUP(objsize, align);
cachep->objsize = objsize;
cachep->off_slab = (objsize >= (PGSIZE >> 3));
/* Find reinsertion point */
spin_lock_irqsave(&slob_lock, flags);
for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
if (cur >= cur->next && (b > cur || b < cur->next))
break;
size_t left_over;
calculate_slab_order(cachep, objsize, align, cachep->off_slab, &left_over);
if (b + b->units == cur->next) {
b->units += cur->next->units;
b->next = cur->next->next;
} else
b->next = cur->next;
assert(cachep->num > 0);
if (cur + cur->units == b) {
cur->units += b->units;
cur->next = b->next;
} else
cur->next = b;
size_t mgmt_size = slab_mgmt_size(cachep->num, align);
slobfree = cur;
if (cachep->off_slab && left_over >= mgmt_size) {
cachep->off_slab = 0;
}
if (cachep->off_slab) {
cachep->offset = 0;
cachep->slab_cachep = slab_cache + (getorder(mgmt_size) - MIN_SIZE_ORDER);
}
else {
cachep->offset = mgmt_size;
}
spin_unlock_irqrestore(&slob_lock, flags);
}
static void *kmem_cache_alloc(kmem_cache_t *cachep);
#define slab_bufctl(slabp) \
((kmem_bufctl_t*)(((slab_t *)(slabp)) + 1))
// kmem_cache_slabmgmt - get the address of a slab according to page
// - and initialize the slab according to cachep
static slab_t *
kmem_cache_slabmgmt(kmem_cache_t *cachep, struct Page *page) {
void *objp = page2kva(page);
slab_t *slabp;
if (cachep->off_slab) {
if ((slabp = kmem_cache_alloc(cachep->slab_cachep)) == NULL) {
return NULL;
}
}
else {
slabp = page2kva(page);
}
slabp->inuse = 0;
slabp->offset = cachep->offset;
slabp->s_mem = objp + cachep->offset;
return slabp;
}
#define SET_PAGE_CACHE(page, cachep) \
do { \
struct Page *__page = (struct Page *)(page); \
kmem_cache_t **__cachepp = (kmem_cache_t **)&(__page->page_link.next); \
*__cachepp = (kmem_cache_t *)(cachep); \
} while (0)
#define SET_PAGE_SLAB(page, slabp) \
do { \
struct Page *__page = (struct Page *)(page); \
slab_t **__cachepp = (slab_t **)&(__page->page_link.prev); \
*__cachepp = (slab_t *)(slabp); \
} while (0)
// kmem_cache_grow - allocate a new slab by calling alloc_pages
// - set control area in the new slab
static bool
kmem_cache_grow(kmem_cache_t *cachep) {
struct Page *page = alloc_pages(1 << cachep->page_order);
if (page == NULL) {
goto failed;
}
slab_t *slabp;
if ((slabp = kmem_cache_slabmgmt(cachep, page)) == NULL) {
goto oops;
}
size_t order_size = (1 << cachep->page_order);
do {
//setup this page in the free list (see memlayout.h: struct page)???
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
//this page is used for slab
SetPageSlab(page);
page ++;
} while (-- order_size);
int i;
for (i = 0; i < cachep->num; i ++) {
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[cachep->num - 1] = BUFCTL_END;
slabp->free = 0;
bool intr_flag;
local_intr_save(intr_flag);
{
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
local_intr_restore(intr_flag);
return 1;
oops:
free_pages(page, 1 << cachep->page_order);
failed:
return 0;
}
// kmem_cache_alloc_one - allocate an obj in a slab
static void *
kmem_cache_alloc_one(kmem_cache_t *cachep, slab_t *slabp) {
slabp->inuse ++;
void *objp = slabp->s_mem + slabp->free * cachep->objsize;
slabp->free = slab_bufctl(slabp)[slabp->free];
if (slabp->free == BUFCTL_END) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_full), &(slabp->slab_link));
}
return objp;
void check_slob(void) {
cprintf("check_slob() success\n");
}
// kmem_cache_alloc - call kmem_cache_alloc_one function to allocate an obj
// - if no free obj, try to allocate a slab
static void *
kmem_cache_alloc(kmem_cache_t *cachep) {
void *objp;
bool intr_flag;
try_again:
local_intr_save(intr_flag);
if (list_empty(&(cachep->slabs_notfull))) {
goto alloc_new_slab;
}
slab_t *slabp = le2slab(list_next(&(cachep->slabs_notfull)), slab_link);
objp = kmem_cache_alloc_one(cachep, slabp);
local_intr_restore(intr_flag);
return objp;
alloc_new_slab:
local_intr_restore(intr_flag);
if (kmem_cache_grow(cachep)) {
goto try_again;
}
return NULL;
void
slob_init(void) {
cprintf("use SLOB allocator\n");
check_slob();
}
// kmalloc - simple interface used by outside functions
// - to allocate a free memory using kmem_cache_alloc function
void *
kmalloc(size_t size) {
assert(size > 0);
size_t order = getorder(size);
if (order > MAX_SIZE_ORDER) {
return NULL;
}
return kmem_cache_alloc(slab_cache + (order - MIN_SIZE_ORDER));
inline void
kmalloc_init(void) {
slob_init();
cprintf("kmalloc_init() succeeded!\n");
}
static void kmem_cache_free(kmem_cache_t *cachep, void *obj);
// kmem_slab_destroy - call free_pages & kmem_cache_free to free a slab
static void
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp) {
struct Page *page = kva2page(slabp->s_mem - slabp->offset);
struct Page *p = page;
size_t order_size = (1 << cachep->page_order);
do {
assert(PageSlab(p));
ClearPageSlab(p);
p ++;
} while (-- order_size);
free_pages(page, 1 << cachep->page_order);
if (cachep->off_slab) {
kmem_cache_free(cachep->slab_cachep, slabp);
}
size_t
slob_allocated(void) {
return 0;
}
// kmem_cache_free_one - free an obj in a slab
// - if slab->inuse==0, then free the slab
static void
kmem_cache_free_one(kmem_cache_t *cachep, slab_t *slabp, void *objp) {
//should not use divide operator ???
size_t objnr = (objp - slabp->s_mem) / cachep->objsize;
slab_bufctl(slabp)[objnr] = slabp->free;
slabp->free = objnr;
slabp->inuse --;
if (slabp->inuse == 0) {
list_del(&(slabp->slab_link));
kmem_slab_destroy(cachep, slabp);
}
else if (slabp->inuse == cachep->num -1 ) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
size_t
kallocated(void) {
return slob_allocated();
}
#define GET_PAGE_CACHE(page) \
(kmem_cache_t *)((page)->page_link.next)
#define GET_PAGE_SLAB(page) \
(slab_t *)((page)->page_link.prev)
// kmem_cache_free - call kmem_cache_free_one function to free an obj
static void
kmem_cache_free(kmem_cache_t *cachep, void *objp) {
bool intr_flag;
struct Page *page = kva2page(objp);
if (!PageSlab(page)) {
panic("not a slab page %08x\n", objp);
}
local_intr_save(intr_flag);
{
kmem_cache_free_one(cachep, GET_PAGE_SLAB(page), objp);
}
local_intr_restore(intr_flag);
static int find_order(int size)
{
int order = 0;
for ( ; size > 4096 ; size >>=1)
order++;
return order;
}
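/*
 * Worked example (illustration only): find_order(20480) halves 20480 three
 * times (10240, 5120, 2560) before the value drops to 4096 or below, so it
 * returns 3 and the caller allocates 1 << 3 = 8 pages (32KB) for a 20KB
 * request.
 */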
// kfree - simple interface used by outside functions to free an obj
void
kfree(void *objp) {
kmem_cache_free(GET_PAGE_CACHE(kva2page(objp)), objp);
static void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
bigblock_t *bb;
unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
return m ? (void *)(m + 1) : 0;
}
bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
if (!bb)
return 0;
bb->order = find_order(size);
bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);
if (bb->pages) {
spin_lock_irqsave(&block_lock, flags);
bb->next = bigblocks;
bigblocks = bb;
spin_unlock_irqrestore(&block_lock, flags);
return bb->pages;
}
slob_free(bb, sizeof(bigblock_t));
return 0;
}
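/*
 * Sketch of the large-object round trip above (illustration only):
 * kmalloc(8192) fails the small-object test, so __kmalloc() allocates a
 * bigblock_t descriptor, requests 1 << find_order(8192) = 2 pages from
 * __slob_get_free_pages(), links the descriptor into the 'bigblocks' list
 * and returns the page-aligned block; kfree() later recognizes the pointer
 * by its page alignment, unlinks the descriptor and releases the pages with
 * __slob_free_pages().
 */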
static inline void
check_slab_empty(void) {
int i;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
assert(list_empty(&(cachep->slabs_full)));
assert(list_empty(&(cachep->slabs_notfull)));
}
void *
kmalloc(size_t size)
{
return __kmalloc(size, 0);
}
void
check_slab(void) {
int i;
void *v0, *v1;
size_t nr_free_pages_store = nr_free_pages();
size_t kernel_allocated_store = slab_allocated();
/* slab must be empty now */
check_slab_empty();
assert(slab_allocated() == 0);
kmem_cache_t *cachep0, *cachep1;
cachep0 = slab_cache;
assert(cachep0->objsize == 32 && cachep0->num > 1 && !cachep0->off_slab);
assert((v0 = kmalloc(16)) != NULL);
slab_t *slabp0, *slabp1;
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
assert(slabp0->inuse == 1 && list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
struct Page *p0, *p1;
size_t order_size;
p0 = kva2page(slabp0->s_mem - slabp0->offset), p1 = p0;
order_size = (1 << cachep0->page_order);
for (i = 0; i < cachep0->page_order; i ++, p1 ++) {
assert(PageSlab(p1));
assert(GET_PAGE_CACHE(p1) == cachep0 && GET_PAGE_SLAB(p1) == slabp0);
}
assert(v0 == slabp0->s_mem);
assert((v1 = kmalloc(16)) != NULL && v1 == v0 + 32);
kfree(v0);
assert(slabp0->free == 0);
kfree(v1);
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->page_order; i ++, p0 ++) {
assert(!PageSlab(p0));
}
v0 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
for (i = 0; i < cachep0->num - 1; i ++) {
kmalloc(16);
}
assert(slabp0->inuse == cachep0->num);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
v1 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
kfree(v0);
assert(list_empty(&(cachep0->slabs_full)));
assert(list_next(&(slabp0->slab_link)) == &(slabp1->slab_link)
|| list_next(&(slabp1->slab_link)) == &(slabp0->slab_link));
kfree(v1);
assert(!list_empty(&(cachep0->slabs_notfull)));
assert(list_next(&(cachep0->slabs_notfull)) == &(slabp0->slab_link));
assert(list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
v1 = kmalloc(16);
assert(v1 == v0);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->num; i ++) {
kfree(v1 + i * cachep0->objsize);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
void kfree(void *block)
{
bigblock_t *bb, **last = &bigblocks;
unsigned long flags;
if (!block)
return;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
/* might be on the big block list */
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
if (bb->pages == block) {
*last = bb->next;
spin_unlock_irqrestore(&block_lock, flags);
__slob_free_pages((unsigned long)block, bb->order);
slob_free(bb, sizeof(bigblock_t));
return;
}
}
spin_unlock_irqrestore(&block_lock, flags);
}
slob_free((slob_t *)block - 1, 0);
return;
}
cachep0 = slab_cache;
bool has_off_slab = 0;
for (i = 0; i < SLAB_CACHE_NUM; i ++, cachep0 ++) {
if (cachep0->off_slab) {
has_off_slab = 1;
cachep1 = cachep0->slab_cachep;
if (!cachep1->off_slab) {
break;
}
}
}
unsigned int ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
if (!has_off_slab) {
goto check_pass;
}
if (!block)
return 0;
assert(cachep0->off_slab && !cachep1->off_slab);
assert(cachep1 < cachep0);
if (!((unsigned long)block & (PAGE_SIZE-1))) {
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; bb = bb->next)
if (bb->pages == block) {
spin_unlock_irqrestore(&block_lock, flags);
return PAGE_SIZE << bb->order;
}
spin_unlock_irqrestore(&block_lock, flags);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}
assert(list_empty(&(cachep1->slabs_full)));
assert(list_empty(&(cachep1->slabs_notfull)));
v0 = kmalloc(cachep0->objsize);
p0 = kva2page(v0);
assert(page2kva(p0) == v0);
if (cachep0->num == 1) {
assert(!list_empty(&(cachep0->slabs_full)));
slabp0 = le2slab(list_next(&(cachep0->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
}
assert(slabp0 != NULL);
if (cachep1->num == 1) {
assert(!list_empty(&(cachep1->slabs_full)));
slabp1 = le2slab(list_next(&(cachep1->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep1->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep1->slabs_notfull)), slab_link);
}
assert(slabp1 != NULL);
order_size = (1 << cachep0->page_order);
for (i = 0; i < order_size; i ++, p0 ++) {
assert(PageSlab(p0));
assert(GET_PAGE_CACHE(p0) == cachep0 && GET_PAGE_SLAB(p0) == slabp0);
}
kfree(v0);
check_pass:
check_rb_tree();
check_slab_empty();
assert(slab_allocated() == 0);
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == slab_allocated());
cprintf("check_slab() succeeded!\n");
}

+ 3
- 3
code/lab7/kern/mm/kmalloc.h View File

@ -1,5 +1,5 @@
#ifndef __KERN_MM_SLAB_H__
#define __KERN_MM_SLAB_H__
#ifndef __KERN_MM_KMALLOC_H__
#define __KERN_MM_KMALLOC_H__
#include <defs.h>
@ -12,5 +12,5 @@ void kfree(void *objp);
size_t kallocated(void);
#endif /* !__KERN_MM_SLAB_H__ */
#endif /* !__KERN_MM_KMALLOC_H__ */

+ 0
- 5
code/lab7/kern/mm/memlayout.h View File

@ -156,11 +156,6 @@ typedef struct {
unsigned int nr_free; // # of free pages in this free list
} free_area_t;
/* for slab style kmalloc */
#define PG_slab 2 // page frame is included in a slab
#define SetPageSlab(page) set_bit(PG_slab, &((page)->flags))
#define ClearPageSlab(page) clear_bit(PG_slab, &((page)->flags))
#define PageSlab(page) test_bit(PG_slab, &((page)->flags))
#endif /* !__ASSEMBLER__ */

+ 0
- 4
code/lab7/kern/mm/vmm.c View File

@ -257,8 +257,6 @@ check_vmm(void) {
check_vma_struct();
check_pgfault();
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vmm() succeeded.\n");
}
@ -305,8 +303,6 @@ check_vma_struct(void) {
mm_destroy(mm);
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vma_struct() succeeded!\n");
}

+ 1
- 2
code/lab7/kern/process/proc.c View File

@ -814,8 +814,7 @@ init_main(void *arg) {
assert(nr_process == 2);
assert(list_next(&proc_list) == &(initproc->list_link));
assert(list_prev(&proc_list) == &(initproc->list_link));
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == kallocated());
cprintf("init check memory pass.\n");
return 0;
}

+ 1
- 1
code/lab7/tools/grade.sh View File

@ -338,7 +338,7 @@ default_check() {
'PDE(001) fac00000-fb000000 00400000 -rw' \
' |-- PTE(000e0) faf00000-fafe0000 000e0000 urw' \
' |-- PTE(00001) fafeb000-fafec000 00001000 -rw' \
'check_slab() succeeded!' \
'check_slob() succeeded!' \
'check_vma_struct() succeeded!' \
'page fault at 0x00000100: K/W [no page found].' \
'check_pgfault() succeeded!' \

+ 0
- 528
code/lab8/kern/libs/rb_tree.c View File

@ -1,528 +0,0 @@
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <kmalloc.h>
#include <rb_tree.h>
#include <assert.h>
/* rb_node_create - create a new rb_node */
static inline rb_node *
rb_node_create(void) {
return kmalloc(sizeof(rb_node));
}
/* rb_tree_empty - tests if tree is empty */
static inline bool
rb_tree_empty(rb_tree *tree) {
rb_node *nil = tree->nil, *root = tree->root;
return root->left == nil;
}
/* *
* rb_tree_create - creates a new red-black tree, the 'compare' function
* is required and returns 'NULL' if failed.
*
* Note that, root->left should always point to the node that is the root
* of the tree. And nil points to a 'NULL' node which should always be
* black and may have arbitrary children and parent node.
* */
rb_tree *
rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2)) {
assert(compare != NULL);
rb_tree *tree;
rb_node *nil, *root;
if ((tree = kmalloc(sizeof(rb_tree))) == NULL) {
goto bad_tree;
}
tree->compare = compare;
if ((nil = rb_node_create()) == NULL) {
goto bad_node_cleanup_tree;
}
nil->parent = nil->left = nil->right = nil;
nil->red = 0;
tree->nil = nil;
if ((root = rb_node_create()) == NULL) {
goto bad_node_cleanup_nil;
}
root->parent = root->left = root->right = nil;
root->red = 0;
tree->root = root;
return tree;
bad_node_cleanup_nil:
kfree(nil);
bad_node_cleanup_tree:
kfree(tree);
bad_tree:
return NULL;
}
/* *
* FUNC_ROTATE - rotates as described in "Introduction to Algorithms".
*
* For example, FUNC_ROTATE(rb_left_rotate, left, right) can be expanded to a
* left-rotate function, which requires a red-black 'tree' and a node 'x'
* to be rotated on. Basically, this function, named rb_left_rotate, makes the
* parent of 'x' be the left child of 'x', 'x' the parent of its parent before
* rotation and finally fixes other nodes accordingly.
*
* FUNC_ROTATE(xx, left, right) means left-rotate,
* and FUNC_ROTATE(xx, right, left) means right-rotate.
* */
#define FUNC_ROTATE(func_name, _left, _right) \
static void \
func_name(rb_tree *tree, rb_node *x) { \
rb_node *nil = tree->nil, *y = x->_right; \
assert(x != tree->root && x != nil && y != nil); \
x->_right = y->_left; \
if (y->_left != nil) { \
y->_left->parent = x; \
} \
y->parent = x->parent; \
if (x == x->parent->_left) { \
x->parent->_left = y; \
} \
else { \
x->parent->_right = y; \
} \
y->_left = x; \
x->parent = y; \
assert(!(nil->red)); \
}
FUNC_ROTATE(rb_left_rotate, left, right);
FUNC_ROTATE(rb_right_rotate, right, left);
#undef FUNC_ROTATE
#define COMPARE(tree, node1, node2) \
((tree))->compare((node1), (node2))
/* *
* rb_insert_binary - insert @node to red-black @tree as if it were
* a regular binary tree. This function is only intended to be called
* by function rb_insert.
* */
static inline void
rb_insert_binary(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node, *nil = tree->nil, *root = tree->root;
z->left = z->right = nil;
y = root, x = y->left;
while (x != nil) {
y = x;
x = (COMPARE(tree, x, node) > 0) ? x->left : x->right;
}
z->parent = y;
if (y == root || COMPARE(tree, y, z) > 0) {
y->left = z;
}
else {
y->right = z;
}
}
/* rb_insert - insert a node to red-black tree */
void
rb_insert(rb_tree *tree, rb_node *node) {
rb_insert_binary(tree, node);
node->red = 1;
rb_node *x = node, *y;
#define RB_INSERT_SUB(_left, _right) \
do { \
y = x->parent->parent->_right; \
if (y->red) { \
x->parent->red = 0; \
y->red = 0; \
x->parent->parent->red = 1; \
x = x->parent->parent; \
} \
else { \
if (x == x->parent->_right) { \
x = x->parent; \
rb_##_left##_rotate(tree, x); \
} \
x->parent->red = 0; \
x->parent->parent->red = 1; \
rb_##_right##_rotate(tree, x->parent->parent); \
} \
} while (0)
while (x->parent->red) {
if (x->parent == x->parent->parent->left) {
RB_INSERT_SUB(left, right);
}
else {
RB_INSERT_SUB(right, left);
}
}
tree->root->left->red = 0;
assert(!(tree->nil->red) && !(tree->root->red));
#undef RB_INSERT_SUB
}
/* *
* rb_tree_successor - returns the successor of @node, or nil
* if no successor exists. Note that @node must belong to @tree;
* this helper is called by rb_node_next and rb_delete.
* */
static inline rb_node *
rb_tree_successor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->right) != nil) {
while (y->left != nil) {
y = y->left;
}
return y;
}
else {
y = x->parent;
while (x == y->right) {
x = y, y = y->parent;
}
if (y == tree->root) {
return nil;
}
return y;
}
}
/* *
* rb_tree_predecessor - returns the predecessor of @node, or nil
* if no predecessor exists, like rb_tree_successor.
* */
static inline rb_node *
rb_tree_predecessor(rb_tree *tree, rb_node *node) {
rb_node *x = node, *y, *nil = tree->nil;
if ((y = x->left) != nil) {
while (y->right != nil) {
y = y->right;
}
return y;
}
else {
y = x->parent;
while (x == y->left) {
if (y == tree->root) {
return nil;
}
x = y, y = y->parent;
}
return y;
}
}
/* *
* rb_search - returns a node with value 'equal' to @key (according to
* function @compare). If there're multiple nodes with value 'equal' to @key,
* the function returns the one highest in the tree.
* */
rb_node *
rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key) {
rb_node *nil = tree->nil, *node = tree->root->left;
int r;
while (node != nil && (r = compare(node, key)) != 0) {
node = (r > 0) ? node->left : node->right;
}
return (node != nil) ? node : NULL;
}
/* *
* rb_delete_fixup - performs rotations and changes colors to restore
* red-black properties after a node is deleted.
* */
static void
rb_delete_fixup(rb_tree *tree, rb_node *node) {
rb_node *x = node, *w, *root = tree->root->left;
#define RB_DELETE_FIXUP_SUB(_left, _right) \
do { \
w = x->parent->_right; \
if (w->red) { \
w->red = 0; \
x->parent->red = 1; \
rb_##_left##_rotate(tree, x->parent); \
w = x->parent->_right; \
} \
if (!w->_left->red && !w->_right->red) { \
w->red = 1; \
x = x->parent; \
} \
else { \
if (!w->_right->red) { \
w->_left->red = 0; \
w->red = 1; \
rb_##_right##_rotate(tree, w); \
w = x->parent->_right; \
} \
w->red = x->parent->red; \
x->parent->red = 0; \
w->_right->red = 0; \
rb_##_left##_rotate(tree, x->parent); \
x = root; \
} \
} while (0)
while (x != root && !x->red) {
if (x == x->parent->left) {
RB_DELETE_FIXUP_SUB(left, right);
}
else {
RB_DELETE_FIXUP_SUB(right, left);
}
}
x->red = 0;
#undef RB_DELETE_FIXUP_SUB
}
/* *
* rb_delete - deletes @node from @tree, and calls rb_delete_fixup to
* restore red-black properties.
* */
void
rb_delete(rb_tree *tree, rb_node *node) {
rb_node *x, *y, *z = node;
rb_node *nil = tree->nil, *root = tree->root;
y = (z->left == nil || z->right == nil) ? z : rb_tree_successor(tree, z);
x = (y->left != nil) ? y->left : y->right;
assert(y != root && y != nil);
x->parent = y->parent;
if (y == y->parent->left) {
y->parent->left = x;
}
else {
y->parent->right = x;
}
bool need_fixup = !(y->red);
if (y != z) {
if (z == z->parent->left) {
z->parent->left = y;
}
else {
z->parent->right = y;
}
z->left->parent = z->right->parent = y;
*y = *z;
}
if (need_fixup) {
rb_delete_fixup(tree, x);
}
}
/* rb_tree_destroy - destroy a tree and free memory */
void
rb_tree_destroy(rb_tree *tree) {
kfree(tree->root);
kfree(tree->nil);
kfree(tree);
}
/* *
* rb_node_prev - returns the predecessor node of @node in @tree,
* or 'NULL' if no predecessor exists.
* */
rb_node *
rb_node_prev(rb_tree *tree, rb_node *node) {
rb_node *prev = rb_tree_predecessor(tree, node);
return (prev != tree->nil) ? prev : NULL;
}
/* *
* rb_node_next - returns the successor node of @node in @tree,
* or 'NULL' if no successor exists.
* */
rb_node *
rb_node_next(rb_tree *tree, rb_node *node) {
rb_node *next = rb_tree_successor(tree, node);
return (next != tree->nil) ? next : NULL;
}
/* rb_node_root - returns the root node of a @tree, or 'NULL' if tree is empty */
rb_node *
rb_node_root(rb_tree *tree) {
rb_node *node = tree->root->left;
return (node != tree->nil) ? node : NULL;
}
/* rb_node_left - gets the left child of @node, or 'NULL' if no such node */
rb_node *
rb_node_left(rb_tree *tree, rb_node *node) {
rb_node *left = node->left;
return (left != tree->nil) ? left : NULL;
}
/* rb_node_right - gets the right child of @node, or 'NULL' if no such node */
rb_node *
rb_node_right(rb_tree *tree, rb_node *node) {
rb_node *right = node->right;
return (right != tree->nil) ? right : NULL;
}
int
check_tree(rb_tree *tree, rb_node *node) {
rb_node *nil = tree->nil;
if (node == nil) {
assert(!node->red);
return 1;
}
if (node->left != nil) {
assert(COMPARE(tree, node, node->left) >= 0);
assert(node->left->parent == node);
}
if (node->right != nil) {
assert(COMPARE(tree, node, node->right) <= 0);
assert(node->right->parent == node);
}
if (node->red) {
assert(!node->left->red && !node->right->red);
}
int hb_left = check_tree(tree, node->left);
int hb_right = check_tree(tree, node->right);
assert(hb_left == hb_right);
int hb = hb_left;
if (!node->red) {
hb ++;
}
return hb;
}
static void *
check_safe_kmalloc(size_t size) {
void *ret = kmalloc(size);
assert(ret != NULL);
return ret;
}
struct check_data {
long data;
rb_node rb_link;
};
#define rbn2data(node) \
(to_struct(node, struct check_data, rb_link))
static inline int
check_compare1(rb_node *node1, rb_node *node2) {
return rbn2data(node1)->data - rbn2data(node2)->data;
}
static inline int
check_compare2(rb_node *node, void *key) {
return rbn2data(node)->data - (long)key;
}
void
check_rb_tree(void) {
rb_tree *tree = rb_tree_create(check_compare1);
assert(tree != NULL);
rb_node *nil = tree->nil, *root = tree->root;
assert(!nil->red && root->left == nil);
int total = 1000;
struct check_data **all = check_safe_kmalloc(sizeof(struct check_data *) * total);
long i;
for (i = 0; i < total; i ++) {
all[i] = check_safe_kmalloc(sizeof(struct check_data));
all[i]->data = i;
}
int *mark = check_safe_kmalloc(sizeof(int) * total);
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
int j = (rand() % (total - i)) + i;
struct check_data *z = all[i];
all[i] = all[j];
all[j] = z;
}
memset(mark, 0, sizeof(int) * total);
for (i = 0; i < total; i ++) {
mark[all[i]->data] = 1;
}
for (i = 0; i < total; i ++) {
assert(mark[i] == 1);
}
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_node *node;
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)(all[i]->data));
assert(node != NULL && node == &(all[i]->rb_link));
}
for (i = 0; i < total; i ++) {
node = rb_search(tree, check_compare2, (void *)i);
assert(node != NULL && rbn2data(node)->data == i);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(!nil->red && root->left == nil);
long max = 32;
if (max > total) {
max = total;
}
for (i = 0; i < max; i ++) {
all[i]->data = max;
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
for (i = 0; i < max; i ++) {
node = rb_search(tree, check_compare2, (void *)max);
assert(node != NULL && rbn2data(node)->data == max);
rb_delete(tree, node);
check_tree(tree, root->left);
}
assert(rb_tree_empty(tree));
for (i = 0; i < total; i ++) {
rb_insert(tree, &(all[i]->rb_link));
check_tree(tree, root->left);
}
rb_tree_destroy(tree);
for (i = 0; i < total; i ++) {
kfree(all[i]);
}
kfree(mark);
kfree(all);
}

+ 0
- 32
code/lab8/kern/libs/rb_tree.h View File

@ -1,32 +0,0 @@
#ifndef __KERN_LIBS_RB_TREE_H__
#define __KERN_LIBS_RB_TREE_H__
#include <defs.h>
typedef struct rb_node {
bool red; // if red = 0, it's a black node
struct rb_node *parent;
struct rb_node *left, *right;
} rb_node;
typedef struct rb_tree {
// compare function should return -1 if *node1 < *node2, 1 if *node1 > *node2, and 0 otherwise
int (*compare)(rb_node *node1, rb_node *node2);
struct rb_node *nil, *root;
} rb_tree;
rb_tree *rb_tree_create(int (*compare)(rb_node *node1, rb_node *node2));
void rb_tree_destroy(rb_tree *tree);
void rb_insert(rb_tree *tree, rb_node *node);
void rb_delete(rb_tree *tree, rb_node *node);
rb_node *rb_search(rb_tree *tree, int (*compare)(rb_node *node, void *key), void *key);
rb_node *rb_node_prev(rb_tree *tree, rb_node *node);
rb_node *rb_node_next(rb_tree *tree, rb_node *node);
rb_node *rb_node_root(rb_tree *tree);
rb_node *rb_node_left(rb_tree *tree, rb_node *node);
rb_node *rb_node_right(rb_tree *tree, rb_node *node);
void check_rb_tree(void);
#endif /* !__KERN_LIBS_RB_TREE_H__ */

+ 253
- 583
code/lab8/kern/mm/kmalloc.c View File

@ -6,635 +6,305 @@
#include <sync.h>
#include <pmm.h>
#include <stdio.h>
#include <rb_tree.h>
/* The slab allocator used in ucore is based on an algorithm first introduced by
Jeff Bonwick for the SunOS operating system. The paper can be downloaded from
http://citeseer.ist.psu.edu/bonwick94slab.html
An implementation of the Slab Allocator as described in outline in:
UNIX Internals: The New Frontiers by Uresh Vahalia
Pub: Prentice Hall ISBN 0-13-101908-2
Within a kernel, a considerable amount of memory is allocated for a finite set
of objects such as file descriptors and other common structures. Jeff found that
the amount of time required to initialize a regular object in the kernel exceeded
the amount of time required to allocate and deallocate it. His conclusion was
that instead of freeing the memory back to a global pool, he would have the memory
remain initialized for its intended purpose.
In our simple slab implementation, the high-level organization of the slab
structures is simplified. At the highest level is an array slab_cache[SLAB_CACHE_NUM],
and each array element is a slab_cache which has slab chains. Each slab_cache has
two lists: one chains the fully allocated slabs, and the other chains the not-full
(possibly empty) slabs. Each slab has a fixed number (2^n) of pages. In each
slab there are many objects with the same fixed size (32B ~ 128KB).
+----------------------------------+
| slab_cache[0] for 0~32B obj |
+----------------------------------+
| slab_cache[1] for 33B~64B obj |-->lists for slabs
+----------------------------------+ |
| slab_cache[2] for 65B~128B obj | |
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
+----------------------------------+ |
| slab_cache[12]for 64KB~128KB obj | |
+----------------------------------+ |
|
slabs_full/slabs_not +---------------------+
-<-----------<----------<-+
| | |
slab1 slab2 slab3...
|
|-------|-------|
pages1 pages2 pages3...
|
|
|
slab_t+n*bufctl_t+obj1-obj2-obj3...objn (the size of obj is small)
|
OR
|
obj1-obj2-obj3...objn WITH slab_t+n*bufctl_t in another slab (the size of obj is BIG)
The important functions are:
kmem_cache_grow(kmem_cache_t *cachep)
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp)
kmalloc(size_t size): used by outside functions that need to dynamically allocate memory
kfree(void *objp): used by outside functions that need to dynamically release memory
*/
#define BUFCTL_END 0xFFFFFFFFL // the signature of the last bufctl
#define SLAB_LIMIT 0xFFFFFFFEL // the max value of obj number
typedef size_t kmem_bufctl_t; //the index of obj in slab
typedef struct slab_s {
list_entry_t slab_link; // the list entry linked to kmem_cache list
void *s_mem; // the kernel virtual address of the first obj in slab
size_t inuse; // the number of allocated objs
size_t offset; // the first obj's offset value in slab
kmem_bufctl_t free; // the first free obj's index in slab
} slab_t;
// get the slab address according to the link element (see list.h)
#define le2slab(le, member) \
to_struct((le), slab_t, member)
typedef struct kmem_cache_s kmem_cache_t;
struct kmem_cache_s {
list_entry_t slabs_full; // list for fully allocated slabs
list_entry_t slabs_notfull; // list for not-fully allocated slabs
size_t objsize; // the fixed size of obj
size_t num; // number of objs per slab
size_t offset; // this first obj's offset in slab
bool off_slab; // the control part of slab in slab or not.
/* order of pages per slab (2^n) */
size_t page_order;
kmem_cache_t *slab_cachep;
};
#define MIN_SIZE_ORDER 5 // 32
#define MAX_SIZE_ORDER 17 // 128k
#define SLAB_CACHE_NUM (MAX_SIZE_ORDER - MIN_SIZE_ORDER + 1)
/*
* SLOB Allocator: Simple List Of Blocks
*
* Matt Mackall <mpm@selenic.com> 12/30/03
*
* How SLOB works:
*
* The core of SLOB is a traditional K&R style heap allocator, with
* support for returning aligned objects. The granularity of this
* allocator is 8 bytes on x86, though it's perhaps possible to reduce
* this to 4 if it's deemed worth the effort. The slob heap is a
* singly-linked list of pages from __get_free_page, grown on demand
* and allocation from the heap is currently first-fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are 8-byte aligned and prepended with an 8-byte header.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* __get_free_pages directly so that it can return page-aligned blocks
* and keeps a linked list of such pages and their orders. These
* objects are detected in kfree() by their page alignment.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
* destructors for every SLAB allocation. Objects are returned with
* the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
* set, in which case the low-level allocator will fragment blocks to
* create the proper alignment. Again, objects of page-size or greater
* are allocated by calling __get_free_pages. As SLAB objects know
* their size, no separate size bookkeeping is necessary and there is
* essentially no allocation space overhead.
*/
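/*
 * Alignment sketch (illustration only): if slob_alloc() is called with a
 * non-zero 'align' (for instance SLOB_ALIGN == L1_CACHE_BYTES == 64), the
 * candidate block is rounded up with ALIGN() -- e.g. ALIGN(0x1234, 64) ==
 * 0x1240 -- and the head fragment created by the rounding stays on the free
 * list while the aligned part is returned. The ucore kmalloc() path always
 * passes align == 0, so this branch is normally not taken.
 */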
//some helper
#define spin_lock_irqsave(l, f) local_intr_save(f)
#define spin_unlock_irqrestore(l, f) local_intr_restore(f)
typedef unsigned int gfp_t;
#ifndef PAGE_SIZE
#define PAGE_SIZE PGSIZE
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 64
#endif
#ifndef ALIGN
#define ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
#endif
struct slob_block {
int units;
struct slob_block *next;
};
typedef struct slob_block slob_t;
static kmem_cache_t slab_cache[SLAB_CACHE_NUM];
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
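/* Editor's note (illustrative, not in the original source): assuming a 32-bit
 * build, sizeof(slob_t) == 8, so SLOB_UNIT == 8 and, for example,
 *   SLOB_UNITS(1)   == 1    (every block occupies at least one unit)
 *   SLOB_UNITS(100) == 13   ((100 + 7) / 8)
 * __kmalloc() below passes size + SLOB_UNIT to slob_alloc(), i.e. it asks for
 * one extra unit to hold the size-recording header. */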
static void init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align);
static void check_slab(void);
struct bigblock {
int order;
void *pages;
struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
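/* Editor's illustrative sketch (not part of the original source): the bigblock
 * path of __kmalloc()/kfree() below, for a hypothetical 8 KB request:
 *   void *p = kmalloc(8192);                     // >= PAGE_SIZE, so no heap units
 *     bb = slob_alloc(sizeof(bigblock_t), ...);  // small bookkeeping node
 *     bb->order = find_order(8192);              // == 1, i.e. two pages
 *     bb->pages = __slob_get_free_pages(0, 1);   // page-aligned block
 *     bb is pushed onto the bigblocks list;
 *   kfree(p);   // p is page aligned, so kfree() scans bigblocks,
 *               // frees the pages and then the bookkeeping node */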
//slab_init - call init_kmem_cache function to reset the slab_cache array
static void
slab_init(void) {
size_t i;
//the alignment for objs in a slab; a power of two (2^n) gives better performance
size_t align = 16;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
init_kmem_cache(slab_cache + i, 1 << (i + MIN_SIZE_ORDER), align);
}
check_slab();
}
inline void
kmalloc_init(void) {
slab_init();
cprintf("kmalloc_init() succeeded!\n");
}
//slab_allocated - sum up the total size of allocated objs
static size_t
slab_allocated(void) {
size_t total = 0;
int i;
bool intr_flag;
local_intr_save(intr_flag);
{
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
list_entry_t *list, *le;
list = le = &(cachep->slabs_full);
while ((le = list_next(le)) != list) {
total += cachep->num * cachep->objsize;
}
list = le = &(cachep->slabs_notfull);
while ((le = list_next(le)) != list) {
slab_t *slabp = le2slab(le, slab_link);
total += slabp->inuse * cachep->objsize;
}
}
}
local_intr_restore(intr_flag);
return total;
}
static void* __slob_get_free_pages(gfp_t gfp, int order)
{
struct Page * page = alloc_pages(1 << order);
if(!page)
return NULL;
return page2kva(page);
}
inline size_t
kallocated(void) {
return slab_allocated();
}
#define __slob_get_free_page(gfp) __slob_get_free_pages(gfp, 0)
// slab_mgmt_size - get the size of slab control area (slab_t+num*kmem_bufctl_t)
static size_t
slab_mgmt_size(size_t num, size_t align) {
return ROUNDUP(sizeof(slab_t) + num * sizeof(kmem_bufctl_t), align);
}
static inline void __slob_free_pages(unsigned long kva, int order)
{
free_pages(kva2page(kva), 1 << order);
}
// cache_estimate - estimate the number of objs in a slab
static void
cache_estimate(size_t order, size_t objsize, size_t align, bool off_slab, size_t *remainder, size_t *num) {
size_t nr_objs, mgmt_size;
size_t slab_size = (PGSIZE << order);
if (off_slab) {
mgmt_size = 0;
nr_objs = slab_size / objsize;
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
}
else {
nr_objs = (slab_size - sizeof(slab_t)) / (objsize + sizeof(kmem_bufctl_t));
while (slab_mgmt_size(nr_objs, align) + nr_objs * objsize > slab_size) {
nr_objs --;
}
if (nr_objs > SLAB_LIMIT) {
nr_objs = SLAB_LIMIT;
}
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
*remainder = slab_size - nr_objs * objsize - mgmt_size;
}
static void slob_free(void *b, int size);
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
assert( (size + SLOB_UNIT) < PAGE_SIZE );
slob_t *prev, *cur, *aligned = 0;
int delta = 0, units = SLOB_UNITS(size);
unsigned long flags;
spin_lock_irqsave(&slob_lock, flags);
prev = slobfree;
for (cur = prev->next; ; prev = cur, cur = cur->next) {
if (align) {
aligned = (slob_t *)ALIGN((unsigned long)cur, align);
delta = aligned - cur;
}
if (cur->units >= units + delta) { /* room enough? */
if (delta) { /* need to fragment head to align? */
aligned->units = cur->units - delta;
aligned->next = cur->next;
cur->next = aligned;
cur->units = delta;
prev = cur;
cur = aligned;
}
if (cur->units == units) /* exact fit? */
prev->next = cur->next; /* unlink */
else { /* fragment */
prev->next = cur + units;
prev->next->units = cur->units - units;
prev->next->next = cur->next;
cur->units = units;
}
slobfree = prev;
spin_unlock_irqrestore(&slob_lock, flags);
return cur;
}
if (cur == slobfree) {
spin_unlock_irqrestore(&slob_lock, flags);
if (size == PAGE_SIZE) /* trying to shrink arena? */
return 0;
cur = (slob_t *)__slob_get_free_page(gfp);
if (!cur)
return 0;
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
}
}
}
// calculate_slab_order - estimate the size (4K~4M) of a slab
// parameters:
// cachep: the slab_cache
// objsize: the size of obj
// align: align bit for objs
// off_slab: the control part of slab in slab or not
// left_over: the size of the unusable area in the slab
static void
calculate_slab_order(kmem_cache_t *cachep, size_t objsize, size_t align, bool off_slab, size_t *left_over) {
size_t order;
for (order = 0; order <= KMALLOC_MAX_ORDER; order ++) {
size_t num, remainder;
cache_estimate(order, objsize, align, off_slab, &remainder, &num);
if (num != 0) {
if (off_slab) {
size_t off_slab_limit = objsize - sizeof(slab_t);
off_slab_limit /= sizeof(kmem_bufctl_t);
if (num > off_slab_limit) {
panic("off_slab: objsize = %d, num = %d.", objsize, num);
}
}
if (remainder * 8 <= (PGSIZE << order)) {
cachep->num = num;
cachep->page_order = order;
if (left_over != NULL) {
*left_over = remainder;
}
return ;
}
}
}
panic("calculate_slab_over: failed.");
}
static void slob_free(void *block, int size)
{
slob_t *cur, *b = (slob_t *)block;
unsigned long flags;
if (!block)
return;
if (size)
b->units = SLOB_UNITS(size);
/* Find reinsertion point */
spin_lock_irqsave(&slob_lock, flags);
for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
if (cur >= cur->next && (b > cur || b < cur->next))
break;
if (b + b->units == cur->next) {
b->units += cur->next->units;
b->next = cur->next->next;
} else
b->next = cur->next;
if (cur + cur->units == b) {
cur->units += b->units;
cur->next = b->next;
} else
cur->next = b;
slobfree = cur;
spin_unlock_irqrestore(&slob_lock, flags);
}
// getorder - find the smallest order (>= MIN_SIZE_ORDER) such that n <= 2^order
static inline size_t
getorder(size_t n) {
size_t order = MIN_SIZE_ORDER, order_size = (1 << order);
for (; order <= MAX_SIZE_ORDER; order ++, order_size <<= 1) {
if (n <= order_size) {
return order;
}
}
panic("getorder failed. %d\n", n);
}
// init_kmem_cache - initialize a slab_cache cachep for objs of size objsize
static void
init_kmem_cache(kmem_cache_t *cachep, size_t objsize, size_t align) {
list_init(&(cachep->slabs_full));
list_init(&(cachep->slabs_notfull));
objsize = ROUNDUP(objsize, align);
cachep->objsize = objsize;
cachep->off_slab = (objsize >= (PGSIZE >> 3));
size_t left_over;
calculate_slab_order(cachep, objsize, align, cachep->off_slab, &left_over);
assert(cachep->num > 0);
size_t mgmt_size = slab_mgmt_size(cachep->num, align);
if (cachep->off_slab && left_over >= mgmt_size) {
cachep->off_slab = 0;
}
if (cachep->off_slab) {
cachep->offset = 0;
cachep->slab_cachep = slab_cache + (getorder(mgmt_size) - MIN_SIZE_ORDER);
}
else {
cachep->offset = mgmt_size;
}
}
static void *kmem_cache_alloc(kmem_cache_t *cachep);
#define slab_bufctl(slabp) \
((kmem_bufctl_t*)(((slab_t *)(slabp)) + 1))
// kmem_cache_slabmgmt - get the address of a slab according to page
// - and initialize the slab according to cachep
static slab_t *
kmem_cache_slabmgmt(kmem_cache_t *cachep, struct Page *page) {
void *objp = page2kva(page);
slab_t *slabp;
if (cachep->off_slab) {
if ((slabp = kmem_cache_alloc(cachep->slab_cachep)) == NULL) {
return NULL;
}
}
else {
slabp = page2kva(page);
}
slabp->inuse = 0;
slabp->offset = cachep->offset;
slabp->s_mem = objp + cachep->offset;
return slabp;
}
#define SET_PAGE_CACHE(page, cachep) \
do { \
struct Page *__page = (struct Page *)(page); \
kmem_cache_t **__cachepp = (kmem_cache_t **)&(__page->page_link.next); \
*__cachepp = (kmem_cache_t *)(cachep); \
} while (0)
#define SET_PAGE_SLAB(page, slabp) \
do { \
struct Page *__page = (struct Page *)(page); \
slab_t **__cachepp = (slab_t **)&(__page->page_link.prev); \
*__cachepp = (slab_t *)(slabp); \
} while (0)
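/* Editor's note (illustrative, not in the original source): pages owned by a
 * slab are not on any free list, so their page_link pointer fields are reused
 * as back-pointers:
 *   page->page_link.next -> the kmem_cache_t this page belongs to
 *   page->page_link.prev -> the slab_t this page belongs to
 * kmem_cache_free()/kfree() recover them via GET_PAGE_CACHE/GET_PAGE_SLAB. */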
// kmem_cache_grow - allocate a new slab by calling alloc_pages
// - set control area in the new slab
static bool
kmem_cache_grow(kmem_cache_t *cachep) {
struct Page *page = alloc_pages(1 << cachep->page_order);
if (page == NULL) {
goto failed;
}
slab_t *slabp;
if ((slabp = kmem_cache_slabmgmt(cachep, page)) == NULL) {
goto oops;
}
size_t order_size = (1 << cachep->page_order);
do {
//record which cache and slab this page belongs to (see memlayout.h: struct Page)
SET_PAGE_CACHE(page, cachep);
SET_PAGE_SLAB(page, slabp);
//this page is used for slab
SetPageSlab(page);
page ++;
} while (-- order_size);
int i;
for (i = 0; i < cachep->num; i ++) {
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[cachep->num - 1] = BUFCTL_END;
slabp->free = 0;
bool intr_flag;
local_intr_save(intr_flag);
{
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
local_intr_restore(intr_flag);
return 1;
oops:
free_pages(page, 1 << cachep->page_order);
failed:
return 0;
}
// kmem_cache_alloc_one - allocate an obj in a slab
static void *
kmem_cache_alloc_one(kmem_cache_t *cachep, slab_t *slabp) {
slabp->inuse ++;
void *objp = slabp->s_mem + slabp->free * cachep->objsize;
slabp->free = slab_bufctl(slabp)[slabp->free];
if (slabp->free == BUFCTL_END) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_full), &(slabp->slab_link));
}
return objp;
}
void check_slob(void) {
cprintf("check_slob() success\n");
}
// kmem_cache_alloc - call the kmem_cache_alloc_one function to allocate an obj
// - if no free obj, try to allocate a slab
static void *
kmem_cache_alloc(kmem_cache_t *cachep) {
void *objp;
bool intr_flag;
try_again:
local_intr_save(intr_flag);
if (list_empty(&(cachep->slabs_notfull))) {
goto alloc_new_slab;
}
slab_t *slabp = le2slab(list_next(&(cachep->slabs_notfull)), slab_link);
objp = kmem_cache_alloc_one(cachep, slabp);
local_intr_restore(intr_flag);
return objp;
alloc_new_slab:
local_intr_restore(intr_flag);
if (kmem_cache_grow(cachep)) {
goto try_again;
}
return NULL;
}
void
slob_init(void) {
cprintf("use SLOB allocator\n");
check_slob();
}
// kmalloc - simple interface used by outside functions
// - to allocate memory using the kmem_cache_alloc function
void *
kmalloc(size_t size) {
assert(size > 0);
size_t order = getorder(size);
if (order > MAX_SIZE_ORDER) {
return NULL;
}
return kmem_cache_alloc(slab_cache + (order - MIN_SIZE_ORDER));
}
inline void
kmalloc_init(void) {
slob_init();
cprintf("kmalloc_init() succeeded!\n");
}
static void kmem_cache_free(kmem_cache_t *cachep, void *obj);
// kmem_slab_destroy - call free_pages & kmem_cache_free to free a slab
static void
kmem_slab_destroy(kmem_cache_t *cachep, slab_t *slabp) {
struct Page *page = kva2page(slabp->s_mem - slabp->offset);
struct Page *p = page;
size_t order_size = (1 << cachep->page_order);
do {
assert(PageSlab(p));
ClearPageSlab(p);
p ++;
} while (-- order_size);
free_pages(page, 1 << cachep->page_order);
if (cachep->off_slab) {
kmem_cache_free(cachep->slab_cachep, slabp);
}
}
size_t
slob_allocated(void) {
return 0;
}
// kmem_cache_free_one - free an obj in a slab
// - if slab->inuse==0, then free the slab
static void
kmem_cache_free_one(kmem_cache_t *cachep, slab_t *slabp, void *objp) {
//should not use divide operator ???
size_t objnr = (objp - slabp->s_mem) / cachep->objsize;
slab_bufctl(slabp)[objnr] = slabp->free;
slabp->free = objnr;
slabp->inuse --;
if (slabp->inuse == 0) {
list_del(&(slabp->slab_link));
kmem_slab_destroy(cachep, slabp);
}
else if (slabp->inuse == cachep->num -1 ) {
list_del(&(slabp->slab_link));
list_add(&(cachep->slabs_notfull), &(slabp->slab_link));
}
}
size_t
kallocated(void) {
return slob_allocated();
}
#define GET_PAGE_CACHE(page) \
(kmem_cache_t *)((page)->page_link.next)
#define GET_PAGE_SLAB(page) \
(slab_t *)((page)->page_link.prev)
// kmem_cache_free - call kmem_cache_free_one function to free an obj
static void
kmem_cache_free(kmem_cache_t *cachep, void *objp) {
bool intr_flag;
struct Page *page = kva2page(objp);
if (!PageSlab(page)) {
panic("not a slab page %08x\n", objp);
}
local_intr_save(intr_flag);
{
kmem_cache_free_one(cachep, GET_PAGE_SLAB(page), objp);
}
local_intr_restore(intr_flag);
}
static int find_order(int size)
{
int order = 0;
for ( ; size > 4096 ; size >>=1)
order++;
return order;
}
// kfree - simple interface used by outside functions to free an obj
void
kfree(void *objp) {
kmem_cache_free(GET_PAGE_CACHE(kva2page(objp)), objp);
}
static void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
bigblock_t *bb;
unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
return m ? (void *)(m + 1) : 0;
}
bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
if (!bb)
return 0;
bb->order = find_order(size);
bb->pages = (void *)__slob_get_free_pages(gfp, bb->order);
if (bb->pages) {
spin_lock_irqsave(&block_lock, flags);
bb->next = bigblocks;
bigblocks = bb;
spin_unlock_irqrestore(&block_lock, flags);
return bb->pages;
}
slob_free(bb, sizeof(bigblock_t));
return 0;
}
static inline void
check_slab_empty(void) {
int i;
for (i = 0; i < SLAB_CACHE_NUM; i ++) {
kmem_cache_t *cachep = slab_cache + i;
assert(list_empty(&(cachep->slabs_full)));
assert(list_empty(&(cachep->slabs_notfull)));
}
}
void *
kmalloc(size_t size)
{
return __kmalloc(size, 0);
}
void
check_slab(void) {
int i;
void *v0, *v1;
size_t nr_free_pages_store = nr_free_pages();
size_t kernel_allocated_store = slab_allocated();
/* slab must be empty now */
check_slab_empty();
assert(slab_allocated() == 0);
kmem_cache_t *cachep0, *cachep1;
cachep0 = slab_cache;
assert(cachep0->objsize == 32 && cachep0->num > 1 && !cachep0->off_slab);
assert((v0 = kmalloc(16)) != NULL);
slab_t *slabp0, *slabp1;
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
assert(slabp0->inuse == 1 && list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
struct Page *p0, *p1;
size_t order_size;
p0 = kva2page(slabp0->s_mem - slabp0->offset), p1 = p0;
order_size = (1 << cachep0->page_order);
for (i = 0; i < cachep0->page_order; i ++, p1 ++) {
assert(PageSlab(p1));
assert(GET_PAGE_CACHE(p1) == cachep0 && GET_PAGE_SLAB(p1) == slabp0);
}
assert(v0 == slabp0->s_mem);
assert((v1 = kmalloc(16)) != NULL && v1 == v0 + 32);
kfree(v0);
assert(slabp0->free == 0);
kfree(v1);
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->page_order; i ++, p0 ++) {
assert(!PageSlab(p0));
}
v0 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
for (i = 0; i < cachep0->num - 1; i ++) {
kmalloc(16);
}
assert(slabp0->inuse == cachep0->num);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
v1 = kmalloc(16);
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
kfree(v0);
assert(list_empty(&(cachep0->slabs_full)));
assert(list_next(&(slabp0->slab_link)) == &(slabp1->slab_link)
|| list_next(&(slabp1->slab_link)) == &(slabp0->slab_link));
kfree(v1);
assert(!list_empty(&(cachep0->slabs_notfull)));
assert(list_next(&(cachep0->slabs_notfull)) == &(slabp0->slab_link));
assert(list_next(&(slabp0->slab_link)) == &(cachep0->slabs_notfull));
v1 = kmalloc(16);
assert(v1 == v0);
assert(list_next(&(cachep0->slabs_full)) == &(slabp0->slab_link));
assert(list_empty(&(cachep0->slabs_notfull)));
for (i = 0; i < cachep0->num; i ++) {
kfree(v1 + i * cachep0->objsize);
}
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
void kfree(void *block)
{
bigblock_t *bb, **last = &bigblocks;
unsigned long flags;
if (!block)
return;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
/* might be on the big block list */
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
if (bb->pages == block) {
*last = bb->next;
spin_unlock_irqrestore(&block_lock, flags);
__slob_free_pages((unsigned long)block, bb->order);
slob_free(bb, sizeof(bigblock_t));
return;
}
}
spin_unlock_irqrestore(&block_lock, flags);
}
slob_free((slob_t *)block - 1, 0);
return;
}
cachep0 = slab_cache;
bool has_off_slab = 0;
for (i = 0; i < SLAB_CACHE_NUM; i ++, cachep0 ++) {
if (cachep0->off_slab) {
has_off_slab = 1;
cachep1 = cachep0->slab_cachep;
if (!cachep1->off_slab) {
break;
}
}
}
if (!has_off_slab) {
goto check_pass;
}
assert(cachep0->off_slab && !cachep1->off_slab);
assert(cachep1 < cachep0);
assert(list_empty(&(cachep0->slabs_full)));
assert(list_empty(&(cachep0->slabs_notfull)));
assert(list_empty(&(cachep1->slabs_full)));
assert(list_empty(&(cachep1->slabs_notfull)));
v0 = kmalloc(cachep0->objsize);
p0 = kva2page(v0);
assert(page2kva(p0) == v0);
if (cachep0->num == 1) {
assert(!list_empty(&(cachep0->slabs_full)));
slabp0 = le2slab(list_next(&(cachep0->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep0->slabs_notfull)));
slabp0 = le2slab(list_next(&(cachep0->slabs_notfull)), slab_link);
}
assert(slabp0 != NULL);
if (cachep1->num == 1) {
assert(!list_empty(&(cachep1->slabs_full)));
slabp1 = le2slab(list_next(&(cachep1->slabs_full)), slab_link);
}
else {
assert(!list_empty(&(cachep1->slabs_notfull)));
slabp1 = le2slab(list_next(&(cachep1->slabs_notfull)), slab_link);
}
assert(slabp1 != NULL);
order_size = (1 << cachep0->page_order);
for (i = 0; i < order_size; i ++, p0 ++) {
assert(PageSlab(p0));
assert(GET_PAGE_CACHE(p0) == cachep0 && GET_PAGE_SLAB(p0) == slabp0);
}
kfree(v0);
check_pass:
check_rb_tree();
check_slab_empty();
assert(slab_allocated() == 0);
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == slab_allocated());
cprintf("check_slab() succeeded!\n");
}
unsigned int ksize(const void *block)
{
bigblock_t *bb;
unsigned long flags;
if (!block)
return 0;
if (!((unsigned long)block & (PAGE_SIZE-1))) {
spin_lock_irqsave(&block_lock, flags);
for (bb = bigblocks; bb; bb = bb->next)
if (bb->pages == block) {
spin_unlock_irqrestore(&block_lock, flags);
return PAGE_SIZE << bb->order;
}
spin_unlock_irqrestore(&block_lock, flags);
}
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}

+ 3
- 3
code/lab8/kern/mm/kmalloc.h View File

@@ -1,5 +1,5 @@
#ifndef __KERN_MM_SLAB_H__
#define __KERN_MM_SLAB_H__
#ifndef __KERN_MM_KMALLOC_H__
#define __KERN_MM_KMALLOC_H__
#include <defs.h>
@@ -12,5 +12,5 @@ void kfree(void *objp);
size_t kallocated(void);
#endif /* !__KERN_MM_SLAB_H__ */
#endif /* !__KERN_MM_KMALLOC_H__ */

+ 0
- 5
code/lab8/kern/mm/memlayout.h View File

@@ -156,11 +156,6 @@ typedef struct {
unsigned int nr_free; // # of free pages in this free list
} free_area_t;
/* for slab style kmalloc */
#define PG_slab 2 // page frame is included in a slab
#define SetPageSlab(page) set_bit(PG_slab, &((page)->flags))
#define ClearPageSlab(page) clear_bit(PG_slab, &((page)->flags))
#define PageSlab(page) test_bit(PG_slab, &((page)->flags))
#endif /* !__ASSEMBLER__ */

+ 0
- 4
code/lab8/kern/mm/vmm.c View File

@@ -257,8 +257,6 @@ check_vmm(void) {
check_vma_struct();
check_pgfault();
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vmm() succeeded.\n");
}
@@ -305,8 +303,6 @@ check_vma_struct(void) {
mm_destroy(mm);
assert(nr_free_pages_store == nr_free_pages());
cprintf("check_vma_struct() succeeded!\n");
}

+ 1
- 2
code/lab8/kern/process/proc.c View File

@@ -834,8 +834,7 @@ init_main(void *arg) {
assert(nr_process == 2);
assert(list_next(&proc_list) == &(initproc->list_link));
assert(list_prev(&proc_list) == &(initproc->list_link));
assert(nr_free_pages_store == nr_free_pages());
assert(kernel_allocated_store == kallocated());
cprintf("init check memory pass.\n");
return 0;
}

+ 1
- 1
code/lab8/tools/grade.sh View File

@@ -338,7 +338,7 @@ default_check() {
'PDE(001) fac00000-fb000000 00400000 -rw' \
' |-- PTE(000e0) faf00000-fafe0000 000e0000 urw' \
' |-- PTE(00001) fafeb000-fafec000 00001000 -rw' \
'check_slab() succeeded!' \
'check_slob() succeeded!' \
'check_vma_struct() succeeded!' \
'page fault at 0x00000100: K/W [no page found].' \
'check_pgfault() succeeded!' \
