# HG changeset patch
# User George Dunlap
# Date 1255446396 -3600
# Node ID d27bb3c56e714b2084807ce0da226f8395e3cf93
Initial commit.

diff -r 000000000000 -r d27bb3c56e71 Makefile
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Makefile	Tue Oct 13 16:06:36 2009 +0100
@@ -0,0 +1,25 @@
+CC = gcc
+
+CFLAGS += -g
+CFLAGS += -fno-strict-aliasing
+CFLAGS += -std=gnu99
+CFLAGS += -Wall -Wstrict-prototypes
+CFLAGS += -Wno-unused-value
+CFLAGS += -Wdeclaration-after-statement
+CFLAGS += -Werror
+
+BIN = simulator
+
+HDRS = list.h workload.h sim.h stats.h
+
+all: $(BIN)
+
+.PHONY: clean
+clean:
+	$(RM) *.a *.so *.o $(BIN) $(LIBBIN)
+
+%.o: %.c $(HDRS) Makefile
+	$(CC) $(CFLAGS) -c -o $@ $<
+
+simulator: simulator.o workloads.o sched_rr.o stats.o
+	$(CC) $(CFLAGS) -o $@ $^
\ No newline at end of file
diff -r 000000000000 -r d27bb3c56e71 design.txt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/design.txt	Tue Oct 13 16:06:36 2009 +0100
@@ -0,0 +1,29 @@
+Discrete event simulator to speed the development and analysis of
+different scheduling algorithms for the new scheduler.
+
+Inputs: Scheduler, Workload description
+
+Hmm... compile in all scheduler variants...?
+
+scheduler callbacks {
+	init processor,
+	init VM,
+	schedule,
+	wake,
+	block
+}
+
+scheduler interface {
+	insert event (perhaps just SCHEDULE event)
+}
+
+Workload description:
+	To begin, wake / block lists; all unconditional.
+	Later, deal with "dropped" work (e.g., video, audio)?
+	Dependencies (dom0, stubdoms, driver doms)?
+
+Types of event:
+* Wake
+* Block
+* Schedule timer
+
diff -r 000000000000 -r d27bb3c56e71 list.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/list.h	Tue Oct 13 16:06:36 2009 +0100
@@ -0,0 +1,928 @@
+/******************************************************************************
+ * list.h
+ *
+ * Useful linked-list definitions taken from the Linux kernel (2.6.18).
+ */ + +#ifndef __XEN_LIST_H__ +#define __XEN_LIST_H__ + +#if __GNUC__ > 3 +#define offsetof(a,b) __builtin_offsetof(a,b) +#else +#define offsetof(a,b) ((unsigned long)&(((a *)0)->b)) +#endif + +/** + * container_of - cast a member of a structure out to the containing structure + * + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +#define prefetch(_x) (_x) + +/* These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x00100100) +#define LIST_POISON2 ((void *) 0x00200200) + +/* + * Simple doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. + */ + +struct list_head { + struct list_head *next, *prev; +}; + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + list->next = list; + list->prev = list; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! 
+ */ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add_rcu(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + new->next = next; + new->prev = prev; + next->prev = new; + prev->next = new; +} + +/** + * list_add_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). 
+ */ +static inline void list_add_rcu(struct list_head *new, struct list_head *head) +{ + __list_add_rcu(new, head, head->next); +} + +/** + * list_add_tail_rcu - add a new entry to rcu-protected list + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_add_tail_rcu() + * or list_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + */ +static inline void list_add_tail_rcu(struct list_head *new, + struct list_head *head) +{ + __list_add_rcu(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_del(struct list_head *prev, + struct list_head *next) +{ + next->prev = prev; + prev->next = next; +} + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty on entry does not return true after this, the entry is + * in an undefined state. + */ +static inline void list_del(struct list_head *entry) +{ + ASSERT(entry->next->prev == entry); + ASSERT(entry->prev->next == entry); + __list_del(entry->prev, entry->next); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; +} + +/** + * list_del_rcu - deletes entry from list without re-initialization + * @entry: the element to delete from the list. + * + * Note: list_empty on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. 
+ * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as list_del_rcu() + * or list_add_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * list_for_each_entry_rcu(). + * + * Note that the caller is not permitted to immediately free + * the newly deleted entry. Instead, either synchronize_rcu() + * or call_rcu() must be used to defer freeing until an RCU + * grace period has elapsed. + */ +static inline void list_del_rcu(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->prev = LIST_POISON2; +} + +/** + * list_replace - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * Note: if 'old' was empty, it will be overwritten. + */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + +/* + * list_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The old entry will be replaced with the new entry atomically. + * Note: 'old' should not be empty. + */ +static inline void list_replace_rcu(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->prev = old->prev; + new->next->prev = new; + new->prev->next = new; + old->prev = LIST_POISON2; +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. 
+ */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add_tail(list, head); +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/** + * list_empty_careful - tests whether a list is empty and not being modified + * @head: the list to test + * + * Description: + * tests whether a list is empty _and_ checks that no other CPU might be + * in the process of modifying either member (next or prev) + * + * NOTE: using list_empty_careful() without synchronization + * can only be safe if the only activity that can happen + * to the list entry is list_del_init(). Eg. it cannot be used + * if another CPU could re-list_add() it. 
+ */ +static inline int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + +static inline void __list_splice(struct list_head *list, + struct list_head *head) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + struct list_head *at = head->next; + + first->prev = head; + head->next = first; + + last->next = at; + at->prev = last; +} + +/** + * list_splice - join two lists + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(struct list_head *list, struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; prefetch(pos->next), pos != (head); \ + pos = pos->next) + +/** + * __list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This variant differs from list_for_each() in that it's the + * simplest possible list iteration code, no prefetching is done. 
+ * Use this for code that knows the list to be very short (empty + * or 1 entry) most of the time. + */ +#define __list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ + pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_backwards_safe - iterate backwards over a list safe + * against removal of list entry + * @pos: the &struct list_head to use as a loop counter. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_backwards_safe(pos, n, head) \ + for ( pos = (head)->prev, n = pos->prev; pos != (head); \ + pos = n, n = pos->prev ) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. 
+ */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member); \ + prefetch(pos->member.prev), &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_prepare_entry - prepare a pos entry for use in + * list_for_each_entry_continue + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_struct within the struct. + * + * Prepares a pos entry for use as a start point in + * list_for_each_entry_continue. + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, typeof(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the + * current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe + * against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. 
+ * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_continue + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. + */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_from + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_reverse + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate backwards over list of given type, safe against removal + * of list entry. 
+ */ +#define list_for_each_entry_safe_reverse(pos, n, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member), \ + n = list_entry(pos->member.prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.prev, typeof(*n), member)) + +/** + * list_for_each_rcu - iterate over an rcu-protected list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_rcu(pos, head) \ + for (pos = (head)->next; \ + prefetch(rcu_dereference(pos)->next), pos != (head); \ + pos = pos->next) + +#define __list_for_each_rcu(pos, head) \ + for (pos = (head)->next; \ + rcu_dereference(pos) != (head); \ + pos = pos->next) + +/** + * list_for_each_safe_rcu + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + * + * Iterate over an rcu-protected list, safe against removal of list entry. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_safe_rcu(pos, n, head) \ + for (pos = (head)->next; \ + n = rcu_dereference(pos)->next, pos != (head); \ + pos = n) + +/** + * list_for_each_entry_rcu - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). 
+ */ +#define list_for_each_entry_rcu(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + prefetch(rcu_dereference(pos)->member.next), \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_continue_rcu + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * Iterate over an rcu-protected list, continuing after current point. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as list_add_rcu() + * as long as the traversal is guarded by rcu_read_lock(). + */ +#define list_for_each_continue_rcu(pos, head) \ + for ((pos) = (pos)->next; \ + prefetch(rcu_dereference((pos))->next), (pos) != (head); \ + (pos) = (pos)->next) + +/* + * Double linked lists with a single pointer list head. + * Mostly useful for hash tables where the two pointer list head is + * too wasteful. + * You lose the ability to access the tail in O(1). 
+ */ + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +static inline int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +/** + * hlist_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry(). 
+ */ +static inline void hlist_del_rcu(struct hlist_node *n) +{ + __hlist_del(n); + n->pprev = LIST_POISON2; +} + +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +/* + * hlist_replace_rcu - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * The old entry will be replaced with the new entry atomically. + */ +static inline void hlist_replace_rcu(struct hlist_node *old, + struct hlist_node *new) +{ + struct hlist_node *next = old->next; + + new->next = next; + new->pprev = old->pprev; + if (next) + new->next->pprev = &new->next; + *new->pprev = new; + old->pprev = LIST_POISON2; +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +/** + * hlist_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). 
+ */ +static inline void hlist_add_head_rcu(struct hlist_node *n, + struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + n->pprev = &h->first; + if (first) + first->pprev = &n->next; + h->first = n; +} + +/* next must be != NULL */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + *(n->pprev) = n; +} + +static inline void hlist_add_after(struct hlist_node *n, + struct hlist_node *next) +{ + next->next = n->next; + n->next = next; + next->pprev = &n->next; + + if(next->next) + next->next->pprev = &next->next; +} + +/** + * hlist_add_before_rcu + * @n: the new element to add to the hash list. + * @next: the existing element to add the new element before. + * + * Description: + * Adds the specified element to the specified hlist + * before the specified node while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. + */ +static inline void hlist_add_before_rcu(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + *(n->pprev) = n; +} + +/** + * hlist_add_after_rcu + * @prev: the existing element to add the new element after. + * @n: the new element to add to the hash list. + * + * Description: + * Adds the specified element to the specified hlist + * after the specified node while permitting racing traversals. 
+ * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_add_head_rcu() + * or hlist_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. + */ +static inline void hlist_add_after_rcu(struct hlist_node *prev, + struct hlist_node *n) +{ + n->next = prev->next; + n->pprev = &prev->next; + prev->next = n; + if (n->next) + n->next->pprev = &n->next; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ + pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +/** + * hlist_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing + * after current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
+ */ +#define hlist_for_each_entry_continue(tpos, pos, member) \ + for (pos = (pos)->next; \ + pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_from - iterate over a hlist continuing from + * current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from(tpos, pos, member) \ + for (; pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_safe - iterate over list of given type safe + * against removal of list entry + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = (head)->first; \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + + +/** + * hlist_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + * This list-traversal primitive may safely run concurrently with + * the _rcu list-mutation primitives such as hlist_add_head_rcu() + * as long as the traversal is guarded by rcu_read_lock(). 
+ */ +#define hlist_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = (head)->first; \ + rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +#endif /* __XEN_LIST_H__ */ + diff -r 000000000000 -r d27bb3c56e71 sched_rr.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/sched_rr.c Tue Oct 13 16:06:36 2009 +0100 @@ -0,0 +1,116 @@ +#include +#include +#include + +#define ASSERT assert + +#include "list.h" +#include "sim.h" + + +#define MAX_VMS 16 +#define TSLICE 2000 + +struct sched_vm { + struct list_head queue; + int vid; + struct vm *v; +}; + +struct { + struct list_head queue; + struct sched_vm vms[MAX_VMS]; +} sched_priv; + + +void sched_rr_init(void) +{ + printf("%s()\n", __func__); + INIT_LIST_HEAD(&sched_priv.queue); +} + +void sched_rr_vm_init(int vid) +{ + struct sched_vm *svm; + + printf("%s: vm %d\n", __func__, vid); + + if ( vid > MAX_VMS ) + { + fprintf(stderr, "vid %d > MAX_VMS %d!\n", vid, MAX_VMS); + exit(1); + } + + svm = sched_priv.vms + vid; + + INIT_LIST_HEAD(&svm->queue); + + svm->vid = vid; + svm->v = vm_from_vid(vid); + +} + +void sched_rr_wake(int time, struct vm * v) +{ + struct sched_vm *svm; + + printf("%s: time %d vid %d\n", + __func__, time, v->vid); + + svm = sched_priv.vms + v->vid; + + ASSERT(list_empty(&svm->queue)); + + list_add_tail(&svm->queue, &sched_priv.queue); +} + +struct vm* sched_rr_schedule(int time, int pid) +{ + struct sched_vm *svm; + struct vm *next, *prev; + + printf("%s: time %d pid %d\n", + __func__, time, pid); + prev = current(pid); + + if ( prev ) + { + printf(" current v%d\n", prev->vid); + svm = sched_priv.vms + prev->vid; + + if ( svm->v->runstate == RUNSTATE_RUNNING ) + { + printf(" adding to runqueue\n"); + list_add_tail(&svm->queue, &sched_priv.queue); + } + } + + /* Take guy on front of runqueue, set new timer */ + if ( list_empty(&sched_priv.queue) ) + { + printf(" No runnable entities\n"); + return NULL; + } 
+
+    /* Pop the head of the runqueue... */
+    svm = list_entry(sched_priv.queue.next, struct sched_vm, queue);
+
+    list_del_init(&svm->queue);
+    next = svm->v;
+
+    /* ...and arm one fixed-length timeslice for this dispatch. */
+    sim_sched_timer(TSLICE, pid);
+
+    printf(" next: v%d\n", next->vid);
+
+    return next;
+}
+
+/* Exported scheduler descriptor (see struct scheduler in sim.h). */
+struct scheduler sched_rr =
+{
+    .name="round-robin",
+    .ops = {
+        .sched_init = sched_rr_init,
+        .vm_init = sched_rr_vm_init,
+        .wake = sched_rr_wake,
+        .schedule = sched_rr_schedule
+    }
+};
diff -r 000000000000 -r d27bb3c56e71 sim.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/sim.h	Tue Oct 13 16:06:36 2009 +0100
@@ -0,0 +1,74 @@
+#ifndef __SIM_H
+#define __SIM_H
+
+#include "stats.h"
+
+/* Runstate of a VM as seen by the scheduler. */
+enum runstate {
+    RUNSTATE_RUNNING,
+    RUNSTATE_RUNNABLE,
+    RUNSTATE_BLOCKED
+};
+
+/* Statistics buckets; indexes stats.state[] below and state_name[] in
+ * simulator.c. */
+enum {
+    STATE_RUN,
+    STATE_PREEMPT,
+    STATE_WAKE,
+    STATE_BLOCK,
+    STATE_MAX
+};
+
+struct vm
+{
+    /* Public */
+    int vid;
+
+    enum runstate runstate;
+    int processor;
+
+    /* Per-scheduler private data hook. */
+    void *private;
+
+    /* State: i.e., runstate.  Phase: "runnable" or "blocked".  A single
+     * "runnable" phase may go through several "runnable" and "running"
+     * states.
*/ + int state_start_time; + int time_this_phase; + int was_preempted; + + struct { + struct cycle_summary state[STATE_MAX]; + } stats; + + int phase_index; + const struct sim_phase *e; /* Shortcut pointer to workload->list[phase_index] */ + const struct vm_workload *workload; + +}; + +struct sched_ops { + void (*sched_init)(void); + void (*vm_init)(int vid); + void (*wake)(int time, struct vm* v); + struct vm* (*schedule)(int time, int pid); +}; + +struct scheduler { + char *name; + struct sched_ops ops; +}; + +#define MAX_PCPU +struct global_pcpu_data { + int count; + struct pcpu { + int pid; + struct vm* current; + } pcpus[MAX_PCPU]; +}; +extern struct global_pcpu_data P; + +struct vm* vm_from_vid(int vid); +#define current(_pid) (P.pcpus[(_pid)].current) +void sim_sched_timer(int time, int pid); +void sim_runstate_change(int now, struct vm *v, int new_runstate); + +#endif /* __SIM_H */ + diff -r 000000000000 -r d27bb3c56e71 simulator.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/simulator.c Tue Oct 13 16:06:36 2009 +0100 @@ -0,0 +1,491 @@ +#include +#include +#include + +#define ASSERT assert + +#include "stats.h" +#include "list.h" +#include "sim.h" +#include "workload.h" + +FILE *warn; + +enum event_type { + EVT_BLOCK, + EVT_WAKE, + EVT_TIMER, + EVT_MAX +}; + +char *event_name[EVT_MAX] = { + [EVT_BLOCK]="block", + [EVT_WAKE] ="wake ", + [EVT_TIMER]="timer" +}; + +struct event { + struct list_head event_list; + enum event_type type; + int time; + int param; /* Usually VM ID */ +}; + +char * state_name[STATE_MAX] = { + [STATE_RUN]= "run ", + [STATE_PREEMPT]="preempt", + [STATE_WAKE]= "wake ", + [STATE_BLOCK]= "block ", +}; + +struct { + int now; + struct list_head events; + struct list_head *timer; + const struct sched_ops *sched_ops; +} sim; + + +struct { + int count; + struct vm vms[MAX_VMS]; +} V; + +extern struct scheduler sched_rr; +int default_scheduler = 0; +struct scheduler *schedulers[] = +{ + &sched_rr, +}; + +/* Options */ +struct { + int 
time_limit; + int pcpu_count; + const struct workload * workload; + const struct scheduler * scheduler; +} opt = { + .time_limit = 100000, + .pcpu_count = 1, + .workload = NULL, +}; + +struct global_pcpu_data P; + +/* Sim list interface */ +/* NB: Caller must free if they're not going to use it! */ +#define list_event(_l) (list_entry((_l), struct event, event_list)) + +struct event* sim_remove_event(int type, int param) +{ + struct event* ret = NULL; + struct list_head *pos, *tmp; + + /* Look for an event that matches this one and remove it */ + list_for_each_safe(pos, tmp, &sim.events) + { + struct event *tevt = list_event(pos); + if ( tevt->type == type + && tevt->param == param ) + { + list_del(pos); + ret = tevt; + break; + } + } + + return ret; +} + +void sim_insert_event(int time, int type, int param, int reset) +{ + struct list_head *pos = NULL; + struct event *evt=NULL; + + ASSERT(time >= sim.now); + + if ( reset ) + evt=sim_remove_event(type, param); + + if ( !evt ) + evt = (struct event *)malloc(sizeof(*evt)); + + printf(" [insert t%d %s param%d]\n", + evt->time, event_name[evt->type], evt->param); + + evt->time = time; + evt->type = type; + evt->param = param; + + INIT_LIST_HEAD(&evt->event_list); + + list_for_each(pos, &sim.events) + { + if ( list_event(pos)->time > evt->time ) + break; + } + list_add_tail(&evt->event_list, pos); +} + +struct event sim_next_event(void) +{ + struct event *evt; + struct list_head *next; + + ASSERT(!list_empty(&sim.events)); + + next=sim.events.next; + + list_del(next); + + evt=list_event(next); + + printf("%d: evt %s param%d\n", + evt->time, event_name[evt->type], evt->param); + + free(evt); + + /* XXX */ + return *evt; +} + +/* + * VM simulation + */ +void vm_next_event(struct vm *v) +{ + v->phase_index = ( v->phase_index + 1 ) % v->workload->phase_count; + + v->e = v->workload->list + v->phase_index; +} + +struct vm* vm_from_vid(int vid) +{ + ASSERT(vid < V.count); + + return V.vms + vid; +} + +void vm_block(int now, 
struct vm *v)
+{
+    ASSERT(v->e->type == PHASE_RUN);
+    /* Charge time since the last state change to this run phase. */
+    v->time_this_phase += now - v->state_start_time;
+    printf("%s: v%d time_this_phase %d\n",
+           __func__, v->vid, v->time_this_phase);
+
+    /* A VM only blocks once its run phase is fully consumed. */
+    ASSERT(v->time_this_phase == v->e->time);
+
+    vm_next_event(v);
+
+    ASSERT(v->e->type == PHASE_BLOCK);
+
+    /* Wake fires when the block phase ends. */
+    sim_insert_event(now + v->e->time, EVT_WAKE, v->vid, 0);
+    v->time_this_phase = 0;
+    v->was_preempted = 0;
+}
+
+/* Called when wake event happens; increment timer and reset state */
+void vm_wake(int now, struct vm *v)
+{
+    ASSERT(v->e->type == PHASE_BLOCK);
+    ASSERT(v->time_this_phase == 0);
+
+    v->time_this_phase = now - v->state_start_time;
+
+    /* The t=0 wake is the artificial start-of-simulation wake; the blocked
+     * interval need not match the phase length then. */
+    if ( now != 0 )
+        ASSERT(v->time_this_phase == v->e->time);
+
+    vm_next_event(v);
+
+    v->time_this_phase = 0;
+}
+
+/* Called when actually starting to run; make block event and set state */
+void vm_run(int now, struct vm *v)
+{
+    ASSERT(v->e->type == PHASE_RUN);
+    ASSERT(v->time_this_phase < v->e->time);
+
+    /* Block when the *remainder* of the run phase has been consumed. */
+    sim_insert_event(now + v->e->time - v->time_this_phase, EVT_BLOCK, v->vid, 0);
+    v->state_start_time = now;
+}
+
+/* Preempt: Remove block event, update amount of runtime (so that when it
+ * runs again we can accurately generate a new block event) */
+void vm_preempt(int now, struct vm *v)
+{
+    struct event* evt;
+
+    /* The pending block event is stale now; discard it if queued. */
+    if ( ( evt = sim_remove_event(EVT_BLOCK, v->vid) ) )
+        free(evt);
+
+    v->time_this_phase += now - v->state_start_time;
+    printf("%s: v%d time_this_phase %d\n",
+           __func__, v->vid, v->time_this_phase);
+
+    ASSERT(v->time_this_phase < v->e->time);
+
+    v->was_preempted = 1;
+}
+
+
+/* Callbacks the scheduler may make */
+/* Arm (or re-arm, via the reset flag) the scheduler timer for pcpu @pid. */
+void sim_sched_timer(int time, int pid)
+{
+    sim_insert_event(sim.now + time, EVT_TIMER, pid, 1);
+}
+
+/* Account the time spent in the old runstate and carry out the transition. */
+void sim_runstate_change(int now, struct vm *v, int new_runstate)
+{
+    int ostate, nstate;
+    int stime = now - v->state_start_time;
+
+    /* Valid transitions:
+     * + R->A (preemption): remove block event
+     * + R->B (block) : Insert wake event
+     * + A->R (run) : Insert block event
+     * + B->A (wake)
: No action necessary + */ + + switch ( v->runstate ) + { + case RUNSTATE_RUNNING: + ostate = STATE_RUN; + break; + case RUNSTATE_RUNNABLE: + if ( v->was_preempted ) + ostate = STATE_PREEMPT; + else + ostate = STATE_WAKE; + break; + case RUNSTATE_BLOCKED: + ostate = STATE_BLOCK; + break; + } + + update_cycles(&v->stats.state[ostate], stime); + + + if ( v->runstate == RUNSTATE_RUNNING + && new_runstate == RUNSTATE_RUNNABLE ) + { + nstate = STATE_PREEMPT; + vm_preempt(now, v); + } + else if ( v->runstate == RUNSTATE_RUNNING + && new_runstate == RUNSTATE_BLOCKED ) + { + nstate = STATE_BLOCK; + vm_block(now, v); + } + else if ( v->runstate == RUNSTATE_RUNNABLE + && new_runstate == RUNSTATE_RUNNING ) + { + nstate = STATE_RUN; + vm_run(now, v); + } + else if ( v->runstate == RUNSTATE_BLOCKED + && new_runstate == RUNSTATE_RUNNABLE ) + { + nstate = STATE_WAKE; + vm_wake(now, v); + } + else + goto unexpected_transition; + + printf("%d: v%d %s %d -> %s\n", + now, v->vid, state_name[ostate], stime, state_name[nstate]); + + v->runstate = new_runstate; + v->state_start_time = now; + + return; + +unexpected_transition: + fprintf(stderr, "Unexpected transition for vm %d: %d->%d\n", + v->vid, + v->runstate, + new_runstate); + exit(1); +} + +/* + * Main loop + */ +void simulate(void) +{ + while ( sim.now < opt.time_limit ) + { + /* Take next event off list */ + struct event evt; + + evt = sim_next_event(); + + sim.now = evt.time; + + switch(evt.type) + { + case EVT_WAKE: + { + struct vm *v = vm_from_vid(evt.param); + ASSERT(v->processor == -1); + sim_runstate_change(sim.now, v, RUNSTATE_RUNNABLE); + sim.sched_ops->wake(sim.now, v); + } + break; + case EVT_BLOCK: + { + struct vm *v = vm_from_vid(evt.param); + + ASSERT(v->processor != -1); + ASSERT(v->processor <= P.count); + + sim_runstate_change(sim.now, v, RUNSTATE_BLOCKED); + + evt.param = v->processor; /* FIXME */ + } + /* FALL-THRU */ + case EVT_TIMER: + { + struct vm *prev, *next; + int pid = evt.param; + + ASSERT(pid < 
P.count); + + prev = P.pcpus[pid].current; + + next = sim.sched_ops->schedule(sim.now, pid); + + if ( prev && prev != next ) + { + prev->processor = -1; + if( prev->runstate != RUNSTATE_BLOCKED ) + sim_runstate_change(sim.now, prev, RUNSTATE_RUNNABLE); + } + + sim_runstate_change(sim.now, next, RUNSTATE_RUNNING); + P.pcpus[pid].current = next; + next->processor = pid; + } + break; + default: + fprintf(stderr, "Unexpected event type: %d\n", evt.type); + exit(1); + break; + } + } +} + +void init(void) +{ + int vid, i; + const struct workload *w; + + /* Initialize simulation variables */ + sim.now=0; + sim.timer=NULL; + INIT_LIST_HEAD(&sim.events); + sim.sched_ops = &opt.scheduler->ops; + + /* Initialize pcpus */ + P.count = opt.pcpu_count; + for ( i=0; isched_init(); + + /* Initialize vms */ + w=opt.workload; + for ( vid=0; vidvm_count; vid++) + { + struct vm *v = V.vms+vid; + + v->vid = vid; + v->runstate = RUNSTATE_BLOCKED; + v->processor = -1; + v->private = NULL; + + v->state_start_time = 0; + v->time_this_phase = 0; + + + v->phase_index = -1; + v->e = NULL; + v->workload = w->vm_workloads+vid; + + V.count++; + + sim.sched_ops->vm_init(vid); + } + + /* Set VM starting conditions */ + for ( vid=0; vidworkload->list[0].type) + { + case PHASE_RUN: + v->phase_index = v->workload->phase_count - 1; + v->e = v->workload->list + v->phase_index; + + sim_insert_event(sim.now, EVT_WAKE, v->vid, 0); + v->state_start_time = sim.now; + v->time_this_phase = 0; + break; + case PHASE_BLOCK: + v->phase_index = 0; + v->e = v->workload->list; + + sim_insert_event(sim.now + v->e->time, EVT_WAKE, v->vid, 0); + v->state_start_time = sim.now; + v->time_this_phase = 0; + break; + } + } + + /* Insert initial scheduler timer */ + for ( i=0; istats.state[j], sim.now, s); + } + } +} + +int main(int argc, char * argv[]) +{ + warn = stdout; + + /* Read opts, config file? 
*/ + if ( !opt.workload ) + opt.workload = builtin_workloads+default_workload; + + if ( !opt.scheduler ) + opt.scheduler = schedulers[default_scheduler]; + /* Setup simulation */ + init(); + + /* Run simulation */ + simulate(); + /* Report statistics */ + report(); +} diff -r 000000000000 -r d27bb3c56e71 stats.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/stats.c Tue Oct 13 16:06:36 2009 +0100 @@ -0,0 +1,339 @@ +#include +#include +#include + +#include "stats.h" + +#define DEFAULT_SAMPLE_SIZE 10240 + +static struct { + int sample_size; +} opt = { + .sample_size = DEFAULT_SAMPLE_SIZE, +}; + +void set_sampel_size(int n) +{ + opt.sample_size = n; +} + +extern FILE *warn; + +/* With compliments to "Numerical Recipes in C", which provided the algorithm + * and basic template for this function. */ +#if 0 +static long long percentile(long long * A, int N, int ple) { + int I, J, L, R, K; + + long long X, W; + + /* No samples! */ + if ( N == 0 ) + return 0; + + /* Find K, the element # we want */ + K=N*ple/100; + + /* Set the left and right boundaries of the current search space */ + L=0; R=N-1; + + while(L < R) { + /* X: The value to order everything higher / lower than */ + X=A[K]; + + /* Starting at the left and the right... */ + I=L; + J=R; + + do { + /* Find the first element on the left that is out-of-order w/ X */ + while(A[I]> 1; + /* X: The value to order everything higher / lower than */ + X=A[K]; + + /* Starting at the left and the right... */ + I=L; I_weight = L_weight; + J=R; J_weight = R_weight; + + do { + /* Find the first element on the left that is out-of-order w/ X */ + while(A[I]> 1; + /* X: The value to order everything higher / lower than */ + X=A[K]; + + /* Starting at the left and the right... 
 */
+        I=L; I_weight = L_weight;
+        J=R; J_weight = R_weight;
+
+        do {
+            /* Find the first element on the left that is out-of-order w/ X */
+            /* NOTE(review): the span from here through the update_cycles()
+             * function header was eaten in extraction; text preserved as
+             * found -- restore from the original patch. */
+            while(A[I]count/opt.sample_size)+1;
+        index =s->count % opt.sample_size;
+
+        /* Keep a decimated reservoir of samples for percentile reporting;
+         * the sample buffer is allocated lazily on first use. */
+        if((index - (lap/3))%lap == 0) {
+            if(!s->sample) {
+                s->sample = malloc(sizeof(*s->sample) * opt.sample_size);
+                if(!s->sample) {
+                    fprintf(stderr, "%s: malloc failed!\n", __func__);
+                    exit(1);
+                }
+            }
+            s->sample[index] = c;
+        }
+    }
+
+    /* Accumulate the magnitude of c. */
+    if(c > 0) {
+        s->cycles += c;
+    } else {
+        s->cycles += -c;
+    }
+    s->count++;
+}
+
+/* Print count / total cycles / (optional) percent-of-total / mean, plus
+ * 5th/50th/95th percentiles when sampling is enabled.  @p is the label. */
+void print_cycle_summary(struct cycle_summary *s,
+                         tsc_t total, char *p) {
+    if(s->count) {
+        long long avg;
+        double percent;
+
+        avg = s->cycles / s->count;
+
+        if ( total )
+            percent = ((double)(s->cycles * 100)) / total;
+
+        if ( opt.sample_size ) {
+            long long p5, p50, p95;
+            int data_size = s->count;
+
+            if(data_size > opt.sample_size)
+                data_size = opt.sample_size;
+
+            p50 = self_weighted_percentile(s->sample, data_size, 50);
+            p5 = self_weighted_percentile(s->sample, data_size, 5);
+            p95 = self_weighted_percentile(s->sample, data_size, 95);
+
+            if ( total )
+                printf("%s: %7d %llu %5.2lf%% %6lld {%6lld|%6lld|%6lld}\n",
+                       p, s->count,
+                       s->cycles,
+                       percent,
+                       avg, p5, p50, p95);
+            else
+                printf("%s: %7d %llu %6lld {%6lld|%6lld|%6lld}\n",
+                       p, s->count,
+                       s->cycles,
+                       avg, p5, p50, p95);
+
+        } else {
+            if ( total )
+                printf("%s: %7d %llu %5.2lf%% %6lld\n",
+                       p, s->count,
+                       s->cycles,
+                       percent,
+                       avg);
+            else
+                printf("%s: %7d %llu %6lld\n",
+                       p, s->count,
+                       s->cycles,
+                       avg);
+        }
+    }
+}
+
diff -r 000000000000 -r d27bb3c56e71 stats.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/stats.h	Tue Oct 13 16:06:36 2009 +0100
@@ -0,0 +1,16 @@
+#ifndef _STATS_H
+#define _STATS_H
+
+typedef unsigned long long tsc_t;
+
+/* Running total plus a decimated sample buffer (see stats.c). */
+struct cycle_summary {
+    int count;
+    unsigned long long cycles;
+    long long *sample;
+};
+
+void set_sample_size(int n);
+void update_cycles(struct cycle_summary *s, long long c);
+void print_cycle_summary(struct cycle_summary *s,
+                         tsc_t total, char *p);
+#endif
diff -r 000000000000 -r d27bb3c56e71 workload.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/workload.h	Tue Oct 13 16:06:36 2009 +0100
@@ -0,0 +1,23 @@
+#ifndef __WORKLOAD_H
+#define __WORKLOAD_H
+/* One phase of a VM's synthetic workload: run or block for @time. */
+struct sim_phase {
+    enum { PHASE_RUN, PHASE_BLOCK } type;
+    int time;
+};
+
+#define MAX_VMS 16
+#define MAX_PHASES 16
+/* A repeating cycle of phases for a single VM. */
+struct vm_workload {
+    int phase_count;
+    const struct sim_phase list[MAX_PHASES];
+};
+
+struct workload {
+    const char * name;
+    int vm_count;
+    const struct vm_workload vm_workloads[MAX_VMS];
+};
+
+extern const int default_workload;
+extern struct workload builtin_workloads[];
+#endif
diff -r 000000000000 -r d27bb3c56e71 workloads.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/workloads.c	Tue Oct 13 16:06:36 2009 +0100
@@ -0,0 +1,48 @@
+#include "workload.h"
+
+/* "Sx3": three mostly-CPU-bound VMs, each with a long run phase followed by
+ * a 5-tick block. */
+const int default_workload = 0;
+struct workload builtin_workloads[] =
+{
+    {
+        .name="Sx3",
+        .vm_count=3,
+        .vm_workloads = {
+            { .phase_count = 2,
+              .list = {
+                  {
+                      .type=PHASE_RUN,
+                      .time=695
+                  },
+                  {
+                      .type=PHASE_BLOCK,
+                      .time=5
+                  },
+              }
+            },
+            { .phase_count = 2,
+              .list = {
+                  {
+                      .type=PHASE_RUN,
+                      .time=1095
+                  },
+                  {
+                      .type=PHASE_BLOCK,
+                      .time=5
+                  },
+              }
+            },
+            { .phase_count = 2,
+              .list = {
+                  {
+                      .type=PHASE_RUN,
+                      .time=1295
+                  },
+                  {
+                      .type=PHASE_BLOCK,
+                      .time=5
+                  },
+              }
+            },
+        }
+    },
+};