vx32

Local 9vx git repository for patches.
git clone git://r-36.net/vx32

sched.c (5116B)


/*
 * Plan 9 VX scheduler
 *
 * Allocate new processors (host threads) for each kproc.
 * Everyone else gets scheduled via the regular scheduler
 * on cpu0.  It is tempting to use multiple threads and
 * multiple vx32 instances on an SMP to get parallelism of
 * user execution, but we can't: there's only one user address space.
 * (The kprocs are okay because they never touch user memory.)
 */

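/* WANT_M: have dat.h expose the per-thread Mach pointer m (see dat.h). */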
#define	WANT_M

#include	"u.h"
#include	<pthread.h>
#include	<sys/poll.h>
#include	<sched.h>
#include	"lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"error.h"
#include	"trace.h"

/*
 * The cpu0 scheduler calls idlehands when there is
 * nothing left on the main runqueue (runproc
 * is returning nil).  Instead of chewing up the
 * host CPU spinning, we go to sleep using pthreads,
 * but then if some other kproc readies a normal
 * proc, it needs to call noidlehands to kick cpu0.
 */
static int idlewakeup;
static Psleep idling;

void
idlehands(void)
{
	int nbad;

	plock(&idling);
	nbad = 0;
	while(!idlewakeup){
		if(traceprocs)
			iprint("cpu%d: idlehands\n", m->machno);
		psleep(&idling);
		if(traceprocs)
			iprint("cpu%d: busy hands\n", m->machno);
		if(!idlewakeup && ++nbad%1000 == 0)
			iprint("idlehands spurious wakeup\n");
	}
	idlewakeup = 0;
	if(traceprocs)
		iprint("cpu%d: idlehands returning\n", m->machno);
	punlock(&idling);
}

void
noidlehands(void)
{
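	/* cpu0 is the only cpu that idles in idlehands; if we are cpu0, it is not asleep. */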
	if(m->machno == 0)
		return;
	plock(&idling);
	idlewakeup = 1;
	pwakeup(&idling);
	punlock(&idling);
}

/*
 * Special run queue for kprocs.
 */
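/*
 * kprocq.lk is only taken while the run lock is held (hence
 * "redundant but fine" below).  nrunproc counts kproc cpus that
 * are waiting, or about to wait, in runproc.
 */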
static Schedq kprocq;
static int nrunproc;
static Psleep run;

/*
 * Ready the proc p.
 * If it's a normal proc, it goes to the normal scheduler.
 * Otherwise it gets put on the kproc run queue, and
 * maybe a new "cpu" gets forked to run the kproc.
 */
void
ready(Proc *p)
{
	if(p->kp == 0){
		_ready(p);
		noidlehands();	/* kick cpu0 if it is sleeping */
		return;
	}
	plock(&run);
	lock(&kprocq.lk);	/* redundant but fine */
	p->state = Ready;
	p->rnext = 0;
	if(kprocq.tail)
		kprocq.tail->rnext = p;
	else
		kprocq.head = p;
	kprocq.tail = p;
	/*
	 * If there are more kprocs on the queue
	 * than there are cpus waiting to run kprocs,
	 * kick off a new one.
	 */
	kprocq.n++;
	if(kprocq.n > nrunproc){
		if(traceprocs)
			iprint("create new cpu: kprocq.n=%d nrunproc=%d\n", kprocq.n, nrunproc);
		nrunproc++;
		newmach();
	}
	if(traceprocs)
		iprint("cpu%d: ready %ld %s; wakeup kproc cpus\n", m->machno, p->pid, p->text);
	pwakeup(&run);
	unlock(&kprocq.lk);
	punlock(&run);
}

/*
 * Get a new proc to run.
 * If we're running on cpu0, use the normal scheduler
 * to get a normal proc.
 */
Proc*
runproc(void)
{
	int nbad;
	Proc *p;

	if(m->machno == 0)
		return _runproc();

	nbad = 0;
	plock(&run);
	lock(&kprocq.lk);	/* redundant but fine */
	if(m->new){
		nrunproc--;
		m->new = 0;
	}
	while((p = kprocq.head) == nil){
		nrunproc++;
		unlock(&kprocq.lk);
		if(traceprocs)
			iprint("cpu%d: runproc psleep %d %d\n", m->machno, kprocq.n, nrunproc);
		psleep(&run);
		lock(&kprocq.lk);
		if(kprocq.head == nil && ++nbad%1000 == 0)
			iprint("cpu%d: runproc spurious wakeup\n", m->machno);
		if(traceprocs)
			iprint("cpu%d: runproc awake\n", m->machno);
		nrunproc--;
	}
	kprocq.head = p->rnext;
	if(kprocq.head == 0)
		kprocq.tail = nil;
	kprocq.n--;
	if(traceprocs)
		iprint("cpu%d: runproc %ld %s [%d %d]\n",
			m->machno, p->pid, p->text, kprocq.n, nrunproc);
	unlock(&kprocq.lk);
	punlock(&run);
	/*
	 * Wait until p has been fully descheduled on its old cpu
	 * (p->mach cleared), to avoid the "double sleep" bug. See:
	 * http://9fans.net/archive/2010/06/71
	 */
	while(p->mach)
		sched_yield();
	return p;
}

/*
 * Limit CPU usage by going to sleep while holding the run lock.
 */
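/*
 * A usage sketch (inferred, not stated in this file): plimitproc runs
 * as its own host thread with v pointing at a CPU-limit percentage
 * 0-100.  With lim = 30, each 100ms period spends 30ms with the run
 * lock free and 70ms holding it, stalling ready() and runproc().
 */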
void
plimitproc(void *v)
{
	int lim;
	uint sleeping, working;

	lim = *((int*)v);
	sleeping = 100000 * (100 - lim) / 100;
	working = 100000 * lim / 100;

	for(;;){
		usleep(working);
		plock(&run);
		usleep(sleeping);
		punlock(&run);
	}
}

/*
 * Host OS process sleep and wakeup.
 */
static pthread_mutex_t initmutex = PTHREAD_MUTEX_INITIALIZER;

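/*
 * Each sleeper gets its own condition variable, pushed LIFO onto
 * the Psleep's waiter list, so that pwakeup wakes exactly one.
 */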
struct Pwaiter
{
	pthread_cond_t cond;
	Pwaiter *next;
	int awake;
};

void
__plock(Psleep *p)
{
	int r;

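	/* Lazily initialize p->mutex on first use, double-checked under a global mutex. */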
	if(!p->init){
		if((r = pthread_mutex_lock(&initmutex)) != 0)
			panic("pthread_mutex_lock initmutex: %d", r);
		if(!p->init){
			p->init = 1;
			pthread_mutex_init(&p->mutex, nil);
		}
		if((r = pthread_mutex_unlock(&initmutex)) != 0)
			panic("pthread_mutex_unlock initmutex: %d", r);
	}
	if((r = pthread_mutex_lock(&p->mutex)) != 0)
		panic("pthread_mutex_lock: %d", r);
}

void
__punlock(Psleep *p)
{
	int r;

	if((r = pthread_mutex_unlock(&p->mutex)) != 0)
		panic("pthread_mutex_unlock: %d", r);
}

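/*
 * Sleep until a wakeup marks us awake; the caller must hold p's
 * lock, which pthread_cond_wait releases while waiting.
 */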
void
__psleep(Psleep *p)
{
	int r;
	Pwaiter w;

	memset(&w, 0, sizeof w);
	pthread_cond_init(&w.cond, nil);
	w.next = p->waiter;
	p->waiter = &w;
	while(!w.awake)
		if((r = pthread_cond_wait(&w.cond, &p->mutex)) != 0)
			panic("pthread_cond_wait: %d", r);
	pthread_cond_destroy(&w.cond);
}

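/*
 * Wake at most one waiter, the most recently queued; the caller
 * must hold p's lock.
 */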
void
__pwakeup(Psleep *p)
{
	int r;
	Pwaiter *w;

	w = p->waiter;
	if(w){
		p->waiter = w->next;
		w->awake = 1;
		if((r = pthread_cond_signal(&w->cond)) != 0)
			panic("pthread_cond_signal: %d", r);
	}
}
    261