BeRTOS
proc.c

#include "proc_p.h"
#include "proc.h"

#include "cfg/cfg_proc.h"
#define LOG_LEVEL KERN_LOG_LEVEL
#define LOG_FORMAT KERN_LOG_FORMAT
#include <cfg/log.h>

#include "cfg/cfg_monitor.h"
#include <cfg/macros.h>    // ROUND_UP2
#include <cfg/module.h>
#include <cfg/depend.h>    // CONFIG_DEPEND()

#include <cpu/irq.h>
#include <cpu/types.h>
#include <cpu/attr.h>
#include <cpu/frame.h>

#if CONFIG_KERN_HEAP
    #include <struct/heap.h>
#endif

#include <string.h>           /* memset() */

#define PROC_SIZE_WORDS (ROUND_UP2(sizeof(Process), sizeof(cpu_stack_t)) / sizeof(cpu_stack_t))

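/*
 * PROC_SIZE_WORDS is the size of the Process structure expressed in stack
 * words, so that the PCB can be carved out of the process stack buffer.
 * A quick worked example (the sizes below are purely illustrative; the real
 * values depend on the target CPU and on the enabled kernel options):
 *
 * \code
 * // sizeof(Process) == 54, sizeof(cpu_stack_t) == 4
 * // ROUND_UP2(54, 4) == 56  ->  PROC_SIZE_WORDS == 56 / 4 == 14
 * \endcode
 */
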
/*
 * The scheduler tracks ready processes by enqueuing them in the
 * ready list.
 *
 * \note Access to the list must occur while interrupts are disabled.
 */
REGISTER List proc_ready_list;

/*
 * Holds a pointer to the TCB of the currently running process.
 *
 * \note User applications should use proc_current() to retrieve this value.
 */
REGISTER Process *current_process;

static struct Process main_process;

#if CONFIG_KERN_HEAP

static HEAP_DEFINE_BUF(heap_buf, CONFIG_KERN_HEAP_SIZE);
static Heap proc_heap;

/*
 * Keep track of zombie processes (processes that are exiting and need to
 * release some resources).
 *
 * \note Access to the list must occur while kernel preemption is disabled.
 */
static List zombie_list;

#endif /* CONFIG_KERN_HEAP */

/*
 * Check if the process context switch can be performed directly by the
 * architecture-dependent asm_switch_context() or if it must be delayed
 * because we're in the middle of an ISR.
 *
 * Return true if asm_switch_context() can be executed, false
 * otherwise.
 *
 * NOTE: if an architecture does not implement IRQ_RUNNING() this macro
 * always evaluates to true.
 */
#define CONTEXT_SWITCH_FROM_ISR()   (!IRQ_RUNNING())

/*
 * Save the context of the old process and switch to the new process.
 */
static void proc_context_switch(Process *next, Process *prev)
{
    cpu_stack_t *dummy;

    if (UNLIKELY(next == prev))
        return;
    /*
     * If there is no old process, we save the old stack pointer into a
     * dummy variable that we ignore. This happens only when the
     * old process has just exited.
     */
    asm_switch_context(&next->stack, prev ? &prev->stack : &dummy);
}

static void proc_initStruct(Process *proc)
{
    /* Avoid warning for unused argument. */
    (void)proc;

#if CONFIG_KERN_SIGNALS
    proc->sig.recv = 0;
    proc->sig.wait = 0;
#endif

#if CONFIG_KERN_HEAP
    proc->flags = 0;
#endif

#if CONFIG_KERN_PRI
    proc->link.pri = 0;
#endif
}

MOD_DEFINE(proc);

void proc_init(void)
{
    LIST_INIT(&proc_ready_list);

#if CONFIG_KERN_HEAP
    LIST_INIT(&zombie_list);
    heap_init(&proc_heap, heap_buf, sizeof(heap_buf));
#endif
    /*
     * We "promote" the current context into a real process. The only thing we have
     * to do is create a PCB and make it current. We don't need to set up the stack
     * pointer because it will be written the first time we switch to another process.
     */
    proc_initStruct(&main_process);
    current_process = &main_process;

#if CONFIG_KERN_MONITOR
    monitor_init();
    monitor_add(current_process, "main");
#endif
    MOD_INIT(proc);
}
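
/*
 * Usage sketch: a typical application initializes the kernel before spawning
 * any other process. The surrounding calls below (IRQ_ENABLE, timer_init())
 * illustrate the usual BeRTOS startup sequence and are not defined in this
 * file.
 *
 * \code
 * int main(void)
 * {
 *     IRQ_ENABLE;
 *     timer_init();
 *     proc_init();      // the running context becomes the "main" process
 *
 *     // ... create other processes with proc_new()/proc_new_with_name() ...
 *
 *     for (;;)
 *     {
 *         // main process body
 *     }
 * }
 * \endcode
 */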


#if CONFIG_KERN_HEAP

static void proc_freeZombies(void)
{
    Process *proc;

    while (1)
    {
        PROC_ATOMIC(proc = (Process *)list_remHead(&zombie_list));
        if (proc == NULL)
            return;

        if (proc->flags & PF_FREESTACK)
        {
            PROC_ATOMIC(heap_freemem(&proc_heap, proc->stack_base,
                proc->stack_size + PROC_SIZE_WORDS * sizeof(cpu_stack_t)));
        }
    }
}

static void proc_addZombie(Process *proc)
{
    Node *node;
#if CONFIG_KERN_PREEMPT
    ASSERT(!proc_preemptAllowed());
#endif

#if CONFIG_KERN_PRI
    node = &(proc)->link.link;
#else
    node = &(proc)->link;
#endif
    LIST_ASSERT_VALID(&zombie_list);
    ADDTAIL(&zombie_list, node);
}

#endif /* CONFIG_KERN_HEAP */

struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
{
    Process *proc;
    LOG_INFO("name=%s", name);
#if CONFIG_KERN_HEAP
    bool free_stack = false;

    /*
     * Free up the resources of zombie processes.
     *
     * We're implementing a kind of lazy garbage collector here for
     * efficiency reasons: we avoid introducing overhead in a separate
     * kernel task dedicated to freeing resources (e.g., idle) and we're
     * not introducing any overhead into the scheduler after a context
     * switch (that would be *very* bad, because the scheduler runs with
     * IRQs disabled).
     *
     * In this way we are able to release the memory of the zombie tasks
     * without disabling IRQs and without introducing any significant
     * overhead in any other kernel task.
     */
    proc_freeZombies();

    /* Did the caller provide a stack for us? */
    if (!stack_base)
    {
        /* Did the caller specify the desired stack size? */
        if (!stack_size)
            stack_size = KERN_MINSTACKSIZE;

        /* Allocate the stack dynamically */
        PROC_ATOMIC(stack_base =
            (cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
        if (stack_base == NULL)
            return NULL;

        free_stack = true;
    }

#else // CONFIG_KERN_HEAP

    /* The stack must have been provided by the user */
    ASSERT2(IS_VALID_PTR(stack_base), "Invalid stack pointer. Did you forget to \
        enable CONFIG_KERN_HEAP?");
    ASSERT2(stack_size, "Stack size cannot be 0.");

#endif // CONFIG_KERN_HEAP

#if CONFIG_KERN_MONITOR
    /*
     * Fill the stack with a special marker to help debugging.
     * On 64-bit platforms, CONFIG_KERN_STACKFILLCODE is larger
     * than an int, so the (int) cast is required to silence the
     * truncation warning.
     */
    memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
#endif

    /* Initialize the process control block */
    if (CPU_STACK_GROWS_UPWARD)
    {
        proc = (Process *)stack_base;
        proc->stack = stack_base + PROC_SIZE_WORDS;
        // On some architectures the stack must be aligned, so we align it here.
        proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
        if (CPU_SP_ON_EMPTY_SLOT)
            proc->stack++;
    }
    else
    {
        proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
        // On some architectures the stack must be aligned, so we align it here.
        proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
        if (CPU_SP_ON_EMPTY_SLOT)
            proc->stack--;
    }
    /* Ensure the stack is aligned */
    ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);

    stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
    proc_initStruct(proc);
    proc->user_data = data;

#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
    proc->stack_base = stack_base;
    proc->stack_size = stack_size;
    #if CONFIG_KERN_HEAP
    if (free_stack)
        proc->flags |= PF_FREESTACK;
    #endif
#endif
    proc->user_entry = entry;
    CPU_CREATE_NEW_STACK(proc->stack);

#if CONFIG_KERN_MONITOR
    monitor_add(proc, name);
#endif

    /* Add to the ready list */
    ATOMIC(SCHED_ENQUEUE(proc));

    return proc;
}
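
/*
 * Usage sketch: spawning a process with a statically allocated stack. The
 * helper names below (PROC_DEFINE_STACK, proc_new, KERN_MINSTACKSIZE) come
 * from proc.h, not from this file. With CONFIG_KERN_HEAP enabled, stack_base
 * and stack_size may both be passed as 0 and the stack is then taken from
 * the kernel heap.
 *
 * \code
 * PROC_DEFINE_STACK(worker_stack, KERN_MINSTACKSIZE);
 *
 * static void worker(void)
 * {
 *     for (;;)
 *     {
 *         // ... process body ...
 *     }
 * }
 *
 * // Typically called from main() after proc_init():
 * struct Process *p = proc_new(worker, 0, sizeof(worker_stack), worker_stack);
 * ASSERT(p);
 * \endcode
 */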

const char *proc_name(struct Process *proc)
{
#if CONFIG_KERN_MONITOR
    return proc ? proc->monitor.name : "<NULL>";
#else
    (void)proc;
    return "---";
#endif
}

const char *proc_currentName(void)
{
    return proc_name(proc_current());
}

void proc_rename(struct Process *proc, const char *name)
{
#if CONFIG_KERN_MONITOR
    monitor_rename(proc, name);
#else
    (void)proc; (void)name;
#endif
}


#if CONFIG_KERN_PRI

void proc_setPri(struct Process *proc, int pri)
{
    if (proc->link.pri == pri)
        return;

    proc->link.pri = pri;

    if (proc != current_process)
        ATOMIC(sched_reenqueue(proc));
}
#endif // CONFIG_KERN_PRI
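
/*
 * Usage sketch (CONFIG_KERN_PRI enabled): priorities are plain integers,
 * new processes start at priority 0, and a higher value means a higher
 * priority. The value below is illustrative only.
 *
 * \code
 * struct Process *p = proc_new(worker, 0, sizeof(worker_stack), worker_stack);
 * proc_setPri(p, 5);   // schedule "worker" ahead of priority-0 processes
 * \endcode
 */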

INLINE void proc_run(void)
{
    void (*entry)(void) = current_process->user_entry;

    LOG_INFO("New process starting at %p", entry);
    entry();
}

void proc_entry(void)
{
    /*
     * Returning from a context switch assumes interrupts are disabled, so
     * we need to explicitly re-enable them as soon as possible.
     */
    IRQ_ENABLE;
    /* Call the actual process's entry point */
    proc_run();
    proc_exit();
}

void proc_exit(void)
{
    LOG_INFO("%p:%s", current_process, proc_currentName());

#if CONFIG_KERN_MONITOR
    monitor_remove(current_process);
#endif

    proc_forbid();
#if CONFIG_KERN_HEAP
    /*
     * Mark the task as a zombie; its resources will be freed lazily by
     * proc_new(), when another process is created.
     */
    proc_addZombie(current_process);
#endif
    current_process = NULL;
    proc_permit();

    proc_switch();

    /* never reached */
    ASSERT(0);
}
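
/*
 * Usage sketch: proc_forbid()/proc_permit() (declared in proc.h) disable and
 * re-enable kernel preemption, as used above to protect the zombie list.
 * They can bracket any short critical section that must not be preempted by
 * another process but does not require IRQs to be disabled:
 *
 * \code
 * proc_forbid();
 * // ... touch data shared with other processes (but not with ISRs) ...
 * proc_permit();
 * \endcode
 */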

static void proc_schedule(void)
{
    Process *old_process = current_process;

    IRQ_ASSERT_DISABLED();

    /* Poll the ready queue for the first ready process */
    LIST_ASSERT_VALID(&proc_ready_list);
    while (!(current_process = (struct Process *)list_remHead(&proc_ready_list)))
    {
        /*
         * Make sure we physically re-enable interrupts here, no matter what
         * the current task status is. This is important because if we
         * are idle-spinning, we must allow interrupts, otherwise no
         * process will ever wake up.
         *
         * During idle-spinning, an interrupt can occur and it may
         * modify \p proc_ready_list. To ensure that the compiler reloads
         * this variable on every iteration of the loop, we issue
         * MEMORY_BARRIER. The memory barrier ensures that all variables
         * used in this context are reloaded.
         * \todo If there were a way to write sig_wait() so that it does not
         * disable interrupts while waiting, there would not be any
         * reason to do this.
         */
        IRQ_ENABLE;
        CPU_IDLE;
        MEMORY_BARRIER;
        IRQ_DISABLE;
    }
    if (CONTEXT_SWITCH_FROM_ISR())
        proc_context_switch(current_process, old_process);
    /* This RET resumes execution in the new process */
    LOG_INFO("resuming %p:%s\n", current_process, proc_currentName());
}

#if CONFIG_KERN_PREEMPT
/* Global preemption nesting counter */
cpu_atomic_t preempt_count;

/*
 * The time-sharing interval: when a process is scheduled on a CPU it gets a
 * budget of CONFIG_KERN_QUANTUM clock ticks. When these ticks expire and
 * preemption is enabled, a new process is selected to run.
 */
int _proc_quantum;

bool proc_needPreempt(void)
{
    if (UNLIKELY(current_process == NULL))
        return false;
    if (!proc_preemptAllowed())
        return false;
    if (LIST_EMPTY(&proc_ready_list))
        return false;
    return preempt_quantum() ? prio_next() > prio_curr() :
            prio_next() >= prio_curr();
}

void proc_preempt(void)
{
    IRQ_ASSERT_DISABLED();
    ASSERT(current_process);

    /* Perform the kernel preemption */
    LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
    /* We are inside an IRQ context, so ATOMIC is not needed here */
    SCHED_ENQUEUE(current_process);
    preempt_reset_quantum();
    proc_schedule();
}
#endif /* CONFIG_KERN_PREEMPT */
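
/*
 * Usage sketch: proc_needPreempt()/proc_preempt() are meant to be called
 * from the system tick interrupt, with IRQs disabled. The handler below only
 * illustrates that arrangement; in BeRTOS this hook lives in the timer
 * driver, not in this file.
 *
 * \code
 * void system_tick_isr(void)
 * {
 *     // ... acknowledge the hardware timer, update the tick counter ...
 *
 * #if CONFIG_KERN_PREEMPT
 *     if (proc_needPreempt())
 *         proc_preempt();
 * #endif
 * }
 * \endcode
 */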

/* Immediately switch to a particular process */
static void proc_switchTo(Process *proc)
{
    Process *old_process = current_process;

    SCHED_ENQUEUE(current_process);
    preempt_reset_quantum();
    current_process = proc;
    proc_context_switch(current_process, old_process);
}

void proc_switch(void)
{
    ASSERT(proc_preemptAllowed());
    ATOMIC(
        preempt_reset_quantum();
        proc_schedule();
    );
}

void proc_wakeup(Process *proc)
{
    ASSERT(proc_preemptAllowed());
    ASSERT(current_process);
    IRQ_ASSERT_DISABLED();

    if (prio_proc(proc) >= prio_curr())
        proc_switchTo(proc);
    else
        SCHED_ENQUEUE_HEAD(proc);
}

void proc_yield(void)
{
    Process *proc;

    /*
     * Voluntary preemption while preemption is disabled is considered
     * illegal, as it is not very useful in practice.
     *
     * ASSERT if it happens.
     */
    ASSERT(proc_preemptAllowed());
    IRQ_ASSERT_ENABLED();

    IRQ_DISABLE;
    proc = (struct Process *)list_remHead(&proc_ready_list);
    if (proc)
        proc_switchTo(proc);
    IRQ_ENABLE;
}
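
/*
 * Usage sketch: in a purely cooperative configuration (CONFIG_KERN_PREEMPT
 * disabled) each process must hand the CPU back explicitly, typically once
 * per iteration of its main loop:
 *
 * \code
 * static void poller(void)
 * {
 *     for (;;)
 *     {
 *         // ... do a small amount of work ...
 *         proc_yield();   // let other ready processes run
 *     }
 * }
 * \endcode
 */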