00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019 #ifndef _RTAI_SCHEDCORE_H
00020 #define _RTAI_SCHEDCORE_H
00021
00022 #include <rtai_version.h>
00023 #include <rtai_lxrt.h>
00024 #include <rtai_sched.h>
00025 #include <rtai_malloc.h>
00026 #include <rtai_trace.h>
00027 #include <rtai_leds.h>
00028 #include <rtai_sem.h>
00029 #include <rtai_rwl.h>
00030 #include <rtai_spl.h>
00031 #include <rtai_scb.h>
00032 #include <rtai_mbx.h>
00033 #include <rtai_msg.h>
00034 #include <rtai_tbx.h>
00035 #include <rtai_mq.h>
00036 #include <rtai_bits.h>
00037 #include <rtai_wd.h>
00038 #include <rtai_tasklets.h>
00039 #include <rtai_fifos.h>
00040 #include <rtai_netrpc.h>
00041 #include <rtai_shm.h>
00042 #include <rtai_usi.h>
00043
00044 #ifdef __KERNEL__
00045
00046 #include <linux/module.h>
00047 #include <linux/init.h>
00048 #include <linux/kernel.h>
00049 #include <linux/version.h>
00050 #include <linux/errno.h>
00051 #include <linux/slab.h>
00052 #include <linux/timex.h>
00053 #include <linux/sched.h>
00054 #include <asm/param.h>
00055 #include <asm/system.h>
00056 #include <asm/io.h>
00057
/* Per-CPU scheduler state, defined in the scheduler implementation. */

extern RT_TASK rt_smp_linux_task[];	/* per-CPU anchor task (list head) representing Linux itself */

extern RT_TASK *rt_smp_current[];	/* per-CPU pointer to the currently running RTAI task */

extern RTIME rt_smp_time_h[];	/* per-CPU time horizon used when waking timed tasks */

extern int rt_smp_oneshot_timer[];	/* per-CPU flag: nonzero when the timer runs in oneshot mode */
00065
/*
 * Scheduler memory helpers.  With CONFIG_RTAI_MALLOC they map onto the
 * RTAI real-time heap (rt_malloc/rt_free); otherwise onto the plain
 * Linux kernel allocator.  When the RTAI allocator is built into this
 * module (CONFIG_RTAI_MALLOC_BUILTIN) the scheduler must also bring the
 * heap up and down itself.  Task exit handlers are only available when
 * the RTAI allocator is present, since they are allocated from it.
 */
#ifdef CONFIG_RTAI_MALLOC
#define sched_malloc(size) rt_malloc((size))
#define sched_free(adr) rt_free((adr))
#ifndef CONFIG_RTAI_MALLOC_BUILTIN
#define sched_mem_init()
#define sched_mem_end()
#else
/* NOTE: expands a bare 'return -ENOMEM', so it may only be used inside
   an int-returning init function. */
#define sched_mem_init() \
	{ if(__rtai_heap_init() != 0) { \
		return(-ENOMEM); \
	} }
#define sched_mem_end() __rtai_heap_exit()
#endif
#define call_exit_handlers(task) __call_exit_handlers(task)
#define set_exit_handler(task, fun, arg1, arg2) __set_exit_handler(task, fun, arg1, arg2)
#else
#define sched_malloc(size) kmalloc((size), GFP_KERNEL)
#define sched_free(adr) kfree((adr))
#define sched_mem_init()
#define sched_mem_end()
#define call_exit_handlers(task)
#define set_exit_handler(task, fun, arg1, arg2)
#endif
00089
/* Magic value stamped into valid semaphores (sanity checking). */
#define RT_SEM_MAGIC 0xaabcdeff

/* Error return value for semaphore primitives. */
#define SEM_ERR (0xFfff)

/* Error return value for messaging primitives (task-pointer valued). */
#define MSG_ERR ((RT_TASK *)0xFfff)

/* Null / non-null marker pointers. */
#define NOTHING ((void *)0)

#define SOMETHING ((void *)1)

/* Masks/increment for a packed count word: plain semaphore count in the
   low halfword, RPC-related count in the high halfword. */
#define SEMHLF 0x0000FFFF
#define RPCHLF 0xFFFF0000
#define RPCINC 0x00010000

/* Helpers to declare/fetch the per-CPU current RTAI task; they expect a
   local 'cpuid' variable (declared by DECLARE_RT_CURRENT). */
#define DECLARE_RT_CURRENT int cpuid; RT_TASK *rt_current
#define ASSIGN_RT_CURRENT rt_current = rt_smp_current[cpuid = hard_cpu_id()]
#define RT_CURRENT rt_smp_current[hard_cpu_id()]

/* Valid range of Linux real-time (SCHED_FIFO/SCHED_RR) priorities. */
#define MAX_LINUX_RTPRIO 99
#define MIN_LINUX_RTPRIO 1

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
void rtai_handle_isched_lock(int nesting);
#endif
00114
/*
 * Per-CPU accessors for timer state and the Linux anchor task.  On SMP
 * the index is masked with 'sqilter'; when sqilter is zero every CPU
 * resolves to slot 0 (NOTE(review): sqilter appears to act as a global
 * gate for SMP scheduling -- confirm against the scheduler sources).
 * These macros rely on a local variable named 'cpuid' being in scope.
 */
#ifdef CONFIG_SMP
#define rt_time_h (rt_smp_time_h[cpuid & sqilter])
#define oneshot_timer (rt_smp_oneshot_timer[cpuid & sqilter])
#define rt_linux_task (rt_smp_linux_task[cpuid])
#else
#define rt_time_h (rt_smp_time_h[0])
#define oneshot_timer (rt_smp_oneshot_timer[0])
#define rt_linux_task (rt_smp_linux_task[0])
#endif

#ifdef CONFIG_SMP

extern unsigned long sqilter;

/* Inter-processor interrupt used to ask other CPUs to reschedule. */
#define SCHED_IPI RTAI_APIC1_IPI
#define SCHED_VECTOR RTAI_APIC1_VECTOR
/*
 * Raise the scheduler IPI on the CPUs named by the logical-destination
 * mask 'dest'.  The local-APIC ICR programming sequence (wait for the
 * ICR to go idle, write the destination into ICR2, then trigger via
 * ICR) must not be interleaved with other APIC users, hence the
 * hardware interrupt lock around the whole sequence.
 */
static inline void send_sched_ipi(unsigned long dest)
{
	unsigned long flags;
	rtai_hw_lock(flags);
	apic_wait_icr_idle();
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(dest));
	apic_write_around(APIC_ICR, APIC_DEST_LOGICAL | SCHED_VECTOR);
	rtai_hw_unlock(flags);
}
00141
/*
 * Rescheduling requests.  'schedmap' is a bitmask of CPUs whose ready
 * queues were touched.  When SMP scheduling is active (sqilter != 0)
 * remote CPUs are kicked with the scheduler IPI; otherwise (and always
 * for the local CPU in the *_BOTH variants) rt_schedule() is invoked
 * directly.
 */
#define RT_SCHEDULE_MAP(schedmap) \
do { \
	if (sqilter) { \
		if (schedmap) send_sched_ipi(schedmap); \
	} else { \
		rt_schedule(); \
	} \
} while (0)

/* Kick the remote CPUs in 'schedmap' AND reschedule locally. */
#define RT_SCHEDULE_MAP_BOTH(schedmap) \
do { \
	if (sqilter && schedmap) send_sched_ipi(schedmap); \
	rt_schedule(); \
} while (0)

/* Reschedule wherever 'task' runs: IPI its CPU if remote, else locally. */
#define RT_SCHEDULE(task, cpuid) \
do { \
	if (((task)->runnable_on_cpus != (cpuid)) && sqilter) { \
		send_sched_ipi(1 << (task)->runnable_on_cpus); \
	} else { \
		rt_schedule(); \
	} \
} while (0)

/* As RT_SCHEDULE, but always reschedule locally as well. */
#define RT_SCHEDULE_BOTH(task, cpuid) \
{ \
	if (((task)->runnable_on_cpus != (cpuid)) && sqilter) { \
		send_sched_ipi(1 << (task)->runnable_on_cpus); \
	} \
	rt_schedule(); \
}

#else

/* Uniprocessor build: no IPIs; every request degenerates to a plain
   local rt_schedule() call. */
#define send_sched_ipi(dest)

#define RT_SCHEDULE_MAP_BOTH(schedmap) rt_schedule()

#define RT_SCHEDULE_MAP(schedmap) rt_schedule()

#define RT_SCHEDULE(task, cpuid) rt_schedule()

#define RT_SCHEDULE_BOTH(task, cpuid) rt_schedule()

#endif

/* Priority offset for soft tasks (NOTE(review): presumably added to a
   soft task's priority to keep all of them below hard tasks -- confirm
   in the scheduler implementation). */
#define BASE_SOFT_PRIORITY 1000000000

/* Linux task state a soft task's kernel thread is parked in while the
   RTAI side owns it. */
#define TASK_HARDREALTIME TASK_UNINTERRUPTIBLE
00191
00192 static inline void enq_ready_edf_task(RT_TASK *ready_task)
00193 {
00194 RT_TASK *task;
00195 #ifdef CONFIG_SMP
00196 task = rt_smp_linux_task[ready_task->runnable_on_cpus & sqilter].rnext;
00197 #else
00198 task = rt_smp_linux_task[0].rnext;
00199 #endif
00200 while (task->policy < 0 && ready_task->period >= task->period) {
00201 task = task->rnext;
00202 }
00203 task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
00204 ready_task->rnext = task;
00205 }
00206
/* Capacity of the pending-wakeup ring; must remain a power of two since
   indices are wrapped with "& (MAX_WAKEUP_SRQ - 1)". */
#define MAX_WAKEUP_SRQ (2 << 6)

/* Ring of Linux tasks waiting to be woken, drained from the Linux side
   by the service request handler registered under id 'srq'. */
struct klist_t { volatile int srq, in, out; void *task[MAX_WAKEUP_SRQ]; };
extern struct klist_t wake_up_srq;
00211
00212 static inline void enq_ready_task(RT_TASK *ready_task)
00213 {
00214 RT_TASK *task;
00215 if (ready_task->is_hard) {
00216 #ifdef CONFIG_SMP
00217 task = rt_smp_linux_task[ready_task->runnable_on_cpus & sqilter].rnext;
00218 #else
00219 task = rt_smp_linux_task[0].rnext;
00220 #endif
00221 while (ready_task->priority >= task->priority) {
00222 if ((task = task->rnext)->priority < 0) break;
00223 }
00224 task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
00225 ready_task->rnext = task;
00226 } else {
00227 ready_task->state = 0;
00228 wake_up_srq.task[wake_up_srq.in] = ready_task->lnxtsk;
00229 wake_up_srq.in = (wake_up_srq.in + 1) & (MAX_WAKEUP_SRQ - 1);
00230 rt_pend_linux_srq(wake_up_srq.srq);
00231 }
00232 }
00233
00234 static inline int renq_ready_task(RT_TASK *ready_task, int priority)
00235 {
00236 int retval;
00237 if ((retval = ready_task->priority != priority)) {
00238 ready_task->priority = priority;
00239 if (ready_task->state == RT_SCHED_READY) {
00240 (ready_task->rprev)->rnext = ready_task->rnext;
00241 (ready_task->rnext)->rprev = ready_task->rprev;
00242 enq_ready_task(ready_task);
00243 }
00244 }
00245 return retval;
00246 }
00247
00248 static inline int renq_current(RT_TASK *rt_current, int priority)
00249 {
00250 int retval;
00251 if ((retval = rt_current->priority != priority)) {
00252 rt_current->priority = priority;
00253 (rt_current->rprev)->rnext = rt_current->rnext;
00254 (rt_current->rnext)->rprev = rt_current->rprev;
00255 enq_ready_task(rt_current);
00256 }
00257 return retval;
00258 }
00259
00260 static inline void rem_ready_task(RT_TASK *task)
00261 {
00262 if (task->state == RT_SCHED_READY) {
00263 if (!task->is_hard) {
00264 (task->lnxtsk)->state = TASK_HARDREALTIME;
00265 }
00266 (task->rprev)->rnext = task->rnext;
00267 (task->rnext)->rprev = task->rprev;
00268 }
00269 }
00270
00271 static inline void rem_ready_current(RT_TASK *rt_current)
00272 {
00273 if (!rt_current->is_hard) {
00274 (rt_current->lnxtsk)->state = TASK_HARDREALTIME;
00275 }
00276 (rt_current->rprev)->rnext = rt_current->rnext;
00277 (rt_current->rnext)->rprev = rt_current->rprev;
00278 }
00279
00280 static inline void enq_timed_task(RT_TASK *timed_task)
00281 {
00282 RT_TASK *task;
00283 #ifdef CONFIG_SMP
00284 task = rt_smp_linux_task[timed_task->runnable_on_cpus & sqilter].tnext;
00285 #else
00286 task = rt_smp_linux_task[0].tnext;
00287 #endif
00288 while (timed_task->resume_time > task->resume_time) {
00289 task = task->tnext;
00290 }
00291 task->tprev = (timed_task->tprev = task->tprev)->tnext = timed_task;
00292 timed_task->tnext = task;
00293 }
00294
/*
 * Resume every task on this CPU's timed list whose resume_time has been
 * reached.  For each, all blocking-wait bits are cleared; if nothing
 * else keeps the task suspended (state becomes exactly READY) it is
 * moved to the ready list -- EDF tasks (policy < 0) in deadline order,
 * the rest by priority.  Finally the whole run of expired entries is
 * detached from the head of the timed list in one relink.
 *
 * NOTE: on SMP, rt_time_h expands to rt_smp_time_h[cpuid & sqilter], so
 * it deliberately picks up the local 'cpuid' masked on the first line.
 */
static inline void wake_up_timed_tasks(int cpuid)
{
	RT_TASK *task;
#ifdef CONFIG_SMP
	task = rt_smp_linux_task[cpuid = cpuid & sqilter].tnext;
#else
	task = rt_smp_linux_task[0].tnext;
#endif
	while (task->resume_time <= rt_time_h) {
		/* clear every timed/blocked wait reason at once */
		if ((task->state &= ~(RT_SCHED_DELAYED | RT_SCHED_SEMAPHORE | RT_SCHED_RECEIVE | RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN | RT_SCHED_MBXSUSP)) == RT_SCHED_READY) {
			if (task->policy < 0) {
				enq_ready_edf_task(task);
			} else {
				enq_ready_task(task);
			}
		}
		task = task->tnext;
	}
	/* detach all woken entries: list head now points at the first
	   not-yet-expired task */
#ifdef CONFIG_SMP
	rt_smp_linux_task[cpuid].tnext = task;
	task->tprev = &rt_smp_linux_task[cpuid];
#else
	rt_smp_linux_task[0].tnext = task;
	task->tprev = &rt_smp_linux_task[0];
#endif
}
00321
00322 static inline void rem_timed_task(RT_TASK *task)
00323 {
00324 if ((task->state & RT_SCHED_DELAYED)) {
00325 (task->tprev)->tnext = task->tnext;
00326 (task->tnext)->tprev = task->tprev;
00327 }
00328 }
00329
/* Current time: delegate to rt_get_time().  The open-coded variant
   below is kept for reference but compiled out. */
#define get_time() rt_get_time()
#if 0
static inline RTIME get_time(void)
{
#ifdef CONFIG_SMP
	if (sqilter) {
		int cpuid;
		return rt_smp_oneshot_timer[cpuid = hard_cpu_id()] ? rdtsc() : rt_smp_times[cpuid].tick_time;
	} else {
		return rt_smp_oneshot_timer[0] ? rdtsc(): rt_times.tick_time;
	}
#else
	return oneshot_timer ? rdtsc(): rt_times.tick_time;
#endif
}
#endif
00346
00347 static inline void enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
00348 {
00349 QUEUE *q;
00350 task->blocked_on = (q = queue);
00351 if (!qtype) {
00352 while ((q = q->next) != queue && (q->task)->priority <= task->priority);
00353 }
00354 q->prev = (task->queue.prev = q->prev)->next = &(task->queue);
00355 task->queue.next = q;
00356 }
00357
00358
00359 static inline void dequeue_blocked(RT_TASK *task)
00360 {
00361 task->prio_passed_to = NOTHING;
00362 (task->queue.prev)->next = task->queue.next;
00363 (task->queue.next)->prev = task->queue.prev;
00364 task->blocked_on = NOTHING;
00365 }
00366
/*
 * Priority inheritance: propagate 'from's priority along the chain of
 * tasks it has donated priority to (prio_passed_to links), boosting
 * each link that currently holds a numerically larger (worse) value.
 * A boosted task that is READY is repositioned in its ready list; a
 * boosted task blocked on a priority-ordered queue (i.e. not a FIFO
 * semaphore, qtype != 0) is re-sorted within that wait queue.  On SMP
 * the return value is a mask of CPUs whose ready lists were touched
 * (suitable for RT_SCHEDULE_MAP); on UP it is always 0.
 */
static __volatile__ inline unsigned long pass_prio(RT_TASK *to, RT_TASK *from)
{
	QUEUE *q;
#ifdef CONFIG_SMP
	unsigned long schedmap;
	schedmap = 0;
#endif
	from->prio_passed_to = to;
	while (to && to->priority > from->priority) {
		to->priority = from->priority;
		if (to->state == RT_SCHED_READY) {
			/* unlink, then re-enqueue at the boosted priority */
			(to->rprev)->rnext = to->rnext;
			(to->rnext)->rprev = to->rprev;
			enq_ready_task(to);
#ifdef CONFIG_SMP
			set_bit(to->runnable_on_cpus & 0x1F, &schedmap);
#endif
		} else if ((q = to->blocked_on) && !((to->state & RT_SCHED_SEMAPHORE) &&
		((SEM *)q)->qtype)) {
			/* re-sort 'to' inside the priority-ordered wait queue */
			(to->queue.prev)->next = to->queue.next;
			(to->queue.next)->prev = to->queue.prev;
			while ((q = q->next) != to->blocked_on && (q->task)->priority <= to->priority);
			q->prev = (to->queue.prev = q->prev)->next = &(to->queue);
			to->queue.next = q;
		}
		to = to->prio_passed_to;
	}
#ifdef CONFIG_SMP
	return schedmap;
#else
	return 0;
#endif
}
00400
00401 static inline RT_TASK *_rt_whoami(void)
00402 {
00403 #ifdef CONFIG_SMP
00404 RT_TASK *rt_current;
00405 unsigned long flags;
00406 flags = rt_global_save_flags_and_cli();
00407 rt_current = RT_CURRENT;
00408 rt_global_restore_flags(flags);
00409 return rt_current;
00410 #else
00411 return rt_smp_current[0];
00412 #endif
00413 }
00414
00415 static inline void __call_exit_handlers(RT_TASK *task)
00416 {
00417 XHDL *pt, *tmp;
00418
00419 pt = task->ExitHook;
00420 while ( pt ) {
00421 (*pt->fun) (pt->arg1, pt->arg2);
00422 tmp = pt;
00423 pt = pt->nxt;
00424 rt_free(tmp);
00425 }
00426 task->ExitHook = 0;
00427 }
00428
00429 static inline XHDL *__set_exit_handler(RT_TASK *task, void (*fun) (void *, int), void *arg1, int arg2)
00430 {
00431 XHDL *p;
00432
00433
00434
00435 if (task->magic != RT_TASK_MAGIC) return 0;
00436 if (!(p = (XHDL *) rt_malloc (sizeof(XHDL)))) return 0;
00437 p->fun = fun;
00438 p->arg1 = arg1;
00439 p->arg2 = arg2;
00440 p->nxt = task->ExitHook;
00441 return (task->ExitHook = p);
00442 }
00443
/*
 * Initialize every RTAI feature compiled into the scheduler module
 * (the *_BUILTIN configuration options).  NOTE(review): the individual
 * __rtai_*_init() return values are ignored here -- confirm whether
 * init failures are expected to be fatal.  Always returns 0.
 */
static inline int rtai_init_features (void)

{
#ifdef CONFIG_RTAI_LEDS_BUILTIN
	__rtai_leds_init();
#endif
#ifdef CONFIG_RTAI_SEM_BUILTIN
	__rtai_sem_init();
#endif
#ifdef CONFIG_RTAI_MSG_BUILTIN
	__rtai_msg_init();
#endif
#ifdef CONFIG_RTAI_MBX_BUILTIN
	__rtai_mbx_init();
#endif
#ifdef CONFIG_RTAI_TBX_BUILTIN
	__rtai_tbx_init();
#endif
#ifdef CONFIG_RTAI_MQ_BUILTIN
	__rtai_mq_init();
#endif
#ifdef CONFIG_RTAI_BITS_BUILTIN
	__rtai_bits_init();
#endif
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
	__rtai_tasklets_init();
#endif
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
	__rtai_fifos_init();
#endif
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
	__rtai_netrpc_init();
#endif
#ifdef CONFIG_RTAI_SHM_BUILTIN
	__rtai_shm_init();
#endif
#ifdef CONFIG_RTAI_USI_BUILTIN
	__rtai_usi_init();
#endif
#ifdef CONFIG_RTAI_MATH_BUILTIN
	__rtai_math_init();
#endif

	return 0;
}
00489
/*
 * Tear down the built-in RTAI features in exactly the reverse order of
 * rtai_init_features(), so later-initialized features are released
 * first.
 */
static inline void rtai_cleanup_features (void) {

#ifdef CONFIG_RTAI_MATH_BUILTIN
	__rtai_math_exit();
#endif
#ifdef CONFIG_RTAI_USI_BUILTIN
	__rtai_usi_exit();
#endif
#ifdef CONFIG_RTAI_SHM_BUILTIN
	__rtai_shm_exit();
#endif
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
	__rtai_netrpc_exit();
#endif
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
	__rtai_fifos_exit();
#endif
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
	__rtai_tasklets_exit();
#endif
#ifdef CONFIG_RTAI_BITS_BUILTIN
	__rtai_bits_exit();
#endif
#ifdef CONFIG_RTAI_MQ_BUILTIN
	__rtai_mq_exit();
#endif
#ifdef CONFIG_RTAI_TBX_BUILTIN
	__rtai_tbx_exit();
#endif
#ifdef CONFIG_RTAI_MBX_BUILTIN
	__rtai_mbx_exit();
#endif
#ifdef CONFIG_RTAI_MSG_BUILTIN
	__rtai_msg_exit();
#endif
#ifdef CONFIG_RTAI_SEM_BUILTIN
	__rtai_sem_exit();
#endif
#ifdef CONFIG_RTAI_LEDS_BUILTIN
	__rtai_leds_exit();
#endif
}
00532
/* Check the current task's stack (implemented by the scheduler module). */
int rt_check_current_stack(void);

/* Create a real-time task whose body runs as a kernel thread
   (NOTE(review): semantics inferred from the name -- parameters mirror
   rt_task_init: entry point, its integer argument, stack size,
   priority, FPU usage flag and optional signal handler). */
int rt_kthread_init(RT_TASK *task,
		    void (*rt_thread)(int),
		    int data,
		    int stack_size,
		    int priority,
		    int uses_fpu,
		    void(*signal)(void));

/* As rt_kthread_init(), with the task bound to the given CPU. */
int rt_kthread_init_cpuid(RT_TASK *task,
			  void (*rt_thread)(int),
			  int data,
			  int stack_size,
			  int priority,
			  int uses_fpu,
			  void(*signal)(void),
			  unsigned int cpuid);
00551
00552 #endif
00553
00554 #endif