00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019 #ifndef _RTAI_SCHED_H
00020 #define _RTAI_SCHED_H
00021
00022 #include <rtai.h>
00023 #ifndef __KERNEL__
00024 #include <sys/time.h>
00025 #include <time.h>
00026 #include <errno.h>
00027 #include <rtai_types.h>
00028 #endif
00029
/* Compiled scheduler flavour (see rt_sched_type()). */
#define RT_SCHED_UP 1		/* uniprocessor */
#define RT_SCHED_SMP 2		/* symmetric multiprocessor */
#define RT_SCHED_MUP 3		/* multi-uniprocessor (per-CPU scheduling) */

/* Priority range: lower numeric value means higher priority. */
#define RT_SCHED_HIGHEST_PRIORITY 0
#define RT_SCHED_LOWEST_PRIORITY 0x3fffFfff
#define RT_SCHED_LINUX_PRIORITY 0x7fffFfff	/* below any real-time task */

/* Task state flag bits kept in RT_TASK.state; several may be OR-ed
 * together (the names indicate the wait reason). */
#define RT_SCHED_READY 1
#define RT_SCHED_SUSPENDED 2
#define RT_SCHED_DELAYED 4
#define RT_SCHED_SEMAPHORE 8
#define RT_SCHED_SEND 16
#define RT_SCHED_RECEIVE 32
#define RT_SCHED_RPC 64
#define RT_SCHED_RETURN 128
#define RT_SCHED_MBXSUSP 256
00047
00048 struct rt_task_struct;
00049
00050 #ifdef __KERNEL__
00051
00052 #include <linux/time.h>
00053 #include <linux/errno.h>
00054
00055 #define RT_TASK_MAGIC 0x754d2774
00056
00057 #ifndef __cplusplus
00058
00059 #include <linux/sched.h>
00060
/* Doubly-linked queue node used for ready/wait lists; each node carries
 * the task queued through it. */
typedef struct rt_queue {
	struct rt_queue *prev;
	struct rt_queue *next;
	struct rt_task_struct *task;	/* task this node refers to */
} QUEUE;
00066
/* Message control block: paired send/receive buffers with their byte
 * counts (presumably for the extended message/RPC services — confirm
 * against the scheduler sources). */
struct mcb_t {
	void *sbuf;	/* send buffer */
	int sbytes;	/* size of sbuf in bytes */
	void *rbuf;	/* receive buffer */
	int rbytes;	/* size of rbuf in bytes */
};
00073
/* Node of a singly-linked chain of cleanup callbacks (see RT_TASK.ExitHook);
 * each entry calls fun(arg1, arg2). */
typedef struct rt_ExitHandler {
	struct rt_ExitHandler *nxt;		/* next handler in the chain */
	void (*fun) (void *arg1, int arg2);	/* callback to invoke */
	void *arg1;				/* first argument for fun */
	int arg2;				/* second argument for fun */
} XHDL;
00082
00083 struct rt_heap_t { void *heap, *kadr, *uadr; };
00084
/*
 * RTAI real-time task control block (TCB).
 *
 * One instance describes each task known to the scheduler.
 * NOTE(review): the leading members are very likely accessed by fixed
 * offset from arch-specific context-switch code — do not reorder any
 * field without checking those sources.
 */
typedef struct rt_task_struct {
	int *stack;			/* saved stack pointer */
	int uses_fpu;			/* non-zero: save/restore FPU context */
	int magic;			/* RT_TASK_MAGIC when the TCB is valid */
	volatile int state, running;	/* RT_SCHED_* state bits; run flag */
	unsigned long runnable_on_cpus;	/* CPU affinity bitmask */
	int *stack_bottom;		/* base of the stack allocation */
	volatile int priority;		/* effective (possibly inherited) priority */
	int base_priority;		/* priority set at init / rt_change_prio */
	int policy;			/* policy set by rt_set_sched_policy */
	int sched_lock_priority;	/* priority used under the scheduler lock */
	struct rt_task_struct *prio_passed_to;	/* priority-inheritance target */
	RTIME period;			/* period of a periodic task (counts) */
	RTIME resume_time;		/* absolute time of next resume */
	RTIME yield_time;		/* round-robin slice expiry time */
	int rr_quantum;			/* round-robin quantum */
	int rr_remaining;		/* remaining part of the current slice */
	int suspdepth;			/* suspension nesting depth */
	struct rt_queue queue;		/* link into ready/wait queues */
	int owndres;			/* owned-resources accounting — NOTE(review): confirm encoding */
	struct rt_queue *blocked_on;	/* queue currently blocked on, if any */
	struct rt_queue msg_queue;	/* queue of tasks messaging this task */
	int tid;			/* task id */
	unsigned msg;			/* message word for send/receive */
	struct rt_queue ret_queue;	/* queue used for message/RPC returns */
	void (*signal)(void);		/* handler installed at task init (may be NULL) */
	FPU_ENV fpu_reg;		/* saved FPU context (when uses_fpu) */
	struct rt_task_struct *prev;	/* global task list links */
	struct rt_task_struct *next;
	struct rt_task_struct *tprev;	/* timed (delayed) task list links */
	struct rt_task_struct *tnext;
	struct rt_task_struct *rprev;	/* ready list links */
	struct rt_task_struct *rnext;

	int *fun_args, *bstack;		/* entry-function args; stack base */
	struct task_struct *lnxtsk;	/* associated Linux task, if any */
	long long retval;		/* return value holder */
	char *msg_buf[2];		/* extended message buffers */
	int max_msg_size[2];		/* capacities of msg_buf[] */
	char task_name[16];		/* name used by rt_named_task_init() */
	void *system_data_ptr;		/* hook for system-private data */
	struct rt_task_struct *nextp;	/* auxiliary list links */
	struct rt_task_struct *prevp;

	/* Per-task trap handlers, one slot per trap vector. */
	RT_TRAP_HANDLER task_trap_handler[RTAI_NR_TRAPS];

	/* User-space (soft/hard mode) support fields. */
	void (*usp_signal)(void);
	volatile unsigned long pstate;
	unsigned long usp_flags;
	unsigned long usp_flags_mask;
	unsigned long force_soft;	/* request return to soft mode — NOTE(review): confirm */
	volatile int is_hard;		/* non-zero while in hard real-time mode */

	void *tick_queue;

	/* Trap-to-signal delivery. */
	void *trap_handler_data;
	int trap_signo;

	int resync_frame;

	/* Cleanup and miscellaneous per-task state. */
	XHDL *ExitHook;			/* chain of exit handlers */
	int linux_signal;
	int errno;			/* per-task errno value */
	void (*linux_signal_handler)(int sig);
	RTIME exectime[2];		/* execution-time accounting */
	struct mcb_t mcb;		/* message control block */

	struct rt_heap_t heap[2];	/* kernel/user shared heaps */

} RT_TASK __attribute__ ((__aligned__ (16)));
00165
00166 #else
00167 extern "C" {
00168 #endif
00169
/* ------------------------------------------------------------------ */
/* Task creation, deletion and CPU placement                          */
/* ------------------------------------------------------------------ */

/* Initialise the caller-allocated TCB *task to run rt_thread(data) on a
 * stack of stack_size bytes at the given priority (0 = highest); uses_fpu
 * enables FPU context saving and signal, if non-NULL, installs a per-task
 * handler. */
int rt_task_init(struct rt_task_struct *task,
		 void (*rt_thread)(int),
		 int data,
		 int stack_size,
		 int priority,
		 int uses_fpu,
		 void(*signal)(void));

/* As rt_task_init(), additionally binding the task to CPU run_on_cpu. */
int rt_task_init_cpuid(struct rt_task_struct *task,
		       void (*rt_thread)(int),
		       int data,
		       int stack_size,
		       int priority,
		       int uses_fpu,
		       void(*signal)(void),
		       unsigned run_on_cpu);

/* Restrict task to the CPUs set in cpu_mask (one bit per CPU). */
void rt_set_runnable_on_cpus(struct rt_task_struct *task,
			     unsigned long cpu_mask);

/* Restrict task to the single CPU cpuid. */
void rt_set_runnable_on_cpuid(struct rt_task_struct *task,
			      unsigned cpuid);

/* Set the scheduling policy for task; rr_quantum_ns is the round-robin
 * time slice in nanoseconds — NOTE(review): valid policy values are
 * defined by the scheduler implementation, confirm there. */
void rt_set_sched_policy(struct rt_task_struct *task,
			 int policy,
			 int rr_quantum_ns);

/* Delete task, releasing its scheduler resources. */
int rt_task_delete(struct rt_task_struct *task);

/* Return the RT_SCHED_* state bits of task. */
int rt_get_task_state(struct rt_task_struct *task);
00200
/* ------------------------------------------------------------------ */
/* Timer control                                                      */
/* ------------------------------------------------------------------ */

/* Fill time_orig[] with the timer's time origin(s) — NOTE(review): array
 * length and element meaning are defined by the implementation, confirm. */
void rt_gettimeorig(RTIME time_orig[]);

/* Return the CPU that services the real-time timer. */
int rt_get_timer_cpu(void);

/* Non-zero while the hard real-time timer is running. */
int rt_is_hard_timer_running(void);

/* Select periodic timer mode (fixed tick). */
void rt_set_periodic_mode(void);

/* Select one-shot timer mode (timer reprogrammed per event). */
void rt_set_oneshot_mode(void);

/* Start the real-time timer; period is expressed in timer counts. */
RTIME start_rt_timer(int period);

/* As start_rt_timer(), acting on CPU cpuid. */
RTIME start_rt_timer_cpuid(int period,
			   int cpuid);

/* Convenience wrapper: start the timer with a period in nanoseconds. */
#define start_rt_timer_ns(period) start_rt_timer(nano2count((period)))

/* Start the per-CPU APIC timers as described by setup_mode[]. */
void start_rt_apic_timers(struct apic_timer_setup_data *setup_mode,
			  unsigned rcvr_jiffies_cpuid);

/* Stop the real-time timer. */
void stop_rt_timer(void);
00222
/* ------------------------------------------------------------------ */
/* Inquiry, FPU/preemption control, time reading and conversion       */
/* ------------------------------------------------------------------ */

/* Return the TCB of the calling task. */
struct rt_task_struct *rt_whoami(void);

/* Return the compiled scheduler flavour (RT_SCHED_UP/SMP/MUP). */
int rt_sched_type(void);

/* Install handler as the task's signal handler (see RT_TASK.signal). */
int rt_task_signal_handler(struct rt_task_struct *task,
			   void (*handler)(void));

/* Enable/disable FPU context saving for task. */
int rt_task_use_fpu(struct rt_task_struct *task,
		    int use_fpu_flag);

/* Enable/disable FPU context saving for the Linux tasks. */
void rt_linux_use_fpu(int use_fpu_flag);

/* Enable/disable always-preempt timer behaviour — NOTE(review): exact
 * semantics are scheduler-specific, confirm. */
void rt_preempt_always(int yes_no);

/* Per-CPU variant of rt_preempt_always(). */
void rt_preempt_always_cpuid(int yes_no,
			     unsigned cpuid);

/* Convert timer counts to nanoseconds. */
RTIME count2nano(RTIME timercounts);

/* Convert nanoseconds to timer counts. */
RTIME nano2count(RTIME nanosecs);

/* Per-CPU conversion variants (MUP timers may differ per CPU). */
RTIME count2nano_cpuid(RTIME timercounts,
		       unsigned cpuid);

RTIME nano2count_cpuid(RTIME nanosecs,
		       unsigned cpuid);

/* Current time in timer counts. */
RTIME rt_get_time(void);

RTIME rt_get_time_cpuid(unsigned cpuid);

/* Current time in nanoseconds. */
RTIME rt_get_time_ns(void);

RTIME rt_get_time_ns_cpuid(unsigned cpuid);

/* CPU time of the caller in nanoseconds. */
RTIME rt_get_cpu_time_ns(void);

/* Return the base priority of task (see RT_TASK.base_priority). */
int rt_get_prio(struct rt_task_struct *task);

/* Return the effective, possibly inherited, priority of task. */
int rt_get_inher_prio(struct rt_task_struct *task);

/* Rate-monotonic priority supervision for CPU cpuid — NOTE(review):
 * see the scheduler sources for the exact recomputation performed. */
void rt_spv_RMS(int cpuid);

/* Change the base priority of task to priority. */
int rt_change_prio(struct rt_task_struct *task,
		   int priority);
00268
/* ------------------------------------------------------------------ */
/* Scheduling operations, suspension, periodic timing, named tasks    */
/* ------------------------------------------------------------------ */

/* Lock/unlock the scheduler (disable/re-enable task switching). */
void rt_sched_lock(void);

void rt_sched_unlock(void);

/* Yield the CPU to other ready tasks. */
void rt_task_yield(void);

/* Suspend task (suspensions nest — see RT_TASK.suspdepth). */
int rt_task_suspend(struct rt_task_struct *task);

/* Resume a suspended task. */
int rt_task_resume(struct rt_task_struct *task);

/* Make task periodic: first release after start_delay, then every period
 * (both in nanoseconds, relative to now). */
int rt_task_make_periodic_relative_ns(struct rt_task_struct *task,
				      RTIME start_delay,
				      RTIME period);

/* Make task periodic: first release at absolute start_time, then every
 * period (both in timer counts). */
int rt_task_make_periodic(struct rt_task_struct *task,
			  RTIME start_time,
			  RTIME period);

/* Set the caller's resume and end times — NOTE(review): semantics are
 * scheduler-specific, confirm. */
void rt_task_set_resume_end_times(RTIME resume,
				  RTIME end);

/* Overwrite the resume time of a delayed task. */
int rt_set_resume_time(struct rt_task_struct *task,
		       RTIME new_resume_time);

/* Change the period of a periodic task. */
int rt_set_period(struct rt_task_struct *task,
		  RTIME new_period);

/* Block the caller until its next period. */
void rt_task_wait_period(void);

/* Invoke the scheduler. */
void rt_schedule(void);

/* Return the caller's next period start time. */
RTIME next_period(void);

/* Busy-wait (no suspension) for nanosecs nanoseconds. */
void rt_busy_sleep(int nanosecs);

/* Suspend the caller for delay timer counts. */
void rt_sleep(RTIME delay);

/* Suspend the caller until the absolute time `time` (counts). */
void rt_sleep_until(RTIME time);

/* Wake a task blocked in rt_sleep()/rt_sleep_until(). */
int rt_task_wakeup_sleeping(struct rt_task_struct *task);

/* Allocate, initialise and register a task under task_name; returns the
 * new TCB, or NULL on failure — NOTE(review): confirm failure return. */
struct rt_task_struct *rt_named_task_init(const char *task_name,
					  void (*thread)(int),
					  int data,
					  int stack_size,
					  int prio,
					  int uses_fpu,
					  void(*signal)(void));

/* As rt_named_task_init(), bound to CPU run_on_cpu. */
struct rt_task_struct *rt_named_task_init_cpuid(const char *task_name,
						void (*thread)(int),
						int data,
						int stack_size,
						int prio,
						int uses_fpu,
						void(*signal)(void),
						unsigned run_on_cpu);

/* Delete and unregister a task created by rt_named_task_init*(). */
int rt_named_task_delete(struct rt_task_struct *task);

/* Install handler for trap vector vec on task (see
 * RT_TASK.task_trap_handler); returns the previous handler —
 * NOTE(review): confirm the return convention. */
RT_TRAP_HANDLER rt_set_task_trap_handler(struct rt_task_struct *task,
					 unsigned vec,
					 RT_TRAP_HANDLER handler);
00332
00333 static inline RTIME timeval2count(struct timeval *t)
00334 {
00335 return nano2count(t->tv_sec*1000000000LL + t->tv_usec*1000);
00336 }
00337
00338 static inline void count2timeval(RTIME rt, struct timeval *t)
00339 {
00340 t->tv_sec = ulldiv(count2nano(rt), 1000000000, (unsigned long *)&t->tv_usec);
00341 t->tv_usec /= 1000;
00342 }
00343
00344 static inline RTIME timespec2count(const struct timespec *t)
00345 {
00346 return nano2count(t->tv_sec*1000000000LL + t->tv_nsec);
00347 }
00348
00349 static inline void count2timespec(RTIME rt, struct timespec *t)
00350 {
00351 t->tv_sec = ulldiv(count2nano(rt), 1000000000, (unsigned long *)&t->tv_nsec);
00352 }
00353
00354 static inline RTIME timespec2nanos(const struct timespec *t)
00355 {
00356 return t->tv_sec*1000000000LL + t->tv_nsec;
00357 }
00358
00359 static inline void nanos2timespec(RTIME rt, struct timespec *t)
00360 {
00361 t->tv_sec = ulldiv(rt, 1000000000, (unsigned long *)&t->tv_nsec);
00362 }
00363
00364 #ifdef __cplusplus
00365 }
00366 #else
00367
00368
00369
/* ------------------------------------------------------------------ */
/* Scheduler-internal entry points (kernel, C only): ready/timed queue */
/* management and watchdog registration.  Not part of the public API.  */
/* ------------------------------------------------------------------ */

/* Return (and store through base_linux_task) the base Linux task TCB. */
RT_TASK *rt_get_base_linux_task(RT_TASK **base_linux_task);

/* Allocate a TCB for a dynamically created task. */
RT_TASK *rt_alloc_dynamic_task(void);

/* Enqueue ready_task into the EDF ready queue. */
void rt_enq_ready_edf_task(RT_TASK *ready_task);

/* Enqueue ready_task into the priority-ordered ready queue. */
void rt_enq_ready_task(RT_TASK *ready_task);

/* Re-enqueue ready_task at a new priority. */
int rt_renq_ready_task(RT_TASK *ready_task,
		       int priority);

/* Remove task from the ready queue. */
void rt_rem_ready_task(RT_TASK *task);

/* Remove the currently running task from the ready queue. */
void rt_rem_ready_current(RT_TASK *rt_current);

/* Enqueue timed_task into the timed (delayed) task queue. */
void rt_enq_timed_task(RT_TASK *timed_task);

/* Remove task from the timed task queue. */
void rt_rem_timed_task(RT_TASK *task);

/* Unlink task from whatever queue it is blocked on. */
void rt_dequeue_blocked(RT_TASK *task);

/* Register/deregister the watchdog task for CPU cpuid. */
RT_TASK **rt_register_watchdog(RT_TASK *wdog,
			       int cpuid);

void rt_deregister_watchdog(RT_TASK *wdog,
			    int cpuid);
00396
00397 #endif
00398
00399 #endif
00400
00401 #if !defined(__KERNEL__) || defined(__cplusplus)
00402
/* User-space / C++ view: opaque placeholder types.  The real layouts live
 * on the kernel side; these objects are only ever handled by pointer. */
typedef struct rt_task_struct {
	int opaque;
} RT_TASK;

typedef struct QueueBlock {
	int opaque;
} QBLK;

typedef struct QueueHook {
	int opaque;
} QHOOK;
00414
00415 #endif
00416
00417 #endif