#ifndef _RTAI_POSIX_H_
#define _RTAI_POSIX_H_

#ifdef __KERNEL__

#include <rtai_malloc.h>
#include <rtai_rwl.h>
#include <rtai_spl.h>
#include <rtai_sem.h>

#define MAX_PRIO 99
#define MIN_PRIO 1
#define STACK_SIZE 8192
#define RR_QUANTUM_NS 1000000

typedef struct rt_semaphore sem_t;

typedef struct rt_semaphore pthread_mutex_t;

typedef unsigned long pthread_mutexattr_t;

typedef struct rt_semaphore pthread_cond_t;

typedef unsigned long pthread_condattr_t;

typedef struct rt_semaphore pthread_barrier_t;

typedef int pthread_barrierattr_t;

typedef RWL pthread_rwlock_t;

typedef int pthread_rwlockattr_t;

typedef struct rt_spl_t pthread_spinlock_t;

typedef struct rt_task_struct *pthread_t;

typedef struct pthread_attr {
	int stacksize;
	int policy;
	int rr_quantum_ns;
	int priority;
} pthread_attr_t;

typedef struct pthread_cookie {
	RT_TASK task;
	SEM sem;
	void (*task_fun)(int);
	int arg;
} pthread_cookie_t;
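
/*
 * Note: pthread_exit_rt() and pthread_join_rt() below reach the per-thread
 * semaphore as (SEM *)(task + 1), i.e. they rely on 'sem' immediately
 * following 'task' in this cookie and on *thread pointing at 'task'.
 */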

#ifdef __cplusplus
extern "C" {
#endif

static inline int sem_init_rt(sem_t *sem, int pshared, unsigned int value)
{
	if (value < SEM_TIMOUT) {
		rt_typed_sem_init(sem, value, pshared | PRIO_Q);
		return 0;
	}
	return -EINVAL;
}

static inline int sem_destroy_rt(sem_t *sem)
{
	if (rt_sem_wait_if(sem) >= 0) {
		rt_sem_signal(sem);
		return rt_sem_delete(sem);
	}
	return -EBUSY;
}

static inline int sem_wait_rt(sem_t *sem)
{
	return rt_sem_wait(sem) < SEM_TIMOUT ? 0 : -1;
}

static inline int sem_trywait_rt(sem_t *sem)
{
	return rt_sem_wait_if(sem) > 0 ? 0 : -EAGAIN;
}

static inline int sem_timedwait_rt(sem_t *sem, const struct timespec *abstime)
{
	return rt_sem_wait_until(sem, timespec2count(abstime)) < SEM_TIMOUT ? 0 : -1;
}

static inline int sem_post_rt(sem_t *sem)
{
	return rt_sem_signal(sem) < SEM_TIMOUT ? 0 : -ERANGE;
}

static inline int sem_getvalue_rt(sem_t *sem, int *sval)
{
	if ((*sval = rt_sem_wait_if(sem)) > 0) {
		rt_sem_signal(sem);
	}
	return 0;
}

static inline int pthread_mutex_init_rt(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr)
{
	rt_typed_sem_init(mutex, 1, RES_SEM);
	return 0;
}

static inline int pthread_mutex_destroy_rt(pthread_mutex_t *mutex)
{
	if (rt_sem_wait_if(mutex) > 0) {
		rt_sem_signal(mutex);
		return rt_sem_delete(mutex);
	}
	return -EBUSY;
}

static inline int pthread_mutex_lock_rt(pthread_mutex_t *mutex)
{
	return rt_sem_wait(mutex) < SEM_TIMOUT ? 0 : -EINVAL;
}

static inline int pthread_mutex_timedlock_rt(pthread_mutex_t *mutex, const struct timespec *abstime)
{
	return rt_sem_wait_until(mutex, timespec2count(abstime)) < SEM_TIMOUT ? 0 : -1;
}

static inline int pthread_mutex_trylock_rt(pthread_mutex_t *mutex)
{
	return rt_sem_wait_if(mutex) > 0 ? 0 : -EBUSY;
}

static inline int pthread_mutex_unlock_rt(pthread_mutex_t *mutex)
{
	return rt_sem_signal(mutex) > 0 ? 0 : -EINVAL;
}

static inline int pthread_cond_init_rt(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	return sem_init_rt(cond, BIN_SEM, 0);
}

static inline int pthread_cond_destroy_rt(pthread_cond_t *cond)
{
	return sem_destroy_rt((sem_t *)cond);
}

static inline int pthread_cond_signal_rt(pthread_cond_t *cond)
{
	return rt_cond_signal((sem_t *)cond);
}

static inline int pthread_cond_broadcast_rt(pthread_cond_t *cond)
{
	return rt_sem_broadcast(cond);
}

static inline int pthread_cond_wait_rt(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	return rt_cond_wait(cond, mutex);
}

static inline int pthread_cond_timedwait_rt(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
{
	return rt_cond_wait_until(cond, mutex, timespec2count(abstime)) < SEM_TIMOUT ? 0 : -ETIMEDOUT;
}

static inline int pthread_barrier_init_rt(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count)
{
	return sem_init_rt(barrier, CNT_SEM, count);
}

static inline int pthread_barrier_destroy_rt(pthread_barrier_t *barrier)
{
	return sem_destroy_rt(barrier);
}

static inline int pthread_barrier_wait_rt(pthread_barrier_t *barrier)
{
	return rt_sem_wait_barrier(barrier);
}

static inline int pthread_rwlock_init_rt(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	return rt_rwl_init((RWL *)rwlock);
}

static inline int pthread_rwlock_destroy_rt(pthread_rwlock_t *rwlock)
{
	return rt_rwl_delete((RWL *)rwlock);
}

static inline int pthread_rwlock_rdlock_rt(pthread_rwlock_t *rwlock)
{
	return rt_rwl_rdlock((RWL *)rwlock);
}

static inline int pthread_rwlock_tryrdlock_rt(pthread_rwlock_t *rwlock)
{
	return rt_rwl_rdlock_if((RWL *)rwlock);
}

static inline int pthread_rwlock_timedrdlock_rt(pthread_rwlock_t *rwlock, struct timespec *abstime)
{
	return rt_rwl_rdlock_until((RWL *)rwlock, timespec2count(abstime));
}

static inline int pthread_rwlock_wrlock_rt(pthread_rwlock_t *rwlock)
{
	return rt_rwl_wrlock((RWL *)rwlock);
}

static inline int pthread_rwlock_trywrlock_rt(pthread_rwlock_t *rwlock)
{
	return rt_rwl_wrlock_if((RWL *)rwlock);
}

static inline int pthread_rwlock_timedwrlock_rt(pthread_rwlock_t *rwlock, struct timespec *abstime)
{
	return rt_rwl_wrlock_until((RWL *)rwlock, timespec2count(abstime));
}

static inline int pthread_rwlock_unlock_rt(pthread_rwlock_t *rwlock)
{
	return rt_rwl_unlock((RWL *)rwlock);
}

static inline int pthread_spin_init_rt(pthread_spinlock_t *lock)
{
	return rt_spl_init((SPL *)lock);
}

static inline int pthread_spin_destroy_rt(pthread_spinlock_t *lock)
{
	return rt_spl_delete((SPL *)lock);
}

static inline int pthread_spin_lock_rt(pthread_spinlock_t *lock)
{
	return rt_spl_lock((SPL *)lock);
}

static inline int pthread_spin_trylock_rt(pthread_spinlock_t *lock)
{
	return rt_spl_lock_if((SPL *)lock);
}

static inline int pthread_spin_unlock_rt(pthread_spinlock_t *lock)
{
	return rt_spl_unlock((SPL *)lock);
}

static inline int get_max_priority_rt(int policy)
{
	return MAX_PRIO;
}

static inline int get_min_priority_rt(int policy)
{
	return MIN_PRIO;
}

/*
 * Glue used by pthread_create_rt(): run the start routine, wake up any
 * joiner through the cookie semaphore, then park the task so that
 * pthread_join_rt() can delete it and free the cookie.
 */
static void posix_wrapper_fun(pthread_cookie_t *cookie)
{
	cookie->task_fun(cookie->arg);
	rt_sem_broadcast(&cookie->sem);
	rt_sem_delete(&cookie->sem);
	rt_task_suspend(&cookie->task);
}

static inline int pthread_create_rt(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	pthread_cookie_t *cookie;
	cookie = (void *)rt_malloc(sizeof(pthread_cookie_t));
	if (cookie) {
		(cookie->task).magic = 0;
		cookie->task_fun = (void *)start_routine;
		cookie->arg = (int)arg;
		if (!rt_task_init(&cookie->task, (void *)posix_wrapper_fun, (int)cookie,
				  (attr) ? attr->stacksize : STACK_SIZE, (attr) ? attr->priority : RT_SCHED_LOWEST_PRIORITY, 1, 0)) {
			*thread = &cookie->task;
			rt_typed_sem_init(&cookie->sem, 0, BIN_SEM | FIFO_Q);
			rt_task_resume(&cookie->task);
			return 0;
		}
		rt_free(cookie);	/* free only what was actually allocated */
	}
	return ENOMEM;
}
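
/*
 * Minimal usage sketch (kernel space), assuming an RTAI scheduler module is
 * already loaded; the names 'worker' and 'tid' below are illustrative only:
 *
 *	static void *worker(void *arg) { ... real-time work ...; return NULL; }
 *
 *	pthread_t tid;
 *	pthread_create_rt(&tid, NULL, worker, NULL);
 *	...
 *	pthread_join_rt(tid, NULL);
 */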

static inline int pthread_yield_rt(void)
{
	rt_task_yield();
	return 0;
}

static inline void pthread_exit_rt(void *retval)
{
	RT_TASK *rt_task;
	rt_task = rt_whoami();
	rt_sem_broadcast((SEM *)(rt_task + 1));
	rt_sem_delete((SEM *)(rt_task + 1));
	rt_task_suspend(rt_task);
}

static inline int pthread_join_rt(pthread_t thread, void **thread_return)
{
	int retval1, retval2;
	if (rt_whoami()->priority != RT_SCHED_LINUX_PRIORITY)
		retval1 = rt_sem_wait((SEM *)(thread + 1));
	else {
		while ((retval1 = rt_sem_wait_if((SEM *)(thread + 1))) <= 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ/10);
		}
	}
	if (retval1 != 0xFFFF)
		retval1 = 0;
	retval2 = rt_task_delete(thread);
	rt_free(thread);
	return (retval1) ? retval1 : retval2;
}

static inline int pthread_cancel_rt(pthread_t thread)
{
	int retval;
	if (!thread) {
		thread = rt_whoami();
	}
	retval = rt_task_delete(thread);
	rt_free(thread);
	return retval;
}

static inline int pthread_equal_rt(pthread_t thread1, pthread_t thread2)
{
	return thread1 == thread2;
}

static inline pthread_t pthread_self_rt(void)
{
	return rt_whoami();
}

static inline int pthread_attr_init_rt(pthread_attr_t *attr)
{
	attr->stacksize = STACK_SIZE;
	attr->policy = SCHED_FIFO;
	attr->rr_quantum_ns = RR_QUANTUM_NS;
	attr->priority = 1;
	return 0;
}

static inline int pthread_attr_destroy_rt(pthread_attr_t *attr)
{
	return 0;
}

static inline int pthread_attr_setschedparam_rt(pthread_attr_t *attr, const struct sched_param *param)
{
	if (param->sched_priority < MIN_PRIO || param->sched_priority > MAX_PRIO) {
		return EINVAL;
	}
	attr->priority = MAX_PRIO - param->sched_priority;
	return 0;
}

static inline int pthread_attr_getschedparam_rt(const pthread_attr_t *attr, struct sched_param *param)
{
	param->sched_priority = MAX_PRIO - attr->priority;
	return 0;
}

static inline int pthread_attr_setschedpolicy_rt(pthread_attr_t *attr, int policy)
{
	if (policy != SCHED_FIFO && policy != SCHED_RR) {
		return EINVAL;
	}
	if ((attr->policy = policy) == SCHED_RR) {
		rt_set_sched_policy(rt_whoami(), SCHED_RR, attr->rr_quantum_ns);
	}
	return 0;
}

static inline int pthread_attr_getschedpolicy_rt(const pthread_attr_t *attr, int *policy)
{
	*policy = attr->policy;
	return 0;
}

static inline int pthread_attr_setschedrr_rt(pthread_attr_t *attr, int rr_quantum_ns)
{
	attr->rr_quantum_ns = rr_quantum_ns;
	return 0;
}

static inline int pthread_attr_getschedrr_rt(const pthread_attr_t *attr, int *rr_quantum_ns)
{
	*rr_quantum_ns = attr->rr_quantum_ns;
	return 0;
}

static inline int pthread_attr_setstacksize_rt(pthread_attr_t *attr, int stacksize)
{
	attr->stacksize = stacksize;
	return 0;
}

static inline int pthread_attr_getstacksize_rt(const pthread_attr_t *attr, int *stacksize)
{
	*stacksize = attr->stacksize;
	return 0;
}

static inline int pthread_attr_setstack_rt(pthread_attr_t *attr, void *stackaddr, int stacksize)
{
	attr->stacksize = stacksize;
	return 0;
}

static inline int pthread_attr_getstack_rt(const pthread_attr_t *attr, void **stackaddr, int *stacksize)
{
	*stacksize = attr->stacksize;
	return 0;
}

static inline void pthread_testcancel_rt(void)
{
	rt_task_delete(rt_whoami());
	pthread_exit_rt(NULL);
}

static inline void clock_gettime_rt(int clockid, struct timespec *current_time)
{
	count2timespec(rt_get_time(), current_time);
}

static inline int nanosleep_rt(const struct timespec *rqtp, struct timespec *rmtp)
{
	RTIME expire;
	if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
		return -EINVAL;
	}
	rt_sleep_until(expire = rt_get_time() + timespec2count(rqtp));
	if ((expire -= rt_get_time()) > 0) {
		if (rmtp) {
			count2timespec(expire, rmtp);
		}
		return -EINTR;
	}
	return 0;
}

/*
 * POSIX calls with no kernel-space counterpart: provided as empty macros so
 * that code using them still compiles, but they expand to nothing here.
 */
#define pthread_mutexattr_init_rt(attr)
#define pthread_mutexattr_destroy_rt(attr)
#define pthread_mutexattr_getpshared_rt(attr, pshared)
#define pthread_mutexattr_setpshared_rt(attr, pshared)
#define pthread_mutexattr_settype_rt(attr, kind)
#define pthread_mutexattr_gettype_rt(attr, kind)

#define pthread_condattr_init_rt(attr)
#define pthread_condattr_destroy_rt(attr)
#define pthread_condattr_getpshared_rt(attr, pshared)
#define pthread_condattr_setpshared_rt(attr, pshared)

#define pthread_barrierattr_getpshared_rt(attr, pshared)
#define pthread_barrierattr_setpshared_rt(attr, pshared)

#define pthread_rwlockattr_init(attr)
#define pthread_rwlockattr_destroy(attr)
#define pthread_rwlockattr_getpshared(attr, pshared)
#define pthread_rwlockattr_setpshared(attr, pshared)
#define pthread_rwlockattr_getkind_np(attr, pref)
#define pthread_rwlockattr_setkind_np(attr, pref)

#define pthread_detach_rt(thread)
#define pthread_attr_setdetachstate_rt(attr, detachstate)
#define pthread_attr_getdetachstate_rt(attr, detachstate)
#define pthread_setconcurrency_rt(level)
#define pthread_getconcurrency_rt()
#define pthread_attr_setinheritsched_rt(attr, inherit)
#define pthread_attr_getinheritsched_rt(attr, inherit)
#define pthread_attr_setscope_rt(attr, scope)
#define pthread_attr_getscope_rt(attr, scope)
#define pthread_attr_setguardsize_rt(attr, guardsize)
#define pthread_attr_getguardsize_rt(attr, guardsize)
#define pthread_attr_setstackaddr_rt(attr, stackaddr)
#define pthread_attr_getstackaddr_rt(attr, stackaddr)
#define pthread_setcancelstate_rt(state, oldstate)
#define pthread_setcanceltype_rt(type, oldtype)

#ifdef __cplusplus
}
#endif

#else

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <semaphore.h>
#include <pthread.h>
#include <stdlib.h>

struct task_struct;

#undef SEM_VALUE_MAX
#define SEM_VALUE_MAX (SEM_TIMOUT - 1)
#define SEM_BINARY (0x7FFFFFFF)

#include <asm/rtai_atomic.h>
#include <rtai_sem.h>

static inline int MAKE_SOFT(void)
{
	if (rt_is_hard_real_time(rt_buddy())) {
		rt_make_soft_real_time();
		return 1;
	}
	return 0;
}

#define MAKE_HARD(hs) do { if (hs) rt_make_hard_real_time(); } while (0)
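
/*
 * Pattern used by most wrappers below: drop to soft real time around calls
 * that may enter the Linux kernel (open/read/write, pthread_*, malloc/free),
 * then restore the previous mode. MAKE_SOFT() returns 1 only if the caller
 * was hard real time, so MAKE_HARD(hs) is a no-op for soft tasks, e.g.:
 *
 *	int hs = MAKE_SOFT();
 *	ret = some_linux_call();
 *	MAKE_HARD(hs);
 */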

#ifdef __cplusplus
extern "C" {
#endif

RTAI_PROTO(void, count2timespec,(RTIME rt, struct timespec *t))
{
	t->tv_sec = (rt = count2nano(rt))/1000000000;
	t->tv_nsec = rt - t->tv_sec*1000000000LL;
}

RTAI_PROTO(void, nanos2timespec,(RTIME rt, struct timespec *t))
{
	t->tv_sec = rt/1000000000;
	t->tv_nsec = rt - t->tv_sec*1000000000LL;
}

RTAI_PROTO(RTIME, timespec2count,(const struct timespec *t))
{
	return nano2count(t->tv_sec*1000000000LL + t->tv_nsec);
}

RTAI_PROTO(RTIME, timespec2nanos,(const struct timespec *t))
{
	return t->tv_sec*1000000000LL + t->tv_nsec;
}
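
/*
 * The timed calls below take an absolute time expressed as a struct timespec
 * and convert it with timespec2count(). A sketch of how a caller could build
 * "10 ms from now" (assuming the timer is running and 'sem' is a sem_t
 * initialized with sem_init_rt()):
 *
 *	struct timespec abstime;
 *	count2timespec(rt_get_time() + nano2count(10000000LL), &abstime);
 *	sem_timedwait_rt(&sem, &abstime);
 */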

RTAI_PROTO(sem_t *,sem_open_rt,(const char *name, int oflags, int value, int type))
{
	int hs, fd;
	sem_t *sem;
	hs = MAKE_SOFT();
	if ((fd = open(name, O_RDONLY)) > 0) {
		read(fd, &sem, sizeof(int));
		close(fd);
		atomic_inc((atomic_t *)(&((int *)sem)[1]));
	} else {
		struct { int name, value, type; } arg = { nam2num(name), value, (type == SEM_BINARY ? BIN_SEM : CNT_SEM) | PRIO_Q };
		sem = (sem_t *)malloc(sizeof(sem_t));
		/* O_CREAT requires a mode argument; 0666 is assumed here. */
		if ((((int *)sem)[0] = rtai_lxrt(BIDX, SIZARG, LXRT_SEM_INIT, &arg).i[LOW]) && (fd = open(name, O_WRONLY | O_CREAT, 0666))) {
			write(fd, &sem, sizeof(int));
			close(fd);
			((int *)sem)[1] = 1;
		} else {
			free(sem);
			sem = 0;
		}
	}
	MAKE_HARD(hs);
	return sem;
}
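
/*
 * Layout note: for the user-space objects above and below, the first int of
 * the sem_t/pthread_mutex_t/pthread_rwlock_t storage holds the LXRT object
 * returned by the kernel, and the second int is a reference count used only
 * by the named open/close variants.
 */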

RTAI_PROTO(int, sem_init_rt,(sem_t *sem, int pshared, unsigned int value))
{
	int hs;
	if (value <= SEM_VALUE_MAX) {
		struct { int name, value, type; } arg = { rt_get_name(0), value, (pshared == SEM_BINARY ? BIN_SEM : CNT_SEM) | PRIO_Q };
		hs = MAKE_SOFT();
		((int *)sem)[0] = rtai_lxrt(BIDX, SIZARG, LXRT_SEM_INIT, &arg).i[LOW];
		((int *)sem)[1] = 0;
		MAKE_HARD(hs);
		return 0;
	}
	errno = EINVAL;
	return -1;
}

RTAI_PROTO(int, sem_close_rt,(sem_t *sem))
{
	int hs, cnt;
	char name[7];
	struct { void *sem; } arg = { ((void **)sem)[0] };
	if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] < 0) {
		errno = EBUSY;
		return -1;
	}
	cnt = ((int *)sem)[1];
	if (!cnt || (cnt && atomic_dec_and_test((atomic_t *)&((int *)sem)[1]))) {
		hs = MAKE_SOFT();
		num2nam(rt_get_name(((void **)sem)[0]), name);
		rtai_lxrt(BIDX, SIZARG, LXRT_SEM_DELETE, &arg);
		if (cnt) {
			unlink(name);
			free((void *)sem);
		}
		MAKE_HARD(hs);
	}
	return 0;
}

RTAI_PROTO(int, sem_destroy_rt,(sem_t *sem))
{
	return sem_close_rt(sem);
}

RTAI_PROTO(int, pthread_create_rt,(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine)(void *), void *arg))
{
	int hs, ret;
	hs = MAKE_SOFT();
	ret = pthread_create(thread, attr, start_routine, arg);
	MAKE_HARD(hs);
	return ret;
}

RTAI_PROTO(int, pthread_cancel_rt,(pthread_t thread))
{
	int hs, ret;
	hs = MAKE_SOFT();
	ret = pthread_cancel(thread);
	MAKE_HARD(hs);
	return ret;
}

#define pthread_cleanup_push_rt(routine, arg) \
	do { \
		{ \
			int __hs_hs_hs__; \
			__hs_hs_hs__ = MAKE_SOFT(); \
			pthread_cleanup_push(routine, arg); \
			MAKE_HARD(__hs_hs_hs__);

#define pthread_cleanup_pop_rt(execute) \
			__hs_hs_hs__ = MAKE_SOFT(); \
			pthread_cleanup_pop(execute); \
			MAKE_HARD(__hs_hs_hs__); \
		} \
	} while (0)

#define pthread_cleanup_push_defer_rt(routine, arg) \
	do { \
		{ \
			int __hs_hs_hs__; \
			__hs_hs_hs__ = MAKE_SOFT(); \
			pthread_cleanup_push_defer_np(routine, arg); \
			MAKE_HARD(__hs_hs_hs__);

#define pthread_cleanup_pop_restore_rt(execute) \
			__hs_hs_hs__ = MAKE_SOFT(); \
			pthread_cleanup_pop_restore_np(execute); \
			MAKE_HARD(__hs_hs_hs__); \
		} \
	} while (0)

RTAI_PROTO(int, pthread_sigmask_rt,(int how, const sigset_t *newmask, sigset_t *oldmask))
{
	int hs, ret;
	hs = MAKE_SOFT();
	ret = pthread_sigmask(how, newmask, oldmask);
	MAKE_HARD(hs);
	return ret;
}

RTAI_PROTO(int, pthread_kill_rt,(pthread_t thread, int signo))
{
	int hs, ret;
	hs = MAKE_SOFT();
	ret = pthread_kill(thread, signo);
	MAKE_HARD(hs);
	return ret;
}

RTAI_PROTO(int, sigwait_rt,(const sigset_t *set, int *sig))
{
	int hs, ret;
	hs = MAKE_SOFT();
	ret = sigwait(set, sig);
	MAKE_HARD(hs);
	return ret;
}

RTAI_PROTO(pthread_mutex_t *, pthread_mutex_open_rt,(const char *name))
{
	int hs, fd;
	pthread_mutex_t *mutex;
	hs = MAKE_SOFT();
	if ((fd = open(name, O_RDONLY)) > 0) {
		read(fd, &mutex, sizeof(int));
		close(fd);
		atomic_inc((atomic_t *)(&((int *)mutex)[1]));
	} else {
		struct { int name, value, type; } arg = { nam2num(name), 1, RES_SEM };
		mutex = (pthread_mutex_t *)malloc(sizeof(pthread_mutex_t));
		/* O_CREAT requires a mode argument; 0666 is assumed here. */
		if ((((int *)mutex)[0] = rtai_lxrt(BIDX, SIZARG, LXRT_SEM_INIT, &arg).i[LOW]) && (fd = open(name, O_WRONLY | O_CREAT, 0666))) {
			write(fd, &mutex, sizeof(int));
			close(fd);
			((int *)mutex)[1] = 1;
		} else {
			free(mutex);
			mutex = 0;
		}
	}
	MAKE_HARD(hs);
	return mutex;
}

RTAI_PROTO(int, pthread_mutex_init_rt,(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr))
{
	int hs;
	struct { int name, value, type; } arg = { rt_get_name(0), 1, RES_SEM };
	hs = MAKE_SOFT();
	((int *)mutex)[0] = rtai_lxrt(BIDX, SIZARG, LXRT_SEM_INIT, &arg).i[LOW];
	((int *)mutex)[1] = 0;
	MAKE_HARD(hs);
	return 0;
}

RTAI_PROTO(int, pthread_mutex_close_rt,(pthread_mutex_t *mutex))
{
	int hs, cnt;
	char name[7];
	struct { void *sem; } arg = { ((void **)mutex)[0] };
	if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] < 0) {
		return EBUSY;
	}
	cnt = ((int *)mutex)[1];
	if (!cnt || (cnt && atomic_dec_and_test((atomic_t *)&((int *)mutex)[1]))) {
		hs = MAKE_SOFT();
		num2nam(rt_get_name(((void **)mutex)[0]), name);
		rtai_lxrt(BIDX, SIZARG, LXRT_SEM_DELETE, &arg);
		if (cnt) {
			unlink(name);
			free((void *)mutex);
		}
		MAKE_HARD(hs);
	}
	return 0;
}

RTAI_PROTO(int, pthread_mutex_destroy_rt,(pthread_mutex_t *mutex))
{
	return pthread_mutex_close_rt(mutex);
}

RTAI_PROTO(pthread_cond_t *, pthread_cond_open_rt,(const char *name))
{
	return (pthread_cond_t *)sem_open_rt(name, 0, 0, SEM_BINARY);
}

RTAI_PROTO(int, pthread_cond_init_rt,(pthread_cond_t *cond, pthread_condattr_t *cond_attr))
{
	return sem_init_rt((sem_t *)cond, SEM_BINARY, 0);
}

RTAI_PROTO(int, pthread_cond_destroy_rt,(pthread_cond_t *cond))
{
	return sem_close_rt((sem_t *)cond);
}

RTAI_PROTO(int, pthread_cond_close_rt,(pthread_cond_t *cond))
{
	return sem_close_rt((sem_t *)cond);
}

#ifdef __USE_XOPEN2K
RTAI_PROTO(pthread_barrier_t *, pthread_barrier_open_rt,(const char *name, unsigned int count))
{
	return (pthread_barrier_t *)sem_open_rt(name, 0, count, 0);
}

RTAI_PROTO(int, pthread_barrier_init_rt,(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count))
{
	return sem_init_rt((sem_t *)barrier, 0, count);
}

RTAI_PROTO(int, pthread_barrier_destroy_rt,(pthread_barrier_t *barrier))
{
	return sem_close_rt((sem_t *)barrier);
}

RTAI_PROTO(int, pthread_barrier_close_rt,(pthread_barrier_t *barrier))
{
	return sem_close_rt((sem_t *)barrier);
}
#endif

/*
 * POSIX calls not mapped onto RTAI services: empty macros kept only for
 * source compatibility, they expand to nothing.
 */
#define pthread_attr_setdetachstate_rt(attr, detachstate)
#define pthread_detach_rt(thread)
#define pthread_getconcurrency_rt()
#define pthread_setconcurrency_rt(level)

#define pthread_mutexattr_init_rt(attr)
#define pthread_mutexattr_destroy_rt(attr)
#define pthread_mutexattr_getpshared_rt(attr, pshared)
#define pthread_mutexattr_setpshared_rt(attr, pshared)
#define pthread_mutexattr_settype_rt(attr, kind)
#define pthread_mutexattr_gettype_rt(attr, kind)

#define pthread_condattr_init_rt(attr)
#define pthread_condattr_destroy_rt(attr)
#define pthread_condattr_getpshared_rt(attr, pshared)
#define pthread_condattr_setpshared_rt(attr, pshared)
#ifdef __USE_XOPEN2K
#define pthread_barrierattr_getpshared_rt(attr, pshared)
#define pthread_barrierattr_setpshared_rt(attr, pshared)
#endif
#define pthread_rwlockattr_init(attr)
#define pthread_rwlockattr_destroy(attr)
#define pthread_rwlockattr_getpshared(attr, pshared)
#define pthread_rwlockattr_setpshared(attr, pshared)
#define pthread_rwlockattr_getkind_np(attr, pref)
#define pthread_rwlockattr_setkind_np(attr, pref)

#define pthread_self_rt pthread_self
#define pthread_equal_rt pthread_equal
#define pthread_attr_init_rt pthread_attr_init
#define pthread_attr_destroy_rt pthread_attr_destroy
#define pthread_attr_getdetachstate_rt pthread_attr_getdetachstate
#define pthread_attr_setschedpolicy_rt pthread_attr_setschedpolicy
#define pthread_attr_getschedpolicy_rt pthread_attr_getschedpolicy
#define pthread_attr_setschedparam_rt pthread_attr_setschedparam
#define pthread_attr_getschedparam_rt pthread_attr_getschedparam
#define pthread_attr_setinheritsched_rt pthread_attr_setinheritsched
#define pthread_attr_getinheritsched_rt pthread_attr_getinheritsched
#define pthread_attr_setscope_rt pthread_attr_setscope
#define pthread_attr_getscope_rt pthread_attr_getscope
#ifdef __USE_UNIX98
#define pthread_attr_setguardsize_rt pthread_attr_setguardsize
#define pthread_attr_getguardsize_rt pthread_attr_getguardsize
#endif
#define pthread_attr_setstackaddr_rt pthread_attr_setstackaddr
#define pthread_attr_getstackaddr_rt pthread_attr_getstackaddr
#ifdef __USE_XOPEN2K
#define pthread_attr_setstack_rt pthread_attr_setstack
#define pthread_attr_getstack_rt pthread_attr_getstack
#endif
#define pthread_attr_setstacksize_rt pthread_attr_setstacksize
#define pthread_attr_getstacksize_rt pthread_attr_getstacksize

#define pthread_setcancelstate_rt pthread_setcancelstate
#define pthread_setcanceltype_rt pthread_setcanceltype

RTAI_PROTO(void, pthread_testcancel_rt,(void))
{
	int oldtype, oldstate;
	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
	if (oldstate != PTHREAD_CANCEL_DISABLE && oldtype != PTHREAD_CANCEL_DEFERRED) {
		MAKE_SOFT();
		rt_task_delete(rt_buddy());
		pthread_exit(NULL);
	}
	pthread_setcanceltype(oldtype, &oldtype);
	pthread_setcancelstate(oldstate, &oldstate);
}

extern int pthread_yield(void);
RTAI_PROTO(int, pthread_yield_rt,(void))
{
	if (rt_is_hard_real_time(rt_buddy())) {
		struct { unsigned long dummy; } arg;
		rtai_lxrt(BIDX, SIZARG, YIELD, &arg);
		return 0;
	}
	return pthread_yield();
}

RTAI_PROTO(void, pthread_exit_rt,(void *retval))
{
	MAKE_SOFT();
	rt_task_delete(rt_buddy());
	pthread_exit(retval);
}

RTAI_PROTO(int, pthread_join_rt,(pthread_t thread, void **thread_return))
{
	int hs, ret;
	hs = MAKE_SOFT();
	ret = pthread_join(thread, thread_return);
	MAKE_HARD(hs);
	return ret;
}

RTAI_PROTO(int, sem_wait_rt,(sem_t *sem))
{
	struct { void *sem; } arg = { ((void **)sem)[0] };
	rtai_lxrt(BIDX, SIZARG, SEM_WAIT, &arg);
	return 0;
}

RTAI_PROTO(int, sem_trywait_rt,(sem_t *sem))
{
	struct { void *sem; } arg = { ((void **)sem)[0] };
	if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] > 0) {
		return 0;
	}
	errno = EAGAIN;
	return -1;
}

RTAI_PROTO(int, sem_timedwait_rt,(sem_t *sem, const struct timespec *abstime))
{
	struct { void *sem; RTIME until; } arg = { ((void **)sem)[0], timespec2count(abstime) };
	return rtai_lxrt(BIDX, SIZARG, SEM_WAIT_UNTIL, &arg).i[LOW] < SEM_VALUE_MAX ? 0 : ETIMEDOUT;
}

RTAI_PROTO(int, sem_post_rt,(sem_t *sem))
{
	struct { void *sem; } arg = { ((void **)sem)[0] };
	return rtai_lxrt(BIDX, SIZARG, SEM_SIGNAL, &arg).i[LOW];
}

RTAI_PROTO(int, sem_getvalue_rt,(sem_t *sem, int *sval))
{
	struct { void *sem; } arg = { ((void **)sem)[0] };
	*sval = rtai_lxrt(BIDX, SIZARG, SEM_COUNT, &arg).i[LOW];
	return 0;
}

RTAI_PROTO(int, pthread_mutex_lock_rt,(pthread_mutex_t *mutex))
{
	return sem_wait_rt((sem_t *)mutex);
}

#ifdef __USE_XOPEN2K
RTAI_PROTO(int, pthread_mutex_timedlock_rt,(pthread_mutex_t *mutex, const struct timespec *abstime))
{
	return sem_timedwait_rt((sem_t *)mutex, abstime);
}
#endif

RTAI_PROTO(int, pthread_mutex_trylock_rt,(pthread_mutex_t *mutex))
{
	return sem_trywait_rt((sem_t *)mutex);
}

RTAI_PROTO(int, pthread_mutex_unlock_rt,(pthread_mutex_t *mutex))
{
	return sem_post_rt((sem_t *)mutex);
}

RTAI_PROTO(int, pthread_cond_signal_rt,(pthread_cond_t *cond))
{
	struct { void *cond; } arg = { ((void **)cond)[0] };
	return rtai_lxrt(BIDX, SIZARG, COND_SIGNAL, &arg).i[LOW];
}

RTAI_PROTO(int, pthread_cond_broadcast_rt,(pthread_cond_t *cond))
{
	struct { void *cond; } arg = { ((void **)cond)[0] };
	return rtai_lxrt(BIDX, SIZARG, SEM_BROADCAST, &arg).i[LOW];
}

RTAI_PROTO(int, pthread_cond_wait_rt,(pthread_cond_t *cond, pthread_mutex_t *mutex))
{
	struct { void *cond; void *mutex; } arg = { ((void **)cond)[0], ((void **)mutex)[0] };
	return rtai_lxrt(BIDX, SIZARG, COND_WAIT, &arg).i[LOW];
}

RTAI_PROTO(int, pthread_cond_timedwait_rt,(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime))
{
	struct { void *cond; void *mutex; RTIME time; } arg = { ((void **)cond)[0], ((void **)mutex)[0], timespec2count(abstime) };
	return rtai_lxrt(BIDX, SIZARG, COND_WAIT_UNTIL, &arg).i[LOW] < SEM_TIMOUT ? 0 : -ETIMEDOUT;
}

#ifdef __USE_XOPEN2K
RTAI_PROTO(int, pthread_barrier_wait_rt,(pthread_barrier_t *barrier))
{
	struct { void *sem; } arg = { ((void **)barrier)[0] };
	rtai_lxrt(BIDX, SIZARG, SEM_WAIT_BARRIER, &arg);
	return 0;
}
#endif

#ifdef __USE_UNIX98
RTAI_PROTO(pthread_rwlock_t *, pthread_rwlock_open_rt,(const char *name))
{
	int hs, fd;
	pthread_rwlock_t *rwlock;
	hs = MAKE_SOFT();
	if ((fd = open(name, O_RDONLY)) > 0) {
		read(fd, &rwlock, sizeof(int));
		close(fd);
		atomic_inc((atomic_t *)(&((int *)rwlock)[1]));
	} else {
		struct { int name, value, type; } arg = { nam2num(name), 1, RES_SEM };
		rwlock = (pthread_rwlock_t *)malloc(sizeof(pthread_rwlock_t));
		/* O_CREAT requires a mode argument; 0666 is assumed here. */
		if ((((int *)rwlock)[0] = rtai_lxrt(BIDX, SIZARG, LXRT_RWL_INIT, &arg).i[LOW]) && (fd = open(name, O_WRONLY | O_CREAT, 0666))) {
			write(fd, &rwlock, sizeof(int));
			close(fd);
			((int *)rwlock)[1] = 1;
		} else {
			free(rwlock);
			rwlock = 0;
		}
	}
	MAKE_HARD(hs);
	return rwlock;
}
#endif

RTAI_PROTO(int, pthread_rwlock_init_rt,(pthread_rwlock_t *rwlock, pthread_rwlockattr_t *attr))
{
	int hs;
	struct { int name; } arg = { rt_get_name(0) };
	hs = MAKE_SOFT();
	((int *)rwlock)[0] = rtai_lxrt(BIDX, SIZARG, LXRT_RWL_INIT, &arg).i[LOW];
	((int *)rwlock)[1] = 0;
	MAKE_HARD(hs);
	return 0;
}

RTAI_PROTO(int, pthread_rwlock_close_rt,(pthread_rwlock_t *rwlock))
{
	int hs, cnt;
	char name[7];
	struct { void *rwlock; } arg = { ((void **)rwlock)[0] };
	if (rtai_lxrt(BIDX, SIZARG, RWL_WRLOCK_IF, &arg).i[LOW] < 0) {
		return EBUSY;
	} else {
		rtai_lxrt(BIDX, SIZARG, RWL_UNLOCK, &arg);
		if (rtai_lxrt(BIDX, SIZARG, RWL_RDLOCK_IF, &arg).i[LOW] < 0) {
			return EBUSY;
		}
		rtai_lxrt(BIDX, SIZARG, RWL_UNLOCK, &arg);
	}
	cnt = ((int *)rwlock)[1];
	if (!cnt || (cnt && atomic_dec_and_test((atomic_t *)&((int *)rwlock)[1]))) {
		hs = MAKE_SOFT();
		num2nam(rt_get_name(((void **)rwlock)[0]), name);
		rtai_lxrt(BIDX, SIZARG, LXRT_RWL_DELETE, &arg);
		if (cnt) {
			unlink(name);
			free((void *)rwlock);
		}
		MAKE_HARD(hs);
	}
	return 0;
}

RTAI_PROTO(int, pthread_rwlock_destroy_rt,(pthread_rwlock_t *rwlock))
{
	return pthread_rwlock_close_rt(rwlock);
}

RTAI_PROTO(int, pthread_rwlock_rdlock_rt,(pthread_rwlock_t *rwlock))
{
	struct { void *rwlock; } arg = { ((void **)rwlock)[0] };
	return rtai_lxrt(BIDX, SIZARG, RWL_RDLOCK, &arg).i[LOW];
}

RTAI_PROTO(int, pthread_rwlock_tryrdlock_rt,(pthread_rwlock_t *rwlock))
{
	struct { void *rwlock; } arg = { ((void **)rwlock)[0] };
	return rtai_lxrt(BIDX, SIZARG, RWL_RDLOCK_IF, &arg).i[LOW];
}

#ifdef __USE_XOPEN2K
RTAI_PROTO(int, pthread_rwlock_timedrdlock_rt,(pthread_rwlock_t *rwlock, struct timespec *abstime))
{
	struct { void *rwlock; RTIME time; } arg = { ((void **)rwlock)[0], timespec2count(abstime) };
	return rtai_lxrt(BIDX, SIZARG, RWL_RDLOCK_UNTIL, &arg).i[LOW];
}
#endif

RTAI_PROTO(int, pthread_rwlock_wrlock_rt,(pthread_rwlock_t *rwlock))
{
	struct { void *rwlock; } arg = { ((void **)rwlock)[0] };
	return rtai_lxrt(BIDX, SIZARG, RWL_WRLOCK, &arg).i[LOW];
}

RTAI_PROTO(int, pthread_rwlock_trywrlock_rt,(pthread_rwlock_t *rwlock))
{
	struct { void *rwlock; } arg = { ((void **)rwlock)[0] };
	return rtai_lxrt(BIDX, SIZARG, RWL_WRLOCK_IF, &arg).i[LOW];
}

#ifdef __USE_XOPEN2K
RTAI_PROTO(int, pthread_rwlock_timedwrlock_rt,(pthread_rwlock_t *rwlock, struct timespec *abstime))
{
	struct { void *rwlock; RTIME time; } arg = { ((void **)rwlock)[0], timespec2count(abstime) };
	return rtai_lxrt(BIDX, SIZARG, RWL_WRLOCK_UNTIL, &arg).i[LOW];
}
#endif

RTAI_PROTO(int, pthread_rwlock_unlock_rt,(pthread_rwlock_t *rwlock))
{
	struct { void *rwlock; } arg = { ((void **)rwlock)[0] };
	return rtai_lxrt(BIDX, SIZARG, RWL_UNLOCK, &arg).i[LOW];
}

#ifdef __USE_XOPEN2K
RTAI_PROTO(int, pthread_spin_init_rt,(pthread_spinlock_t *lock))
{
	return (((int *)lock)[0] = 0);
}

RTAI_PROTO(int, pthread_spin_destroy_rt,(pthread_spinlock_t *lock))
{
	return ((int *)lock)[0] = 0;
}

RTAI_PROTO(int, pthread_spin_lock_rt,(pthread_spinlock_t *lock))
{
	/* Spin on the lock word itself, not on the local pointer variable. */
	while (atomic_cmpxchg(lock, 0, 1));
	return 0;
}

RTAI_PROTO(int, pthread_spin_trylock_rt,(pthread_spinlock_t *lock))
{
	if (atomic_cmpxchg(lock, 0, 1)) {
		return EAGAIN;
	}
	return 0;
}

RTAI_PROTO(int, pthread_spin_unlock_rt,(pthread_spinlock_t *lock))
{
	return ((int *)lock)[0] = 0;
}
#endif
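
/*
 * Note: these user-space spinlocks are just a word spun on with
 * atomic_cmpxchg(); they never yield the CPU, so they are only suitable for
 * very short critical sections between hard real-time LXRT threads.
 */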

RTAI_PROTO(void, clock_gettime_rt,(int clockid, struct timespec *current_time))
{
	count2timespec(rt_get_time(), current_time);
}

RTAI_PROTO(int, nanosleep_rt,(const struct timespec *rqtp, struct timespec *rmtp))
{
	RTIME expire;
	if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
		return -EINVAL;
	}
	rt_sleep_until(expire = rt_get_time() + timespec2count(rqtp));
	if ((expire -= rt_get_time()) > 0) {
		if (rmtp) {
			count2timespec(expire, rmtp);
		}
		return -EINTR;
	}
	return 0;
}

#ifdef __cplusplus
}
#endif

#endif /* __KERNEL__ */

#endif /* !_RTAI_POSIX_H_ */