base/include/rtai_posix.h

00001 /*
00002  * Copyright (C) 1999-2006 Paolo Mantegazza <mantegazza@aero.polimi.it>
00003  *
00004  * This library is free software; you can redistribute it and/or
00005  * modify it under the terms of the GNU Lesser General Public
00006  * License as published by the Free Software Foundation; either
00007  * version 2 of the License, or (at your option) any later version.
00008  *
00009  * This library is distributed in the hope that it will be useful,
00010  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00011  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00012  * Lesser General Public License for more details.
00013  *
00014  * You should have received a copy of the GNU Lesser General Public
00015  * License along with this library; if not, write to the Free Software
00016  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
00017  *
00018  */
00019 
00020 #ifndef _RTAI_POSIX_H_
00021 #define _RTAI_POSIX_H_
00022 
00023 #define sem_open_rt                     sem_open
00024 #define sem_close_rt                    sem_close
00025 #define sem_init_rt                     sem_init
00026 #define sem_destroy_rt                  sem_destroy
00027 #define sem_wait_rt                     sem_wait
00028 #define sem_trywait_rt                  sem_trywait
00029 #define sem_timedwait_rt                sem_timedwait
00030 #define sem_post_rt                     sem_post
00031 #define sem_getvalue_rt                 sem_getvalue
00032 
00033 #define pthread_mutex_open_rt           pthread_mutex_open
00034 #define pthread_mutex_close_rt          pthread_mutex_close
00035 #define pthread_mutex_init_rt           pthread_mutex_init
00036 #define pthread_mutex_destroy_rt        pthread_mutex_destroy
00037 #define pthread_mutex_lock_rt           pthread_mutex_lock
00038 #define pthread_mutex_timedlock_rt      pthread_mutex_timedlock
00039 #define pthread_mutex_trylock_rt        pthread_mutex_trylock
00040 #define pthread_mutex_unlock_rt         pthread_mutex_unlock
00041 
00042 #define pthread_cond_open_rt            pthread_cond_open
00043 #define pthread_cond_close_rt           pthread_cond_close
00044 #define pthread_cond_init_rt            pthread_cond_init
00045 #define pthread_cond_destroy_rt         pthread_cond_destroy
00046 #define pthread_cond_signal_rt          pthread_cond_signal
00047 #define pthread_cond_broadcast_rt       pthread_cond_broadcast
00048 #define pthread_cond_wait_rt            pthread_cond_wait
00049 #define pthread_cond_timedwait_rt       pthread_cond_timedwait
00050 
00051 #define pthread_barrier_open_rt         pthread_barrier_open
00052 #define pthread_barrier_close_rt        pthread_barrier_close
00053 #define pthread_barrier_init_rt         pthread_barrier_init
00054 #define pthread_barrier_destroy_rt      pthread_barrier_destroy
00055 #define pthread_barrier_wait_rt         pthread_barrier_wait
00056 
00057 #define pthread_rwlock_open_rt          pthread_rwlock_open
00058 #define pthread_rwlock_close_rt         pthread_rwlock_close
00059 #define pthread_rwlock_init_rt          pthread_rwlock_init
00060 #define pthread_rwlock_destroy_rt       pthread_rwlock_destroy
00061 #define pthread_rwlock_rdlock_rt        pthread_rwlock_rdlock
00062 #define pthread_rwlock_tryrdlock_rt     pthread_rwlock_tryrdlock
00063 #define pthread_rwlock_timedrdlock_rt   pthread_rwlock_timedrdlock
00064 #define pthread_rwlock_wrlock_rt        pthread_rwlock_wrlock
00065 #define pthread_rwlock_trywrlock_rt     pthread_rwlock_trywrlock
00066 #define pthread_rwlock_timedwrlock_rt   pthread_rwlock_timedwrlock
00067 #define pthread_rwlock_unlock_rt        pthread_rwlock_unlock
00068 
00069 #define pthread_spin_init_rt            pthread_spin_init
00070 #define pthread_spin_destroy_rt         pthread_spin_destroy
00071 #define pthread_spin_lock_rt            pthread_spin_lock
00072 #define pthread_spin_trylock_rt         pthread_spin_trylock
00073 #define pthread_spin_unlock_rt          pthread_spin_unlock
00074 
00075 #define sched_get_max_priority_rt       sched_get_max_priority
00076 #define sched_get_min_priority_rt       sched_get_min_priority
00077 
00078 #define pthread_create_rt               pthread_create
00079 #define pthread_yield_rt                pthread_yield
00080 #define pthread_exit_rt                 pthread_exit
00081 #define pthread_join_rt                 pthread_join
00082 #define pthread_cancel_rt               pthread_cancel
00083 #define pthread_equal_rt                pthread_equal
00084 #define pthread_self_rt                 pthread_self
00085 #define pthread_attr_init_rt            pthread_attr_init
00086 #define pthread_attr_destroy_rt         pthread_attr_destroy
00087 #define pthread_attr_setschedparam_rt   pthread_attr_setschedparam
00088 #define pthread_attr_getschedparam_rt   pthread_attr_getschedparam
00089 #define pthread_attr_setschedpolicy_rt  pthread_attr_setschedpolicy
00090 #define pthread_attr_getschedpolicy_rt  pthread_attr_getschedpolicy
00091 #define pthread_attr_setschedrr_rt      pthread_attr_setschedrr
00092 #define pthread_attr_getschedrr_rt      pthread_attr_getschedrr
00093 #define pthread_attr_setstacksize_rt    pthread_attr_setstacksize
00094 #define pthread_attr_getstacksize_rt    pthread_attr_getstacksize
00095 #define pthread_attr_setstack_rt        pthread_attr_setstack
00096 #define pthread_attr_getstack_rt        pthread_attr_getstack
00097 #define pthread_testcancel_rt           pthread_testcancel
00098 
00099 #define clock_gettime_rt                clock_gettime
00100 #define nanosleep_rt                    nanosleep
00101 
00102 #define pthread_cleanup_push_rt         pthread_cleanup_push
00103 #define pthread_cleanup_pop_rt          pthread_cleanup_pop
00104 
00105 /*
00106  * _RT DO NOTHING FUNCTIONS 
00107  */
00108 
00109 #define pthread_attr_setdetachstate_rt(attr, detachstate)
00110 #define pthread_detach_rt(thread)
00111 #define pthread_setconcurrency_rt(level)
00112 
00113 #ifdef __KERNEL__
00114 
00115 /*
00116  * KERNEL DO NOTHING FUNCTIONS (FOR RTAI HARD REAL TIME)
00117  */
00118 
00119 #define pthread_setcanceltype_rt(type, oldtype)
00120 #define pthread_setcancelstate_rt(state, oldstate)
00121 #define pthread_attr_getstackaddr_rt(attr, stackaddr) 
00122 #define pthread_attr_setstackaddr_rt(attr, stackaddr)
00123 #define pthread_attr_setguardsize_rt(attr, guardsize) 
00124 #define pthread_attr_getguardsize_rt(attr, guardsize)
00125 #define pthread_attr_setscope_rt(attr, scope)
00126 #define pthread_attr_getscope_rt(attr, scope)
00127 #define pthread_attr_getdetachstate_rt(attr, detachstate)
00128 #define pthread_attr_getdetachstate(attr, detachstate)
00129 #define pthread_attr_setinheritsched_rt(attr, inherit)
00130 #define pthread_attr_getinheritsched_rt(attr, inherit)
00131 #define pthread_attr_setinheritsched(attr, inherit)
00132 #define pthread_attr_getinheritsched(attr, inherit)
00133 
00134 #include <linux/fcntl.h>
00135 
00136 #include <rtai_malloc.h>
00137 #include <rtai_rwl.h>
00138 #include <rtai_spl.h>
00139 #include <rtai_sem.h>
00140 #include <rtai_sched.h>
00141 #include <rtai_schedcore.h>
00142 
00143 
00144 #define SET_ADR(s)     (((void **)s)[0])
00145 
00146 #define RTAI_PNAME_MAXSZ  6
00147 #define PTHREAD_BARRIER_SERIAL_THREAD -1
00148 
00149 
00150 #ifndef MAX_PRIO
00151 #define MAX_PRIO  99
00152 #endif
00153 #ifndef MIN_PRIO
00154 #define MIN_PRIO  1
00155 #endif
00156 
00157 #ifndef CLOCK_REALTIME
00158 #define CLOCK_REALTIME  0
00159 #endif
00160 
00161 #ifndef CLOCK_MONOTONIC
00162 #define CLOCK_MONOTONIC  1
00163 #endif
00164 
00165 #define STACK_SIZE     8192
00166 #define RR_QUANTUM_NS  1000000
00167 
00168 typedef struct { SEM sem; } sem_t;
00169 
00170 typedef struct { SEM mutex; } pthread_mutex_t;
00171 
00172 typedef unsigned long pthread_mutexattr_t;
00173 
00174 typedef struct { SEM cond; } pthread_cond_t;
00175 
00176 typedef unsigned long pthread_condattr_t;
00177 
00178 typedef struct { SEM barrier; } pthread_barrier_t;
00179 
00180 typedef unsigned long pthread_barrierattr_t;
00181 
00182 typedef struct { RWL rwlock; } pthread_rwlock_t;
00183 
00184 typedef unsigned long pthread_rwlockattr_t;
00185 
00186 typedef unsigned long pthread_spinlock_t;
00187 
00188 typedef struct rt_task_struct *pthread_t;
00189 
00190 typedef struct pthread_attr {
00191     int stacksize;
00192     int policy;
00193     int rr_quantum_ns;
00194     int priority;
00195 } pthread_attr_t;
00196 
00197 typedef struct pthread_cookie {
00198     RT_TASK task;
00199     SEM sem;
00200     void (*task_fun)(long);
00201     long arg;
00202     void *cookie;
00203 } pthread_cookie_t;
00204 
00205 #ifdef __cplusplus
00206 extern "C" {
00207 #endif /* __cplusplus */
00208 
00209 /*
00210  * SEMAPHORES
00211  */
00212 
00213 static inline sem_t *sem_open(const char *namein, int oflags, int value, int type)
00214 {
00215     char nametmp[RTAI_PNAME_MAXSZ + 1];
00216     int i;
00217     if (strlen(namein) > RTAI_PNAME_MAXSZ) {
00218         return (sem_t *)-ENAMETOOLONG;
00219     }
00220     
00221     for(i = 0; i < strlen(namein); i++) {
00222         if ((nametmp[i] = namein [i]) >= 'a' && nametmp[i] <= 'z') nametmp[i] += 'A' - 'a';
00223     }
00224     nametmp[i]='\0';
00225     if (!oflags || value <= SEM_TIMOUT) {
00226         SEM *tsem; 
00227         unsigned long handle = 0UL;
00228         if ((tsem = _rt_typed_named_sem_init(nam2num(nametmp), value, type, &handle))) {
00229             if ((handle) && (oflags == (O_CREAT | O_EXCL)))     {
00230                 return (sem_t *)-EEXIST;
00231             }
00232             return (sem_t *)tsem;
00233         }
00234         return (sem_t *)-ENOSPC;
00235     }
00236     return (sem_t *)-EINVAL;
00237 }
00238 
00239 static inline int sem_close(sem_t *sem)
00240 {
00241     if (rt_sem_wait_if(&sem->sem)< 0) {
00242         return -EBUSY;
00243     }
00244     rt_named_sem_delete(&sem->sem);
00245     
00246     rt_free(sem);
00247     
00248     return  0;
00249 }
00250 
00251 static inline int sem_unlink(const char *namein)
00252 {
00253     char nametmp[RTAI_PNAME_MAXSZ + 1];
00254     int i;
00255     SEM *sem;
00256     if (strlen(namein) > RTAI_PNAME_MAXSZ) {
00257         return -ENAMETOOLONG;
00258     }
00259     
00260     for(i = 0; i < strlen(namein); i++) {
00261         if ((nametmp[i] = namein [i]) >= 'a' && nametmp[i] <= 'z') nametmp[i] += 'A' - 'a';
00262     }
00263     nametmp[i]='\0';
00264     sem = rt_get_adr_cnt(nam2num(nametmp));
00265     if (sem) {
00266         if (rt_sem_wait_if(sem) >= 0) {
00267             rt_sem_signal(sem);
00268             rt_named_sem_delete(sem);
00269             return  0;
00270         }
00271         return -EBUSY;
00272     }
00273     return -ENOENT;
00274 }
00275 
00276 
00277 static inline int sem_init(sem_t *sem, int pshared, unsigned int value)
00278 {
00279     if (value < SEM_TIMOUT) {
00280         rt_typed_sem_init(&sem->sem, value, CNT_SEM | PRIO_Q);
00281         return 0;
00282     }
00283     return -EINVAL;
00284 }
00285 
00286 static inline int sem_destroy(sem_t *sem)
00287 {
00288     if (rt_sem_wait_if(&sem->sem) >= 0) {
00289         rt_sem_signal(&sem->sem);
00290         rt_sem_delete(&sem->sem);
00291         return  0;
00292     }
00293     return -EBUSY;
00294 }
00295 
00296 static inline int sem_wait(sem_t *sem)
00297 {
00298     return rt_sem_wait(&sem->sem) < SEM_TIMOUT ? 0 : -1;
00299 }
00300 
00301 static inline int sem_trywait(sem_t *sem)
00302 {   
00303     return rt_sem_wait_if(&sem->sem) > 0 ? 0 : -EAGAIN;
00304 }
00305 
00306 static inline int sem_timedwait(sem_t *sem, const struct timespec *abstime)
00307 {   
00308     return rt_sem_wait_until(&sem->sem, timespec2count(abstime)) < SEM_TIMOUT ? 0 : -ETIMEDOUT;
00309 }
00310 
00311 static inline int sem_post(sem_t *sem)
00312 {
00313     return rt_sem_signal(&sem->sem) < SEM_TIMOUT ? 0 : -ERANGE;
00314 }
00315 
00316 static inline int sem_getvalue(sem_t *sem, int *sval)
00317 {
00318     *sval = rt_sem_count(&sem->sem);
00319     return 0;
00320 }
00321 
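A minimal usage sketch (not part of the header itself): one kernel-side thread releasing a counting semaphore that another waits on. It assumes an RTAI kernel module built against this header; the names items, producer, consumer and example_sem_setup are invented for illustration.

#include <rtai_posix.h>

static sem_t items;

static void *producer(void *arg)
{
    sem_post(&items);                  /* release one item */
    return NULL;
}

static void *consumer(void *arg)
{
    sem_wait(&items);                  /* block until an item is available */
    return NULL;
}

static int example_sem_setup(void)
{
    return sem_init(&items, 0, 0);     /* pshared is ignored above; initial count 0 */
}
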
00322 /*
00323  * MUTEXES
00324  */
00325  
00326 enum {
00327   PTHREAD_PROCESS_PRIVATE,
00328 #define PTHREAD_PROCESS_PRIVATE PTHREAD_PROCESS_PRIVATE
00329   PTHREAD_PROCESS_SHARED
00330 #define PTHREAD_PROCESS_SHARED  PTHREAD_PROCESS_SHARED
00331 };
00332 
00333 enum
00334 {
00335   PTHREAD_MUTEX_TIMED_NP,
00336   PTHREAD_MUTEX_RECURSIVE_NP,
00337   PTHREAD_MUTEX_ERRORCHECK_NP,
00338   PTHREAD_MUTEX_ADAPTIVE_NP,
00339   PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
00340   PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
00341   PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP,
00342   PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL,
00343   PTHREAD_MUTEX_FAST_NP = PTHREAD_MUTEX_TIMED_NP
00344 };
00345 
00346 #define RTAI_MUTEX_DEFAULT    (1 << 0)
00347 #define RTAI_MUTEX_ERRCHECK   (1 << 1)
00348 #define RTAI_MUTEX_RECURSIVE  (1 << 2)
00349 #define RTAI_MUTEX_PSHARED    (1 << 3)
00350  
00351 static inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr)
00352 {
00353     rt_typed_sem_init(&mutex->mutex,  !mutexattr || (((long *)mutexattr)[0] & RTAI_MUTEX_DEFAULT) ? RESEM_BINSEM : (((long *)mutexattr)[0] & RTAI_MUTEX_ERRCHECK) ? RESEM_CHEKWT : RESEM_RECURS, RES_SEM);
00354     return 0;
00355 }
00356 
00357 static inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
00358 {
00359     if (rt_sem_wait_if(&mutex->mutex) >= 0) {
00360         rt_sem_signal(&mutex->mutex);
00361         rt_sem_delete(&mutex->mutex);
00362         return  0;
00363     }
00364     return -EBUSY;  
00365 }
00366 
00367 static inline int pthread_mutex_lock(pthread_mutex_t *mutex)
00368 {
00369     return rt_sem_wait(&mutex->mutex) < SEM_TIMOUT ? 0 : -EINVAL;
00370 }
00371 
00372 static inline int pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
00373 {
00374     return rt_sem_wait_until(&mutex->mutex, timespec2count(abstime)) < SEM_TIMOUT ? 0 : -1;
00375 }
00376 
00377 static inline int pthread_mutex_trylock(pthread_mutex_t *mutex)
00378 {
00379     return rt_sem_wait_if(&mutex->mutex) > 0 ? 0 : -EBUSY;
00380 }
00381 
00382 static inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
00383 {
00384     return rt_sem_signal(&mutex->mutex) >= 0 ? 0 : -EINVAL;
00385 }
00386 
00387 static inline int pthread_mutexattr_init(pthread_mutexattr_t *attr)
00388 {
00389     ((long *)attr)[0] = RTAI_MUTEX_DEFAULT;
00390     return 0;
00391 }
00392 
00393 static inline int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
00394 {
00395     return 0;
00396 }
00397 
00398 static inline int pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
00399 {   
00400     *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
00401     return 0;
00402 }
00403 
00404 static inline int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
00405 {
00406     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
00407         if (pshared == PTHREAD_PROCESS_PRIVATE) {
00408             ((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
00409         } else {
00410             ((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
00411         }
00412         return 0;
00413     }
00414     return -EINVAL;
00415 }
00416 
00417 static inline int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind)
00418 {
00419     switch (kind) {
00420         case PTHREAD_MUTEX_DEFAULT:
00421             ((long *)attr)[0] = (((long *)attr)[0] & ~(RTAI_MUTEX_RECURSIVE | RTAI_MUTEX_ERRCHECK)) | RTAI_MUTEX_DEFAULT;
00422             break;
00423         case PTHREAD_MUTEX_ERRORCHECK:
00424             ((long *)attr)[0] = (((long *)attr)[0] & ~(RTAI_MUTEX_RECURSIVE | RTAI_MUTEX_DEFAULT)) | RTAI_MUTEX_ERRCHECK;
00425             break;
00426         case PTHREAD_MUTEX_RECURSIVE:
00427             ((long *)attr)[0] = (((long *)attr)[0] & ~(RTAI_MUTEX_DEFAULT | RTAI_MUTEX_ERRCHECK)) | RTAI_MUTEX_RECURSIVE;
00428             break;
00429         default:
00430             return -EINVAL;
00431     }
00432     return 0;
00433 }
00434 
00435 static inline int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *kind)
00436 {
00437     switch (((long *)attr)[0] & (RTAI_MUTEX_DEFAULT | RTAI_MUTEX_ERRCHECK | RTAI_MUTEX_RECURSIVE)) {
00438         case RTAI_MUTEX_DEFAULT:
00439             *kind = PTHREAD_MUTEX_DEFAULT;
00440             break;
00441         case RTAI_MUTEX_ERRCHECK:
00442             *kind = PTHREAD_MUTEX_ERRORCHECK;
00443             break;
00444         case RTAI_MUTEX_RECURSIVE:
00445             *kind = PTHREAD_MUTEX_RECURSIVE;
00446             break;
00447     }
00448     return 0;
00449 }
00450 
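A usage sketch (illustrative only) of the mutex emulation above, requesting an error-checking mutex through the attribute bits; data_lock, shared_counter and the example_* functions are invented names, assuming a kernel-module context.

#include <rtai_posix.h>

static pthread_mutex_t data_lock;
static pthread_mutexattr_t data_lock_attr;
static int shared_counter;

static int example_mutex_setup(void)
{
    pthread_mutexattr_init(&data_lock_attr);
    pthread_mutexattr_settype(&data_lock_attr, PTHREAD_MUTEX_ERRORCHECK);  /* maps to RESEM_CHEKWT */
    return pthread_mutex_init(&data_lock, &data_lock_attr);
}

static void example_mutex_use(void)
{
    pthread_mutex_lock(&data_lock);
    shared_counter++;                  /* protected update */
    pthread_mutex_unlock(&data_lock);
}
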
00451 /*
00452  * CONDVARS
00453  */
00454 
00455 static inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
00456 {
00457     rt_typed_sem_init(&cond->cond, 0,  BIN_SEM | PRIO_Q);
00458     return 0;
00459 }
00460 
00461 static inline int pthread_cond_destroy(pthread_cond_t *cond)
00462 {
00463     if (rt_sem_wait_if(&cond->cond) < 0) {
00464         return -EBUSY;
00465     }
00466     rt_sem_delete(&cond->cond);
00467     return  0;  
00468 }
00469 
00470 static inline int pthread_cond_signal(pthread_cond_t *cond)
00471 {
00472     return rt_sem_signal(&cond->cond);
00473 }
00474 
00475 static inline int pthread_cond_broadcast(pthread_cond_t *cond)
00476 {
00477     return rt_sem_broadcast(&cond->cond);
00478 }
00479 
00480 static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
00481 {
00482     return rt_cond_wait(&cond->cond, &mutex->mutex);
00483 }
00484 
00485 static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
00486 {
00487     return rt_cond_wait_until(&cond->cond, &mutex->mutex, timespec2count(abstime)) < SEM_TIMOUT ? 0 : -ETIMEDOUT;
00488 }
00489 
00490 static inline int pthread_condattr_init(unsigned long *attr)
00491 {
00492     ((long *)attr)[0] = 0;
00493     return 0;
00494 }
00495 
00496 static inline int pthread_condattr_destroy(pthread_condattr_t *attr)
00497 {
00498     return 0;
00499 }
00500 
00501 static inline int pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
00502 {
00503     *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
00504     return 0;
00505 }
00506 
00507 static inline int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
00508 {
00509     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
00510         if (pshared == PTHREAD_PROCESS_PRIVATE) {
00511             ((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
00512         } else {
00513             ((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
00514         }
00515         return 0;
00516     }
00517     return -EINVAL;
00518 }
00519 
00520 static inline int pthread_condattr_setclock(pthread_condattr_t *condattr, clockid_t clockid)
00521 {
00522     if (clockid == CLOCK_MONOTONIC || clockid == CLOCK_REALTIME) {
00523         ((int *)condattr)[0] = clockid;
00524         return 0;
00525     }
00526     return -EINVAL;
00527 }
00528 
00529 static inline int pthread_condattr_getclock(pthread_condattr_t *condattr, clockid_t *clockid)
00530 {
00531     if (clockid) {
00532         *clockid = ((int *)condattr)[0];
00533         return 0;
00534     }
00535     return -EINVAL;
00536 }
00537 
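A classic wait/signal sketch on top of the condvar mapping above (rt_cond_wait under the hood); q_lock, q_nonempty and q_len are invented names, and both objects are assumed to have been set up with pthread_mutex_init()/pthread_cond_init().

#include <rtai_posix.h>

static pthread_mutex_t q_lock;         /* initialized elsewhere */
static pthread_cond_t q_nonempty;      /* initialized elsewhere */
static int q_len;

static void wait_for_item(void)
{
    pthread_mutex_lock(&q_lock);
    while (q_len == 0) {
        pthread_cond_wait(&q_nonempty, &q_lock);   /* atomically releases q_lock and waits */
    }
    q_len--;
    pthread_mutex_unlock(&q_lock);
}

static void push_item(void)
{
    pthread_mutex_lock(&q_lock);
    q_len++;
    pthread_cond_signal(&q_nonempty);
    pthread_mutex_unlock(&q_lock);
}
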
00538 /*
00539  * BARRIER
00540  */
00541 
00542 static inline int pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count)
00543 {
00544     if (count > 0) {
00545         rt_typed_sem_init(&barrier->barrier, count, CNT_SEM | PRIO_Q);
00546         return 0;
00547     }
00548     return -EINVAL;
00549 }
00550 
00551 static inline int pthread_barrier_destroy(pthread_barrier_t *barrier)
00552 {
00553     if (rt_sem_wait_if(&barrier->barrier) < 0) {
00554         return -EBUSY;
00555     }
00556     return rt_sem_delete(&barrier->barrier) == RT_OBJINV ? -EINVAL : 0;
00557 }
00558 
00559 static inline int pthread_barrier_wait(pthread_barrier_t *barrier)
00560 {
00561     return rt_sem_wait_barrier(&barrier->barrier);
00562 }
00563 
00564 static inline int wrap_pthread_barrierattr_init(pthread_barrierattr_t *attr)
00565 {
00566     ((long *)attr)[0] = PTHREAD_PROCESS_PRIVATE;
00567     return 0;
00568 }
00569 
00570 static inline int pthread_barrierattr_destroy(pthread_barrierattr_t *attr)
00571 {
00572     return 0;
00573 }
00574 
00575 static inline int pthread_barrierattr_setpshared(pthread_barrierattr_t *attr, int pshared)
00576 {
00577     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
00578         ((long *)attr)[0] = pshared;
00579         return 0;
00580     }
00581     return -EINVAL;
00582 }
00583 
00584 static inline int wrap_pthread_barrierattr_getpshared(const pthread_barrierattr_t *attr, int *pshared)
00585 {
00586     *pshared = ((long *)attr)[0];
00587     return 0;
00588 }
00589 
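A short sketch of the barrier mapping above (rt_sem_wait_barrier): several worker threads rendezvous before starting their real-time loops. The names start_line, worker and example_barrier_setup are invented.

#include <rtai_posix.h>

static pthread_barrier_t start_line;

static void *worker(void *arg)
{
    /* ... per-thread setup ... */
    pthread_barrier_wait(&start_line);          /* wait for all workers */
    /* ... real-time work ... */
    return NULL;
}

static int example_barrier_setup(int nworkers)
{
    return pthread_barrier_init(&start_line, NULL, nworkers);   /* attr is unused above */
}
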
00590 /*
00591  * RWLOCKS
00592  */
00593 
00594 static inline int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
00595 {
00596     return rt_rwl_init(&rwlock->rwlock);
00597 }
00598 
00599 static inline int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
00600 {
00601     return rt_rwl_delete(&rwlock->rwlock);
00602 }
00603 
00604 static inline int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
00605 {
00606     if (rt_rwl_rdlock(&rwlock->rwlock)) {
00607         return -EDEADLK;
00608     }
00609     return 0;
00610 }
00611 
00612 static inline int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
00613 {
00614     if (rt_rwl_rdlock_if(&rwlock->rwlock)) {
00615         return -EBUSY;
00616     }
00617     return 0;
00618 }
00619 
00620 static inline int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, struct timespec *abstime)
00621 {
00622     return rt_rwl_rdlock_until(&rwlock->rwlock, timespec2count(abstime));
00623 }
00624 
00625 static inline int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
00626 {
00627     return rt_rwl_wrlock(&rwlock->rwlock);
00628 }
00629 
00630 static inline int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
00631 {
00632     if (rt_rwl_wrlock_if(&rwlock->rwlock)) {
00633         return -EBUSY;
00634     }
00635     return 0;   
00636 }
00637 
00638 static inline int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, struct timespec *abstime)
00639 {
00640     return rt_rwl_wrlock_until(&rwlock->rwlock, timespec2count(abstime));
00641 }
00642 
00643 static inline int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
00644 {
00645     return rt_rwl_unlock(&rwlock->rwlock);
00646 }
00647 
00648 static inline int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
00649 {
00650     ((long *)attr)[0] = 0;
00651     return 0;
00652 }
00653 
00654 static inline int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
00655 {
00656     return 0;
00657 }
00658 
00659 static inline int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr, int *pshared)
00660 {
00661     *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
00662     return 0;
00663 }
00666 
00667 static inline int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
00668 {
00669     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
00670         if (pshared == PTHREAD_PROCESS_PRIVATE) {
00671             ((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
00672         } else {
00673             ((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
00674         }
00675         return 0;
00676     }
00677     return -EINVAL;
00678 }
00679 
00680 static inline int pthread_rwlockattr_getkind_np(const pthread_rwlockattr_t *attr, int *pref)
00681 {
00682     return 0;
00683 }
00684 
00685 static inline int pthread_rwlockattr_setkind_np(pthread_rwlockattr_t *attr, int pref)
00686 {
00687     return 0;
00688 }
00689 
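A reader/writer sketch for the RWL-based lock above; cfg_lock, cfg_value and the helpers are invented, and the lock is assumed to have been created with pthread_rwlock_init(&cfg_lock, NULL).

#include <rtai_posix.h>

static pthread_rwlock_t cfg_lock;      /* initialized elsewhere */
static int cfg_value;

static int read_cfg(void)
{
    int v;
    pthread_rwlock_rdlock(&cfg_lock);  /* readers may share the lock */
    v = cfg_value;
    pthread_rwlock_unlock(&cfg_lock);
    return v;
}

static void write_cfg(int v)
{
    pthread_rwlock_wrlock(&cfg_lock);  /* writers get exclusive access */
    cfg_value = v;
    pthread_rwlock_unlock(&cfg_lock);
}
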
00690 /*
00691  * SCHEDULING
00692  */
00693  
00694 static inline int get_max_priority(int policy)
00695 {
00696     return MAX_PRIO;
00697 }
00698 
00699 static inline int get_min_priority(int policy)
00700 {
00701     return MIN_PRIO;
00702 }
00703 
00704 static void posix_wrapper_fun(pthread_cookie_t *cookie)
00705 {
00706     cookie->task_fun(cookie->arg);
00707     rt_sem_broadcast(&cookie->sem);
00708     rt_sem_delete(&cookie->sem);
00709 //  rt_task_suspend(&cookie->task);
00710 } 
00711 
00712 static inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
00713 {
00714     void *cookie_mem;
00715 
00716     cookie_mem = (void *)rt_malloc(sizeof(pthread_cookie_t) + L1_CACHE_BYTES);
00717     if (cookie_mem) {
00718         pthread_cookie_t *cookie;
00719         int err;
00720         /* align memory for RT_TASK to L1_CACHE_BYTES boundary */
00721         cookie = (pthread_cookie_t *)( (((unsigned long)cookie_mem) + ((unsigned long)L1_CACHE_BYTES)) & ~(((unsigned long)L1_CACHE_BYTES) - 1UL) );
00722         cookie->cookie = cookie_mem; /* save real memory block for pthread_join to free for us */
00723         (cookie->task).magic = 0;
00724         cookie->task_fun = (void *)start_routine;
00725         cookie->arg = (long)arg;
00726         if (!(err = rt_task_init(&cookie->task, (void *)posix_wrapper_fun, (long)cookie, (attr) ? attr->stacksize : STACK_SIZE, (attr) ? attr->priority : RT_SCHED_LOWEST_PRIORITY, 1, NULL))) {
00727             rt_typed_sem_init(&cookie->sem, 0, BIN_SEM | FIFO_Q);
00728             rt_task_resume(&cookie->task);
00729             *thread = &cookie->task;
00730             return 0;
00731         } else {
00732             rt_free(cookie->cookie);
00733             return err;
00734         }
00735     }
00736     return -ENOMEM;
00737 }
00738 
00739 static inline int pthread_yield(void)
00740 {
00741     rt_task_yield();
00742     return 0;
00743 }
00744 
00745 static inline void pthread_exit(void *retval)
00746 {
00747     RT_TASK *rt_task;
00748     SEM *sem;
00749     rt_task = rt_whoami();
00750     sem = &((pthread_cookie_t *)rt_task)->sem;
00751     rt_sem_broadcast(sem);
00752     rt_sem_delete(sem);
00753     rt_task->retval = (long)retval;
00754     rt_task_suspend(rt_task);
00755 }
00756 
00757 static inline int pthread_join(pthread_t thread, void **thread_return)
00758 {
00759     int retval1, retval2;
00760     long retval_thread;
00761     SEM *sem;
00762     sem = &((pthread_cookie_t *)thread)->sem;
00763     if (rt_whoami()->priority != RT_SCHED_LINUX_PRIORITY){
00764         retval1 = rt_sem_wait(sem);
00765     } else {
00766         while ((retval1 = rt_sem_wait_if(sem)) <= 0) {
00767             msleep(10);
00768         }
00769     }
00770 //  retval1 = 0;
00771     retval_thread = ((RT_TASK *)thread)->retval;
00772     if (thread_return) {
00773         *thread_return = (void *)retval_thread;
00774     }
00775     retval2 = rt_task_delete(thread);
00776     rt_free(((pthread_cookie_t *)thread)->cookie);
00777     return (retval1) ? retval1 : retval2;
00778 }
00779 
00780 static inline int pthread_cancel(pthread_t thread)
00781 {
00782     int retval;
00783     if (!thread) {
00784         thread = rt_whoami();
00785     }
00786     retval = rt_task_delete(thread);
00787     rt_free(((pthread_cookie_t *)thread)->cookie);
00788     return retval;
00789 }
00790 
00791 static inline int pthread_equal(pthread_t thread1, pthread_t thread2)
00792 {
00793     return thread1 == thread2;
00794 }
00795 
00796 static inline pthread_t pthread_self(void)
00797 {
00798     return rt_whoami();
00799 }
00800 
00801 static inline int pthread_attr_init(pthread_attr_t *attr)
00802 {
00803     attr->stacksize     = STACK_SIZE;
00804     attr->policy        = SCHED_FIFO;
00805     attr->rr_quantum_ns = RR_QUANTUM_NS;
00806     attr->priority      = 1;
00807     return 0;
00808 }
00809 
00810 static inline int pthread_attr_destroy(pthread_attr_t *attr)
00811 {
00812     return 0;
00813 }
00814 
00815 static inline int pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
00816 {
00817     if(param->sched_priority < MIN_PRIO || param->sched_priority > MAX_PRIO) {
00818         return(-EINVAL);
00819     }
00820     attr->priority = MAX_PRIO - param->sched_priority;
00821     return 0;
00822 }
00823 
00824 static inline int pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
00825 {
00826     param->sched_priority = MAX_PRIO - attr->priority;
00827     return 0;
00828 }
00829 
00830 static inline int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
00831 {
00832     if(policy != SCHED_FIFO && policy != SCHED_RR) {
00833         return -EINVAL;
00834     }
00835     if ((attr->policy = policy) == SCHED_RR) {
00836         rt_set_sched_policy(rt_whoami(), SCHED_RR, attr->rr_quantum_ns);
00837     }
00838     return 0;
00839 }
00840 
00841 
00842 static inline int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
00843 {
00844     *policy = attr->policy;
00845     return 0;
00846 }
00847 
00848 static inline int pthread_attr_setschedrr(pthread_attr_t *attr, int rr_quantum_ns)
00849 {
00850     attr->rr_quantum_ns = rr_quantum_ns;
00851     return 0;
00852 }
00853 
00854 
00855 static inline int pthread_attr_getschedrr(const pthread_attr_t *attr, int *rr_quantum_ns)
00856 {
00857     *rr_quantum_ns = attr->rr_quantum_ns;
00858     return 0;
00859 }
00860 
00861 static inline int pthread_attr_setstacksize(pthread_attr_t *attr, int stacksize)
00862 {
00863     attr->stacksize = stacksize;
00864     return 0;
00865 }
00866 
00867 static inline int pthread_attr_getstacksize(const pthread_attr_t *attr, int *stacksize)
00868 {
00869     *stacksize = attr->stacksize;
00870     return 0;
00871 }
00872 
00873 static inline int pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, int stacksize)
00874 {
00875     attr->stacksize = stacksize;
00876     return 0;
00877 }
00878 
00879 static inline int pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, int *stacksize)
00880 {
00881     *stacksize = attr->stacksize;
00882     return 0;
00883 }
00884 
00885 static inline void pthread_testcancel(void)
00886 {
00887     rt_task_delete(rt_whoami());
00888     pthread_exit(NULL);
00889 }
00890 
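A sketch of creating and joining a kernel-side thread through the wrappers above; blink, example_thread_setup and the priority/stack figures are arbitrary illustration values. Note that pthread_attr_setschedparam() maps the POSIX priority onto an RTAI priority as MAX_PRIO - sched_priority.

#include <rtai_posix.h>

static void *blink(void *arg)
{
    /* ... real-time body ... */
    return NULL;
}

static int example_thread_setup(void)
{
    pthread_t tid;
    pthread_attr_t attr;
    struct sched_param prm = { .sched_priority = 50 };
    int err;

    pthread_attr_init(&attr);
    pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
    pthread_attr_setschedparam(&attr, &prm);
    pthread_attr_setstacksize(&attr, 16384);
    if ((err = pthread_create(&tid, &attr, blink, NULL))) {
        return err;
    }
    return pthread_join(tid, NULL);    /* blocks until blink() returns */
}
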
00891 /*
00892  * SPINLOCKS
00893  */
00894 
00895 static inline int pthread_spin_init(pthread_spinlock_t *lock, int pshared)
00896 {
00897     if (lock) {
00898         *lock = 0UL;
00899         return 0;
00900     }
00901     return -EINVAL;
00902 }
00903 
00904 static inline int pthread_spin_destroy(pthread_spinlock_t *lock)
00905 {
00906     if (lock) {
00907         if (*lock) {
00908             return -EBUSY;
00909         }
00910         *lock = 0UL;
00911         return 0;
00912     }
00913     return -EINVAL;
00914 }
00915 
00916 static inline int pthread_spin_lock(pthread_spinlock_t *lock)
00917 {
00918     if (lock) {
00919         unsigned long tid;
00920         if (((unsigned long *)lock)[0] == (tid = (unsigned long)(pthread_self()))) {
00921             return -EDEADLOCK;
00922         }
00923         while (atomic_cmpxchg((atomic_t *)lock, 0, tid));
00924         return 0;
00925     }
00926     return -EINVAL;
00927 }
00928 
00929 static inline int pthread_spin_trylock(pthread_spinlock_t *lock)
00930 {
00931     if (lock) {
00932         unsigned long tid;
00933         tid = (unsigned long)(pthread_self());
00934         return atomic_cmpxchg((atomic_t *)lock, 0, tid) ? -EBUSY : 0;
00935     }
00936     return -EINVAL;
00937 }
00938 
00939 static inline int pthread_spin_unlock(pthread_spinlock_t *lock)
00940 {
00941     if (lock) {
00942 #if 0
00943         *lock = 0UL;
00944         return 0;
00945 #else
00946         if (*lock != (unsigned long)pthread_self()) {
00947             return -EPERM;
00948         }
00949         *lock = 0UL;
00950         return 0;
00951 #endif
00952     }
00953     return -EINVAL;
00954 }
00955 
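A brief sketch of the owner-tracking spinlocks above; fast_lock and example_spin_use are invented names, and the critical section is assumed to be very short since waiters busy-spin.

#include <rtai_posix.h>

static pthread_spinlock_t fast_lock;

static void example_spin_use(void)
{
    pthread_spin_init(&fast_lock, PTHREAD_PROCESS_PRIVATE);
    pthread_spin_lock(&fast_lock);      /* relocking by the owner returns -EDEADLOCK */
    /* ... very short critical section ... */
    pthread_spin_unlock(&fast_lock);    /* only the owner may unlock (-EPERM otherwise) */
    pthread_spin_destroy(&fast_lock);
}
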
00956 static inline int clock_getres(int clockid, struct timespec *res)
00957 {
00958     res->tv_sec = 0;
00959     if (!(res->tv_nsec = count2nano(1))) {
00960         res->tv_nsec = 1;
00961     }
00962     return 0;
00963 }
00964 
00965 static inline int clock_gettime(int clockid, struct timespec *tp)
00966 {
00967     count2timespec(rt_get_time(), tp);
00968     return 0;
00969 }
00970 
00971 static inline int clock_settime(int clockid, const struct timespec *tp)
00972 {
00973     return 0;
00974 }
00975 
00976 static inline int clock_nanosleep(int clockid, int flags, const struct timespec *rqtp, struct timespec *rmtp)
00977 {
00978     RTIME expire;
00979     if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
00980         return -EINVAL;
00981     }
00982     rt_sleep_until(expire = flags ? timespec2count(rqtp) : rt_get_time() + timespec2count(rqtp));
00983     if ((expire -= rt_get_time()) > 0) {
00984         if (rmtp) {
00985             count2timespec(expire, rmtp);
00986         }
00987         return -EINTR;
00988     }
00989     return 0;
00990 }
00991 
00992 static inline int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
00993 {
00994     RTIME expire;
00995     if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 ||
00996         rqtp->tv_sec < 0) {
00997         return -EINVAL;
00998     }
00999     rt_sleep_until(expire = rt_get_time() + timespec2count(rqtp));
01000     if ((expire -= rt_get_time()) > 0) {
01001         if (rmtp) {
01002             count2timespec(expire, rmtp);
01003         }
01004         return -EINTR;
01005     }
01006     return 0;
01007 }
01008 
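Two small sketches of the time functions above; the names and intervals are invented. In this implementation clock_gettime() always reads the RTAI time base regardless of clockid, and a non-zero flags argument to clock_nanosleep() selects an absolute wake-up time.

#include <rtai_posix.h>

static void example_sleep_100us(void)
{
    struct timespec t = { 0, 100000 };
    nanosleep(&t, NULL);                                    /* relative sleep */
}

static void example_sleep_until(void)
{
    struct timespec deadline;
    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += 1;                                   /* one second from now */
    clock_nanosleep(CLOCK_MONOTONIC, 1, &deadline, NULL);   /* non-zero flags: absolute */
}
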
01009 /*
01010  * TIMERS
01011  */
01012  
01013 struct rt_handler_support {
01014     void (*_function)(sigval_t); 
01015     sigval_t funarg;
01016 };
01017 
01018 #ifndef RTAI_POSIX_HANDLER_WRPR
01019 #define RTAI_POSIX_HANDLER_WRPR
01020 
01021 static void handler_wrpr(unsigned long sup_data)
01022 {
01023     ((struct rt_handler_support *)sup_data)->_function(((struct rt_handler_support *)sup_data)->funarg);
01024 }
01025 
01026 #endif
01027 
01028 static inline int timer_create(clockid_t clockid, struct sigevent *evp, timer_t *timerid)
01029 {
01030     struct rt_tasklet_struct *timer;
01031     struct rt_handler_support *handler_data;            
01032         
01033     if (clockid != CLOCK_MONOTONIC && clockid != CLOCK_REALTIME) {
01034         return -EINTR; 
01035     }   
01036     if (evp == NULL) {
01037         return -EINTR; 
01038     } else {
01039         if (evp->sigev_notify == SIGEV_SIGNAL) {
01040             return -EINTR; 
01041         } else if (evp->sigev_notify == SIGEV_THREAD) {
01042             timer = rt_malloc(sizeof(struct rt_tasklet_struct));
01043             handler_data = rt_malloc(sizeof(struct rt_handler_support));
01044             handler_data->funarg = evp->sigev_value;
01045             handler_data->_function = evp->_sigev_un._sigev_thread._function;
01046             *timerid = rt_ptimer_create(timer, handler_wrpr, (unsigned long)handler_data, 1, 0);
01047         } else {
01048             return -EINTR; 
01049         }
01050     }
01051         
01052     return 0;
01053 }
01054 
01055 static inline int timer_getoverrun(timer_t timerid)
01056 {
01057     return rt_ptimer_overrun(timerid);
01058 }
01059 
01060 static inline int timer_gettime(timer_t timerid, struct itimerspec *value)
01061 {
01062     RTIME timer_times[2];
01063     
01064     rt_ptimer_gettime(timerid, timer_times);
01065     count2timespec( timer_times[0], &(value->it_value) );
01066     count2timespec( timer_times[1], &(value->it_interval) );
01067     
01068     return 0;
01069 }
01070 
01071 static inline int timer_settime(timer_t timerid, int flags, const struct itimerspec *value,  struct itimerspec *ovalue)
01072 {
01073     if (ovalue != NULL) {
01074         timer_gettime(timerid, ovalue);
01075     }   
01076     rt_ptimer_settime(timerid, value, 0, flags);
01077 
01078     return 0;
01079 }
01080 
01081 static inline int timer_delete(timer_t timerid)
01082 {
01083     rt_ptimer_delete(timerid, 0);
01084     return 0;   
01085 }
01086 
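A sketch of creating a 1 ms periodic timer with a SIGEV_THREAD-style handler, the only notification type supported above; tick, example_timer_setup and the interval are invented, and the handler is stored through the same _sigev_un._sigev_thread._function field that timer_create() reads.

#include <rtai_posix.h>

static void tick(sigval_t val)
{
    /* ... periodic work ... */
}

static int example_timer_setup(timer_t *tid)
{
    struct sigevent ev;
    struct itimerspec its;

    ev.sigev_notify = SIGEV_THREAD;
    ev.sigev_value.sival_ptr = NULL;
    ev._sigev_un._sigev_thread._function = tick;
    if (timer_create(CLOCK_MONOTONIC, &ev, tid)) {
        return -EINVAL;
    }
    its.it_value.tv_sec = 0;
    its.it_value.tv_nsec = 1000000;              /* first expiry in 1 ms */
    its.it_interval = its.it_value;              /* then every 1 ms */
    return timer_settime(*tid, 0, &its, NULL);
}
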
01087 #ifdef __cplusplus
01088 }
01089 #endif /* __cplusplus */
01090 
01091 #else  /* !__KERNEL__ */
01092 
01093 #include <errno.h>
01094 #include <fcntl.h>
01095 #include <unistd.h>
01096 #include <signal.h>
01097 #include <sys/types.h>
01098 #include <sys/stat.h>
01099 #include <sys/mman.h>
01100 #include <stdlib.h>
01101 #include <ctype.h>
01102 
01103 #include <semaphore.h>
01104 #include <limits.h>
01105 #include <pthread.h>
01106 
01107 struct task_struct;
01108 
01109 #undef  SEM_VALUE_MAX 
01110 #define SEM_VALUE_MAX  (SEM_TIMOUT - 1)
01111 #define SEM_BINARY     (0x7FFFFFFF)
01112 
01113 #define RTAI_PNAME_MAXSZ  6
01114 #define SET_ADR(s)     (((void **)s)[0])
01115 #define SET_VAL(s)     (((void **)s)[1])
01116 #define INC_VAL(s)     atomic_inc((atomic_t *)&(((void **)s)[1]))
01117 #define DEC_VAL(s)     atomic_dec_and_test((atomic_t *)&(((void **)s)[1]))
01118 #define TST_VAL(s)     (((void **)s)[1])
01119 
01120 #define LINUX_SIGNAL  32
01121 #define LINUX_RT_SIGNAL  32
01122 
01123 #include <asm/rtai_atomic.h>
01124 #include <rtai_sem.h>
01125 #include <rtai_signal.h>
01126 #include <rtai_tasklets.h>
01127 
01128 #ifdef __cplusplus
01129 extern "C" {
01130 #endif /* __cplusplus */
01131 
01132 /*
01133  * SUPPORT STUFF
01134  */
01135 
01136 static inline int MAKE_SOFT(void)
01137 {
01138     if (rt_is_hard_real_time(rt_buddy())) {
01139         rt_make_soft_real_time();
01140         return 1;
01141     }
01142     return 0;
01143 }
01144 
01145 #define MAKE_HARD(hs)  do { if (hs) rt_make_hard_real_time(); } while (0)
01146 
01147 RTAI_PROTO(void, count2timespec, (RTIME rt, struct timespec *t))
01148 {
01149     t->tv_sec = (rt = count2nano(rt))/1000000000;
01150     t->tv_nsec = rt - t->tv_sec*1000000000LL;
01151 }
01152 
01153 RTAI_PROTO(void, nanos2timespec, (RTIME rt, struct timespec *t))
01154 {
01155     t->tv_sec = rt/1000000000;
01156     t->tv_nsec = rt - t->tv_sec*1000000000LL;
01157 }
01158 
01159 RTAI_PROTO(RTIME, timespec2count, (const struct timespec *t))
01160 {
01161     return nano2count(t->tv_sec*1000000000LL + t->tv_nsec);
01162 }
01163 
01164 RTAI_PROTO(RTIME, timespec2nanos,(const struct timespec *t))
01165 {
01166     return t->tv_sec*1000000000LL + t->tv_nsec;
01167 }
01168 
01169 RTAI_PROTO(int, pthread_get_name_np, (void *adr, unsigned long *nameid))
01170 {
01171     return (*nameid = rt_get_name(SET_ADR(adr))) ? 0 : EINVAL;
01172 }
01173 
01174 RTAI_PROTO(int, pthread_get_adr_np, (unsigned long nameid, void *adr))
01175 {
01176     return (SET_ADR(adr) = rt_get_adr(nameid)) ? 0 : EINVAL;
01177 }
01178 
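A small helper sketch showing how the conversion inlines above are typically combined to build the absolute timespec expected by the *_timedwait wrappers further down; deadline_in() is an invented name, and rt_get_time()/nano2count() are assumed to come from the LXRT inlines pulled in by this header.

#include <rtai_posix.h>

/* absolute deadline "ns" nanoseconds from now, on the RTAI time base */
static void deadline_in(RTIME ns, struct timespec *abstime)
{
    count2timespec(rt_get_time() + nano2count(ns), abstime);
}
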
01179 /*
01180  * SEMAPHORES
01181  */
01182 
01183 #define str2upr(si, so) \
01184 do { int i; for (i = 0; i <= RTAI_PNAME_MAXSZ; i++) so[i] = toupper(si[i]); } while (0) 
01185 
01186 RTAI_PROTO(sem_t *, __wrap_sem_open, (const char *namein, int oflags, int value, int type))
01187 {
01188     char name[RTAI_PNAME_MAXSZ + 1];
01189     if (strlen(namein) > RTAI_PNAME_MAXSZ) {
01190         errno = ENAMETOOLONG;
01191         return SEM_FAILED;
01192     }
01193     str2upr(namein, name);
01194     if (!oflags || value <= SEM_VALUE_MAX) {
01195         void *tsem;
01196         unsigned long handle = 0UL;
01197         struct { unsigned long name; long value, type; unsigned long *handle; } arg = { nam2num(name), value, type, &handle };
01198         if ((tsem = rtai_lxrt(BIDX, SIZARG, NAMED_SEM_INIT, &arg).v[LOW])) {
01199             int fd;
01200             void *psem;
01201             if (handle == (unsigned long)tsem) {
01202                 if (oflags == (O_CREAT | O_EXCL)) {
01203                     errno = EEXIST;
01204                     return SEM_FAILED;
01205                 }
01206                 while ((fd = open(name, O_RDONLY)) <= 0 || read(fd, &psem, sizeof(psem)) != sizeof(psem));
01207                 close(fd);
01208             } else {
01209                 int wrtn;
01210                 rtai_lxrt(BIDX, SIZARG, NAMED_SEM_INIT, &arg);
01211                 psem = malloc(sizeof(void *));
01212                 ((void **)psem)[0] = tsem;
01213                 fd = open(name, O_CREAT | O_WRONLY, S_IRWXU | S_IRWXG | S_IRWXO);
01214                 wrtn = write(fd, &psem, sizeof(psem));
01215                 close(fd);
01216             }
01217             return (sem_t *)psem;
01218         }
01219         errno = ENOSPC;
01220         return SEM_FAILED;
01221     }
01222     errno = EINVAL;
01223     return SEM_FAILED;
01224 }
01225 
01226 RTAI_PROTO(int, __wrap_sem_close, (sem_t *sem))
01227 {
01228     struct { void *sem; } arg = { SET_ADR(sem) };
01229     if (arg.sem) {
01230         char name[RTAI_PNAME_MAXSZ + 1];
01231         num2nam(rt_get_name(SET_ADR(sem)), name);
01232         if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] < 0) {
01233             errno = EBUSY;
01234             return -1;
01235         }
01236         if (!rtai_lxrt(BIDX, SIZARG, NAMED_SEM_DELETE, &arg).i[LOW]) {
01237             while (!unlink(name));
01238             free(sem);
01239         }
01240         return 0;
01241     }
01242     errno =  EINVAL;
01243     return -1;
01244 }
01245 
01246 RTAI_PROTO(int, __wrap_sem_unlink, (const char *namein))
01247 {
01248     char name[RTAI_PNAME_MAXSZ + 1];
01249     int fd;
01250     void *psem;
01251     if (strlen(namein) > RTAI_PNAME_MAXSZ) {
01252         errno = ENAMETOOLONG;
01253         return -1;
01254     }
01255     str2upr(namein, name);
01256     if ((fd = open(name, O_RDONLY)) > 0 && read(fd, &psem, sizeof(psem)) == sizeof(psem)) {
01257         return __wrap_sem_close((sem_t *)psem);
01258     }
01259     errno = ENOENT;
01260     return -1;
01261 }
01262 
01263 RTAI_PROTO(int, __wrap_sem_init, (sem_t *sem, int pshared, unsigned int value))
01264 {
01265     if (value <= SEM_VALUE_MAX) {
01266         struct { unsigned long name; long value, type; unsigned long *handle; } arg = { rt_get_name(0), value, CNT_SEM | PRIO_Q, NULL };
01267         if (!(SET_ADR(sem) = rtai_lxrt(BIDX, SIZARG, NAMED_SEM_INIT, &arg).v[LOW])) {
01268             errno = ENOSPC;
01269             return -1;
01270         }
01271         return 0;
01272     }
01273     errno = EINVAL;
01274     return -1;
01275 }
01276 
01277 RTAI_PROTO(int, __wrap_sem_destroy, (sem_t *sem))
01278 {
01279     struct { void *sem; } arg = { SET_ADR(sem) };
01280     if (arg.sem) {
01281         if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] < 0) {
01282             errno = EBUSY;
01283             return -1;
01284         }
01285         SET_ADR(sem) = NULL;
01286         while (rtai_lxrt(BIDX, SIZARG, NAMED_SEM_DELETE, &arg).i[LOW]);
01287         return 0;
01288     }
01289     errno =  EINVAL;
01290     return -1;
01291 }
01292 
01293 RTAI_PROTO(int, __wrap_sem_wait, (sem_t *sem))
01294 {
01295     int oldtype, retval = -1;
01296     struct { void *sem; } arg = { SET_ADR(sem) };
01297     pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
01298     pthread_testcancel();
01299     if (arg.sem) {
01300         if (abs(rtai_lxrt(BIDX, SIZARG, SEM_WAIT, &arg).i[LOW]) >= RTE_BASE) {
01301             errno =  EINTR;
01302         } else {
01303             retval = 0;
01304         }
01305     } else {
01306         errno =  EINVAL;
01307     }
01308     pthread_testcancel();
01309     pthread_setcanceltype(oldtype, NULL);
01310     return retval;
01311 }
01312 
01313 RTAI_PROTO(int, __wrap_sem_trywait, (sem_t *sem))
01314 {
01315     struct { void *sem; } arg = { SET_ADR(sem) };
01316     if (arg.sem) {
01317         int retval;
01318         if (abs(retval = rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW]) >= RTE_BASE) {
01319             errno =  EINTR;
01320             return -1;
01321         }
01322         if (retval <= 0) {
01323             errno = EAGAIN;
01324             return -1;
01325         }
01326         return 0;
01327     }
01328     errno = EINVAL;
01329     return -1;
01330 }
01331 
01332 RTAI_PROTO(int, __wrap_sem_timedwait, (sem_t *sem, const struct timespec *abstime))
01333 {
01334     int oldtype, retval = -1;
01335     struct { void *sem; RTIME time; } arg = { SET_ADR(sem), timespec2count(abstime) };
01336     pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
01337     pthread_testcancel();
01338     if (arg.sem) {
01339         int ret;
01340         if (abs(ret = rtai_lxrt(BIDX, SIZARG, SEM_WAIT_UNTIL, &arg).i[LOW]) == RTE_TIMOUT) {
01341             errno =  ETIMEDOUT;
01342         } else if (ret >= RTE_BASE) {
01343             errno = EINTR;
01344         } else {
01345             retval = 0;
01346         }
01347     } else {
01348         errno =  EINVAL;
01349     }
01350     pthread_testcancel();
01351     pthread_setcanceltype(oldtype, NULL);
01352     return retval;
01353 }
01354 
01355 RTAI_PROTO(int, __wrap_sem_post, (sem_t *sem))
01356 {
01357     struct { void *sem; } arg = { SET_ADR(sem) };
01358     if (arg.sem) {
01359         rtai_lxrt(BIDX, SIZARG, SEM_SIGNAL, &arg);
01360         return 0;
01361     }
01362     errno =  EINVAL;
01363     return -1;
01364 }
01365 
01366 RTAI_PROTO(int, __wrap_sem_getvalue, (sem_t *sem, int *sval))
01367 {
01368     struct { void *sem; } arg = { SET_ADR(sem) };
01369     if (arg.sem) {
01370         *sval = rtai_lxrt(BIDX, SIZARG, SEM_COUNT, &arg).i[LOW];
01371         return 0;
01372     }
01373     errno =  EINVAL;
01374     return -1;
01375 }
01376 
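A usage sketch of the unnamed-semaphore wrappers above, assuming the calling process has already been turned into an LXRT task and that, in a normal build, applications reach these functions through the plain POSIX names via RTAI's link-time wrapping; flag and example_wait_flag are invented names.

#include <rtai_posix.h>

static sem_t flag;

static int example_wait_flag(void)
{
    struct timespec abstime;

    __wrap_sem_init(&flag, 0, 0);                          /* initial count 0 */
    count2timespec(rt_get_time() + nano2count(5000000LL), &abstime);
    if (__wrap_sem_timedwait(&flag, &abstime)) {           /* wait at most ~5 ms */
        /* errno is ETIMEDOUT or EINTR here */
    }
    return __wrap_sem_destroy(&flag);
}
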
01377 /*
01378  * MUTEXES
01379  */
01380 
01381 #define RTAI_MUTEX_DEFAULT    (1 << 0)
01382 #define RTAI_MUTEX_ERRCHECK   (1 << 1)
01383 #define RTAI_MUTEX_RECURSIVE  (1 << 2)
01384 #define RTAI_MUTEX_PSHARED    (1 << 3)
01385 
01386 RTAI_PROTO(int, __wrap_pthread_mutex_init, (pthread_mutex_t *mutex, const pthread_mutexattr_t *mutexattr))
01387 {
01388     struct { unsigned long name; long value, type; unsigned long *handle; } arg = { rt_get_name(0), !mutexattr || (((long *)mutexattr)[0] & RTAI_MUTEX_DEFAULT) ? RESEM_BINSEM : (((long *)mutexattr)[0] & RTAI_MUTEX_ERRCHECK) ? RESEM_CHEKWT : RESEM_RECURS, RES_SEM, NULL };
01389     SET_VAL(mutex) = 0;
01390     if (!(SET_ADR(mutex) = rtai_lxrt(BIDX, SIZARG, NAMED_SEM_INIT, &arg).v[LOW])) {
01391         return ENOMEM;
01392     }
01393     return 0;
01394 }
01395 
01396 RTAI_PROTO(int, __wrap_pthread_mutex_destroy, (pthread_mutex_t *mutex))
01397 {
01398     struct { void *mutex; } arg = { SET_ADR(mutex) };
01399     if (arg.mutex) {
01400         int count;
01401         if (TST_VAL(mutex)) {
01402             return EBUSY;
01403         }
01404         if ((count = rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW]) <= 0 || count > 1) {
01405             if (count > 1 && count != RTE_DEADLOK) {
01406                 rtai_lxrt(BIDX, SIZARG, SEM_SIGNAL, &arg);
01407             }
01408             return EBUSY;
01409         }
01410         SET_ADR(mutex) = NULL;
01411         while (rtai_lxrt(BIDX, SIZARG, NAMED_SEM_DELETE, &arg).i[LOW]);
01412         return 0;
01413     }
01414     return EINVAL;
01415 }
01416 
01417 RTAI_PROTO(int, __wrap_pthread_mutex_lock, (pthread_mutex_t *mutex))
01418 {
01419     struct { void *mutex; } arg = { SET_ADR(mutex) };
01420     if (arg.mutex) {
01421         int retval;
01422         while ((retval = rtai_lxrt(BIDX, SIZARG, SEM_WAIT, &arg).i[LOW]) == RTE_UNBLKD);
01423         return abs(retval) < RTE_BASE ? 0 : EDEADLOCK;
01424     }
01425     return EINVAL;
01426 }
01427 
01428 RTAI_PROTO(int, __wrap_pthread_mutex_trylock, (pthread_mutex_t *mutex))
01429 {
01430     struct { void *mutex; } arg = { SET_ADR(mutex) };
01431     if (arg.mutex) {
01432         if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] <= 0) {
01433             return EBUSY;
01434         }
01435         return 0;
01436     }
01437     return EINVAL;
01438 }
01439 
01440 #ifdef __USE_XOPEN2K
01441 RTAI_PROTO(int, __wrap_pthread_mutex_timedlock, (pthread_mutex_t *mutex, const struct timespec *abstime))
01442 {
01443     struct { void *mutex; RTIME time; } arg = { SET_ADR(mutex), timespec2count(abstime) };
01444     if (arg.mutex && abstime->tv_nsec >= 0 && abstime->tv_nsec < 1000000000) {
01445         int retval;
01446         while ((retval = rtai_lxrt(BIDX, SIZARG, SEM_WAIT_UNTIL, &arg).i[LOW]) == RTE_UNBLKD);
01447         if (abs(retval) < RTE_BASE) {
01448             return 0;
01449         }
01450         if (retval == RTE_TIMOUT) {
01451             return ETIMEDOUT;
01452         }
01453     }
01454     return EINVAL;
01455 }
01456 #endif
01457 
01458 RTAI_PROTO(int, __wrap_pthread_mutex_unlock, (pthread_mutex_t *mutex))
01459 {
01460     struct { void *mutex; } arg = { SET_ADR(mutex) };
01461     if (arg.mutex) {
01462         return rtai_lxrt(BIDX, SIZARG, SEM_SIGNAL, &arg).i[LOW] == RTE_PERM ? EPERM : 0;
01463     }
01464     return EINVAL;
01465 }
01466 
01467 RTAI_PROTO(int, __wrap_pthread_mutexattr_init, (pthread_mutexattr_t *attr))
01468 {
01469     ((long *)attr)[0] = RTAI_MUTEX_DEFAULT;
01470     return 0;
01471 }
01472 
01473 RTAI_PROTO(int, __wrap_pthread_mutexattr_destroy, (pthread_mutexattr_t *attr))
01474 {
01475     return 0;
01476 }
01477 
01478 RTAI_PROTO(int, __wrap_pthread_mutexattr_getpshared, (const pthread_mutexattr_t *attr, int *pshared))
01479 {   
01480     *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
01481     return 0;
01482 }
01483 
01484 RTAI_PROTO(int, __wrap_pthread_mutexattr_setpshared, (pthread_mutexattr_t *attr, int pshared))
01485 {
01486     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
01487         if (pshared == PTHREAD_PROCESS_PRIVATE) {
01488             ((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
01489         } else {
01490             ((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
01491         }
01492         return 0;
01493     }
01494     return EINVAL;
01495 }
01496 
01497 RTAI_PROTO(int, __wrap_pthread_mutexattr_settype, (pthread_mutexattr_t *attr, int kind))
01498 {
01499     switch (kind) {
01500         case PTHREAD_MUTEX_DEFAULT:
01501             ((long *)attr)[0] = (((long *)attr)[0] & ~(RTAI_MUTEX_RECURSIVE | RTAI_MUTEX_ERRCHECK)) | RTAI_MUTEX_DEFAULT;
01502             break;
01503         case PTHREAD_MUTEX_ERRORCHECK:
01504             ((long *)attr)[0] = (((long *)attr)[0] & ~(RTAI_MUTEX_RECURSIVE | RTAI_MUTEX_DEFAULT)) | RTAI_MUTEX_ERRCHECK;
01505             break;
01506         case PTHREAD_MUTEX_RECURSIVE:
01507             ((long *)attr)[0] = (((long *)attr)[0] & ~(RTAI_MUTEX_DEFAULT | RTAI_MUTEX_ERRCHECK)) | RTAI_MUTEX_RECURSIVE;
01508             break;
01509         default:
01510             return EINVAL;
01511     }
01512     return 0;
01513 }
01514 
01515 RTAI_PROTO(int, __wrap_pthread_mutexattr_gettype, (const pthread_mutexattr_t *attr, int *kind))
01516 {
01517     switch (((long *)attr)[0] & (RTAI_MUTEX_DEFAULT | RTAI_MUTEX_ERRCHECK | RTAI_MUTEX_RECURSIVE)) {
01518         case RTAI_MUTEX_DEFAULT:
01519             *kind = PTHREAD_MUTEX_DEFAULT;
01520             break;
01521         case RTAI_MUTEX_ERRCHECK:
01522             *kind = PTHREAD_MUTEX_ERRORCHECK;
01523             break;
01524         case RTAI_MUTEX_RECURSIVE:
01525             *kind = PTHREAD_MUTEX_RECURSIVE;
01526             break;
01527     }
01528     return 0;
01529 }
01530 
01531 RTAI_PROTO(int, pthread_make_periodic_np, (pthread_t thread, struct timespec *start_delay, struct timespec *period))
01532 {
01533     struct { RT_TASK *task; RTIME start_time, period; } arg = { NULL, start_delay->tv_sec*1000000000LL + start_delay->tv_nsec, period->tv_sec*1000000000LL + period->tv_nsec };
01534     int retval;
01535     return !(retval = rtai_lxrt(BIDX, SIZARG, MAKE_PERIODIC_NS, &arg).i[LOW]) ? 0 : retval == RTE_UNBLKD ? EINTR : ETIMEDOUT;
01536 }
01537 
01538 RTAI_PROTO(int, pthread_wait_period_np, (void))
01539 {
01540     struct { unsigned long dummy; } arg;
01541     return rtai_lxrt(BIDX, SIZARG, WAIT_PERIOD, &arg).i[LOW];
01542 }
01543 
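A sketch of the RTAI-specific periodic helpers above; cyclic() and the 1 ms figures are invented, and the thread is assumed to have been created through RTAI's wrapped pthread_create() so that it is an LXRT task. Note that pthread_make_periodic_np() above passes a NULL task to MAKE_PERIODIC_NS, so it always acts on the calling thread whatever its first argument is.

#include <rtai_posix.h>

static void *cyclic(void *arg)
{
    struct timespec start  = { 0, 1000000 };     /* start delay, 1 ms */
    struct timespec period = { 0, 1000000 };     /* period, 1 ms */

    pthread_make_periodic_np(pthread_self(), &start, &period);
    for (;;) {
        /* ... one cycle of work ... */
        pthread_wait_period_np();                /* block until the next release point */
    }
    return NULL;
}
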
01544 /*
01545  * CONDVARS
01546  */
01547 
01548 RTAI_PROTO(int, __wrap_pthread_cond_init, (pthread_cond_t *cond, pthread_condattr_t *cond_attr))
01549 {
01550     struct { unsigned long name; long value, type; unsigned long *handle; } arg = { rt_get_name(0), 0, BIN_SEM | PRIO_Q, NULL };
01551     if (!(SET_ADR(cond) = rtai_lxrt(BIDX, SIZARG, NAMED_SEM_INIT, &arg).v[LOW])) {
01552         return ENOMEM;
01553     }
01554     return 0;
01555 }
01556 
01557 RTAI_PROTO(int, __wrap_pthread_cond_destroy, (pthread_cond_t *cond))
01558 {
01559     struct { void *cond; } arg = { SET_ADR(cond) };
01560     if (arg.cond) {
01561         if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] < 0) {
01562             return EBUSY;
01563         }
01564         SET_ADR(cond) = NULL;
01565         while (rtai_lxrt(BIDX, SIZARG, NAMED_SEM_DELETE, &arg).i[LOW]);
01566     }
01567     return 0;
01568 }
01569 
01570 RTAI_PROTO(int, __wrap_pthread_cond_signal, (pthread_cond_t *cond))
01571 {
01572     struct { void *cond; } arg = { SET_ADR(cond) };
01573     if (arg.cond) {
01574         rtai_lxrt(BIDX, SIZARG, COND_SIGNAL, &arg);
01575         return 0;
01576     }
01577     return EINVAL;
01578 }
01579 
01580 RTAI_PROTO(int, __wrap_pthread_cond_broadcast, (pthread_cond_t *cond))
01581 {
01582     struct { void *cond; } arg = { SET_ADR(cond) };
01583     if (arg.cond) {
01584         rtai_lxrt(BIDX, SIZARG, SEM_BROADCAST, &arg);
01585         return 0;
01586     }
01587     return EINVAL;
01588 }
01589 
01590 static void internal_cond_cleanup(void *mutex) { DEC_VAL(mutex); }
01591 
01592 RTAI_PROTO(int, __wrap_pthread_cond_wait, (pthread_cond_t *cond, pthread_mutex_t *mutex))
01593 {
01594     int oldtype, retval;
01595     struct { void *cond; void *mutex; } arg = { SET_ADR(cond), SET_ADR(mutex) };
01596     pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
01597     pthread_testcancel();
01598     if (arg.cond && arg.mutex) {
01599         pthread_cleanup_push(internal_cond_cleanup, mutex);
01600         INC_VAL(mutex);
01601         retval = !rtai_lxrt(BIDX, SIZARG, COND_WAIT, &arg).i[LOW] ? 0 : EPERM;
01602         DEC_VAL(mutex);
01603         pthread_cleanup_pop(0);
01604     } else {
01605         retval = EINVAL;
01606     }
01607     pthread_testcancel();
01608     pthread_setcanceltype(oldtype, NULL);
01609     return retval;
01610 }
01611 
01612 RTAI_PROTO(int, __wrap_pthread_cond_timedwait, (pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime))
01613 {
01614     int oldtype, retval;
01615     struct { void *cond; void *mutex; RTIME time; } arg = { SET_ADR(cond), SET_ADR(mutex), timespec2count(abstime) };
01616     pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
01617     pthread_testcancel();
01618     if (arg.cond && arg.mutex && abstime->tv_nsec >= 0 && abstime->tv_nsec < 1000000000) {
01619         pthread_cleanup_push(internal_cond_cleanup, mutex);
01620         INC_VAL(mutex);
01621         if (abs(retval = rtai_lxrt(BIDX, SIZARG, COND_WAIT_UNTIL, &arg).i[LOW]) == RTE_TIMOUT) {
01622             retval = ETIMEDOUT;
01623         } else {
01624             retval = !retval ? 0 : EPERM;
01625         }
01626         DEC_VAL(mutex);
01627         pthread_cleanup_pop(0);
01628     } else {
01629         retval = EINVAL;
01630     }
01631     pthread_testcancel();
01632     pthread_setcanceltype(oldtype, NULL);
01633     return retval;
01634 }
01635 
01636 RTAI_PROTO(int, __wrap_pthread_condattr_init, (pthread_condattr_t *attr))
01637 {
01638     ((long *)attr)[0] = 0;
01639     return 0;
01640 }
01641 
01642 RTAI_PROTO(int, __wrap_pthread_condattr_destroy, (pthread_condattr_t *attr))
01643 {
01644     return 0;
01645 }
01646 
01647 RTAI_PROTO(int, __wrap_pthread_condattr_getpshared, (const pthread_condattr_t *attr, int *pshared))
01648 {
01649     *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
01650     return 0;
01651 }
01652 
01653 RTAI_PROTO(int, __wrap_pthread_condattr_setpshared, (pthread_condattr_t *attr, int pshared))
01654 {
01655     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
01656         if (pshared == PTHREAD_PROCESS_PRIVATE) {
01657             ((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
01658         } else {
01659             ((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
01660         }
01661         return 0;
01662     }
01663     return EINVAL;
01664 }
01665 
01666 #ifndef CLOCK_MONOTONIC
01667 #define CLOCK_MONOTONIC  1
01668 #endif
01669 
01670 RTAI_PROTO(int, __wrap_pthread_condattr_setclock, (pthread_condattr_t *condattr, clockid_t clockid))
01671 {
01672     if (clockid == CLOCK_MONOTONIC || clockid == CLOCK_REALTIME) {
01673         ((int *)condattr)[0] = clockid;
01674         return 0;
01675     }
01676     return EINVAL;
01677 }
01678 
01679 RTAI_PROTO(int, __wrap_pthread_condattr_getclock, (pthread_condattr_t *condattr, clockid_t *clockid))
01680 {
01681     if (clockid) {
01682         *clockid = ((int *)condattr)[0];
01683         return 0;
01684     }
01685     return EINVAL;
01686 }
01687 
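The condition-variable wrappers above map pthread_cond_* onto LXRT semaphore calls, so ordinary POSIX code keeps working once the objects have been created through the wrapped pthread_mutex_init()/pthread_cond_init(). A minimal sketch (the names init_sync, wait_ready, m, c and the one-second timeout are illustrative, and it assumes the standard symbols are routed to the __wrap_ versions at link time, as the __wrap_/__real_ naming suggests):

#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t m;
static pthread_cond_t  c;
static int ready;

static void init_sync(void)
{
    /* explicit init so the wrappers allocate the underlying LXRT objects */
    pthread_mutex_init(&m, NULL);
    pthread_cond_init(&c, NULL);
}

/* wait at most one second for 'ready' to become nonzero */
static int wait_ready(void)
{
    struct timespec abstime;
    int err = 0;

    clock_gettime(CLOCK_REALTIME, &abstime);
    abstime.tv_sec += 1;

    pthread_mutex_lock(&m);
    while (!ready && err != ETIMEDOUT)
        err = pthread_cond_timedwait(&c, &m, &abstime);
    pthread_mutex_unlock(&m);
    return err;                 /* 0 if signalled in time, ETIMEDOUT otherwise */
}
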
01688 /*
01689  * RWLOCKS
01690  */
01691 
01692 RTAI_PROTO(int, __wrap_pthread_rwlock_init, (pthread_rwlock_t *rwlock, pthread_rwlockattr_t *attr))
01693 {
01694     struct { unsigned long name; long type; } arg = { rt_get_name(0), RESEM_CHEKWT };
01695     ((pthread_rwlock_t **)rwlock)[0] = (pthread_rwlock_t *)rtai_lxrt(BIDX, SIZARG, LXRT_RWL_INIT, &arg).v[LOW];
01696     return 0;
01697 }
01698 
01699 RTAI_PROTO(int, __wrap_pthread_rwlock_destroy, (pthread_rwlock_t *rwlock))
01700 {
01701     struct { void *rwlock; } arg = { SET_ADR(rwlock) };
01702     if (arg.rwlock) {
01703         return rtai_lxrt(BIDX, SIZARG, LXRT_RWL_DELETE, &arg).i[LOW] > 0 ? 0 : EINVAL;
01704     }
01705     return EINVAL;
01706 }
01707 
01708 RTAI_PROTO(int, __wrap_pthread_rwlock_rdlock, (pthread_rwlock_t *rwlock))
01709 {
01710     struct { void *rwlock; } arg = { SET_ADR(rwlock) };
01711     if (arg.rwlock) {
01712         return rtai_lxrt(BIDX, SIZARG, RWL_RDLOCK, &arg).i[LOW] ? EDEADLOCK : 0;
01713     }
01714     return EINVAL;
01715 }
01716 
01717 RTAI_PROTO(int, __wrap_pthread_rwlock_tryrdlock, (pthread_rwlock_t *rwlock))
01718 {
01719     struct { void *rwlock; } arg = { SET_ADR(rwlock) };
01720     if (arg.rwlock) {
01721         return rtai_lxrt(BIDX, SIZARG, RWL_RDLOCK_IF, &arg).i[LOW] ? EBUSY : 0;
01722     }
01723     return EINVAL;
01724 }
01725 
01726 #ifdef __USE_XOPEN2K
01727 RTAI_PROTO(int, __wrap_pthread_rwlock_timedrdlock, (pthread_rwlock_t *rwlock, struct timespec *abstime))
01728 {
01729     struct { void *rwlock; RTIME time; } arg = { SET_ADR(rwlock), timespec2count(abstime) };
01730     if (arg.rwlock && abstime->tv_nsec >= 0 && abstime->tv_nsec < 1000000000) {
01731         return rtai_lxrt(BIDX, SIZARG, RWL_RDLOCK_UNTIL, &arg).i[LOW] ? ETIMEDOUT : 0;
01732     }
01733     return EINVAL;
01734 }
01735 #endif
01736 
01737 RTAI_PROTO(int, __wrap_pthread_rwlock_wrlock, (pthread_rwlock_t *rwlock))
01738 {
01739     struct { void *rwlock; } arg = { SET_ADR(rwlock) };
01740     if (arg.rwlock) {
01741         return rtai_lxrt(BIDX, SIZARG, RWL_WRLOCK, &arg).i[LOW] ? EDEADLOCK : 0;
01742     }
01743     return EINVAL;
01744 }
01745 
01746 RTAI_PROTO(int, __wrap_pthread_rwlock_trywrlock, (pthread_rwlock_t *rwlock))
01747 {
01748     struct { void *rwlock; } arg = { SET_ADR(rwlock) };
01749     if (arg.rwlock) {
01750         return rtai_lxrt(BIDX, SIZARG, RWL_WRLOCK_IF, &arg).i[LOW] ? EBUSY : 0;
01751     }
01752     return EINVAL;
01753 }
01754 
01755 #ifdef __USE_XOPEN2K
01756 RTAI_PROTO(int, __wrap_pthread_rwlock_timedwrlock, (pthread_rwlock_t *rwlock, struct timespec *abstime))
01757 {
01758     struct { void *rwlock; RTIME time; } arg = { SET_ADR(rwlock), timespec2count(abstime) };
01759     if (arg.rwlock && abstime->tv_nsec >= 0 && abstime->tv_nsec < 1000000000) {
01760         return rtai_lxrt(BIDX, SIZARG, RWL_WRLOCK_UNTIL, &arg).i[LOW] ? ETIMEDOUT : 0;
01761     }
01762     return EINVAL;
01763 }
01764 #endif
01765 
01766 RTAI_PROTO(int, __wrap_pthread_rwlock_unlock, (pthread_rwlock_t *rwlock))
01767 {
01768     struct { void *rwlock; } arg = { SET_ADR(rwlock) };
01769     if (arg.rwlock) {
01770         return !rtai_lxrt(BIDX, SIZARG, RWL_UNLOCK, &arg).i[LOW] ? 0 : EPERM;
01771     }
01772     return EINVAL;
01773 }
01774 
01775 RTAI_PROTO(int, __wrap_pthread_rwlockattr_init, (pthread_rwlockattr_t *attr))
01776 {
01777     ((long *)attr)[0] = 0;
01778     return 0;
01779 }
01780 
01781 RTAI_PROTO(int, __wrap_pthread_rwlockattr_destroy, (pthread_rwlockattr_t *attr))
01782 {
01783     return 0;
01784 }
01785 
01786 RTAI_PROTO(int, __wrap_pthread_rwlockattr_getpshared, (const pthread_rwlockattr_t *attr, int *pshared))
01787 {
01788     *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
01789     return 0;
01792 }
01793 
01794 RTAI_PROTO(int, __wrap_pthread_rwlockattr_setpshared, (pthread_rwlockattr_t *attr, int pshared))
01795 {
01796     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
01797         if (pshared == PTHREAD_PROCESS_PRIVATE) {
01798             ((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
01799         } else {
01800             ((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
01801         }
01802         return 0;
01803     }
01804     return EINVAL;
01805 }
01806 
01807 RTAI_PROTO(int, __wrap_pthread_rwlockattr_getkind_np, (const pthread_rwlockattr_t *attr, int *pref))
01808 {
01809     return 0;
01810 }
01811 
01812 RTAI_PROTO(int, __wrap_pthread_rwlockattr_setkind_np, (pthread_rwlockattr_t *attr, int pref))
01813 {
01814     return 0;
01815 }
01816 
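pthread_rwlock_init() above creates an LXRT resource semaphore behind the rwlock, after which the usual reader/writer pattern applies unchanged. A hedged sketch (table_lock, table, table_read and table_write are illustrative names):

#include <pthread.h>

static pthread_rwlock_t table_lock;     /* created with pthread_rwlock_init(&table_lock, NULL) */
static int table[16];

static int table_read(int i)
{
    int v;
    pthread_rwlock_rdlock(&table_lock);     /* many readers may hold this concurrently */
    v = table[i];
    pthread_rwlock_unlock(&table_lock);
    return v;
}

static void table_write(int i, int v)
{
    pthread_rwlock_wrlock(&table_lock);     /* exclusive access for the writer */
    table[i] = v;
    pthread_rwlock_unlock(&table_lock);
}
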
01817 /*
01818  * BARRIERS
01819  */
01820 
01821 #ifdef __USE_XOPEN2K
01822 
01823 RTAI_PROTO(int, __wrap_pthread_barrier_init,(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count))
01824 {
01825     if (count > 0) {
01826         struct { unsigned long name; long count, type; unsigned long *handle; } arg = { rt_get_name(0), count, CNT_SEM | PRIO_Q, NULL };
01827         return (((pthread_barrier_t **)barrier)[0] = (pthread_barrier_t *)rtai_lxrt(BIDX, SIZARG, NAMED_SEM_INIT, &arg).v[LOW]) ? 0 : ENOMEM;
01828     }
01829     return EINVAL;
01830 }
01831 
01832 RTAI_PROTO(int, __wrap_pthread_barrier_destroy,(pthread_barrier_t *barrier))
01833 {
01834     struct { void *sem; } arg = { SET_ADR(barrier) };
01835     SET_ADR(barrier) = NULL;
01836     if (rtai_lxrt(BIDX, SIZARG, SEM_WAIT_IF, &arg).i[LOW] < 0) {
01837         return EBUSY;
01838     }
01839     return rtai_lxrt(BIDX, SIZARG, NAMED_SEM_DELETE, &arg).i[LOW] == RT_OBJINV ? EINVAL : 0;
01840 }
01841 
01842 RTAI_PROTO(int, __wrap_pthread_barrier_wait,(pthread_barrier_t *barrier))
01843 {
01844     struct { void *sem; } arg = { SET_ADR(barrier) };
01845     if (arg.sem) {
01846         return !rtai_lxrt(BIDX, SIZARG, SEM_WAIT_BARRIER, &arg).i[LOW] ? PTHREAD_BARRIER_SERIAL_THREAD : 0;
01847     }
01848     return EINVAL;
01849 }
01850 
01851 RTAI_PROTO(int, __wrap_pthread_barrierattr_init, (pthread_barrierattr_t *attr))
01852 {
01853     ((long *)attr)[0] = PTHREAD_PROCESS_PRIVATE;
01854     return 0;
01855 }
01856 
01857 RTAI_PROTO(int, __wrap_pthread_barrierattr_destroy, (pthread_barrierattr_t *attr))
01858 {
01859     return 0;
01860 }
01861 
01862 RTAI_PROTO(int, __wrap_pthread_barrierattr_setpshared, (pthread_barrierattr_t *attr, int pshared))
01863 {
01864     if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
01865         ((long *)attr)[0] = pshared;
01866         return 0;
01867     }
01868     return EINVAL;
01869 }
01870 
01871 RTAI_PROTO(int, __wrap_pthread_barrierattr_getpshared, (const pthread_barrierattr_t *attr, int *pshared))
01872 {
01873     *pshared = ((long *)attr)[0];
01874     return 0;
01875 }
01876 
01877 #endif
01878 
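The barrier wrappers above are built on a named counting semaphore and SEM_WAIT_BARRIER, so user code follows the standard POSIX pattern. A minimal sketch (NWORKERS, start_barrier and worker are illustrative):

#include <pthread.h>

#define NWORKERS  4

static pthread_barrier_t start_barrier;     /* pthread_barrier_init(&start_barrier, NULL, NWORKERS) before use */

static void *worker(void *arg)
{
    /* every worker blocks here until all NWORKERS threads have arrived;
       exactly one of them gets PTHREAD_BARRIER_SERIAL_THREAD back */
    if (pthread_barrier_wait(&start_barrier) == PTHREAD_BARRIER_SERIAL_THREAD) {
        /* e.g. note that the whole pool is now running */
    }
    /* ... real-time work ... */
    return NULL;
}
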
01879 /*
01880  * SCHEDULING
01881  */
01882 
01883 #define PTHREAD_SOFT_REAL_TIME_NP  1
01884 #define PTHREAD_HARD_REAL_TIME_NP  2
01885 
01886 RTAI_PROTO(int, pthread_setschedparam_np, (int priority, int policy, int rr_quantum_ns, unsigned long cpus_allowed, int mode))
01887 { 
01888     RT_TASK *task;
01889     if ((task = rt_buddy())) {
01890         int hs;
01891         if (cpus_allowed) {
01892             hs = MAKE_SOFT();
01893             rt_task_init_schmod(0, 0, 0, 0, 0, cpus_allowed);
01894             if (!mode) {
01895                 MAKE_HARD(hs);
01896             }
01897         }
01898         if (priority >= 0) {
01899             rt_change_prio(task, priority);
01900         }
01901     } else if (policy == SCHED_FIFO || policy == SCHED_RR || priority >= 0 || cpus_allowed) {
01902         rt_task_init_schmod(rt_get_name(NULL), priority, 0, 0, policy, cpus_allowed);
01903         rt_grow_and_lock_stack(PTHREAD_STACK_MIN);
01904     } else {
01905         return EINVAL;
01906     }
01907     if (policy == SCHED_FIFO || policy == SCHED_RR) {
01908         rt_set_sched_policy(task, policy == SCHED_FIFO ? 0 : 1, rr_quantum_ns);
01909     }
01910     if (mode) {
01911         if (mode == PTHREAD_HARD_REAL_TIME_NP) {
01912             rt_make_hard_real_time();
01913         } else {
01914             rt_make_soft_real_time();
01915         }
01916     }
01917     return 0;
01918 }
01919 
01920 RTAI_PROTO(void, pthread_hard_real_time_np, (void))
01921 {
01922     rt_make_hard_real_time();
01923 }
01924 
01925 RTAI_PROTO(void, pthread_soft_real_time_np, (void))
01926 {
01927     rt_make_soft_real_time();
01928 }
01929 
01930 RTAI_PROTO(int, pthread_gettid_np, (void))
01931 {
01932         struct { unsigned long dummy; } arg;
01933         return rtai_lxrt(BIDX, SIZARG, RT_GETTID, &arg).i[LOW];
01934 }
01935 
01936 #define PTHREAD_SOFT_REAL_TIME  PTHREAD_SOFT_REAL_TIME_NP
01937 #define PTHREAD_HARD_REAL_TIME  PTHREAD_HARD_REAL_TIME_NP
01938 #define pthread_init_real_time_np(a, b, c, d, e) \
01939     pthread_setschedparam_np (b, c, 0, d, e)
01940 #define pthread_make_hard_real_time_np() \
01941     pthread_hard_real_time_np()
01942 #define pthread_make_soft_real_time_np() \
01943     pthread_soft_real_time_np()
01944 
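pthread_setschedparam_np() above bundles LXRT registration, priority and policy selection, CPU affinity and the soft/hard transition into one call. A minimal sketch of a thread turning itself into a hard real-time FIFO thread (priority 10 and CPU mask 0x1 are illustrative values):

#include <pthread.h>
#include <sched.h>
#include <sys/mman.h>

static void *rt_thread_fun(void *arg)
{
    mlockall(MCL_CURRENT | MCL_FUTURE);     /* avoid page faults while in hard real time */

    /* priority 10, FIFO, no round-robin quantum, pinned to CPU 0,
       then switch to hard real time */
    pthread_setschedparam_np(10, SCHED_FIFO, 0, 0x1, PTHREAD_HARD_REAL_TIME_NP);

    /* ... hard real-time section ... */

    pthread_soft_real_time_np();            /* back to soft mode before returning */
    return NULL;
}
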
01945 #if 0
01946 #if 1
01947 int __real_pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine)(void *), void *arg);
01948 RTAI_PROTO(int, __wrap_pthread_create,(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine)(void *), void *arg))
01949 {
01950 #include <sys/poll.h>
01951 
01952     int hs, ret;
01953     hs = MAKE_SOFT();
01954     ret = __real_pthread_create(thread, attr, start_routine, arg);
01955     MAKE_HARD(hs);
01956     return ret;
01957 }
01958 #else
01959 #include <sys/mman.h>
01960 
01961 struct local_pthread_args_struct { void *(*start_routine)(void *); void *arg; int pipe[3]; };
01962 
01963 #ifndef __SUPPORT_THREAD_FUN_
01964 #define __SUPPORT_THREAD_FUN_
01965 
01966 static void *support_thread_fun(struct local_pthread_args_struct *args)
01967 {
01968         RT_TASK *task;
01969     void *(*start_routine)(void *) = args->start_routine;
01970     void *arg = args->arg;
01971     pthread_t thread;
01972     int policy;
01973     struct sched_param param;
01974     
01975     pthread_getschedparam(thread = pthread_self(), &policy, &param);
01976     if (policy == SCHED_OTHER) {
01977         policy = SCHED_RR;
01978         param.sched_priority = sched_get_priority_min(SCHED_RR);
01979     }
01980     pthread_setschedparam(pthread_self(), policy, &param);
01981     task = rt_task_init_schmod(rt_get_name(0), sched_get_priority_max(policy) - param.sched_priority, 0, 0, policy, 0xF);
01982     close(args->pipe[1]);
01983     mlockall(MCL_CURRENT | MCL_FUTURE);
01984     rt_make_hard_real_time();
01985     start_routine(arg);
01986     rt_make_soft_real_time();
01987     return NULL;
01988 }
01989 
01990 #endif /* __SUPPORT_THREAD_FUN_ */
01991 
01992 RTAI_PROTO(int, __wrap_pthread_create,(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine)(void *), void *arg))
01993 {
01994     int hs, ret;
01995     struct local_pthread_args_struct args = { start_routine, arg };
01996     hs = MAKE_SOFT();
01997     pipe(args.pipe);
01998     ret = pthread_create(thread, attr, (void *)support_thread_fun, (void *)&args);
01999     read(args.pipe[0], &args.pipe[2], 1);
02000     close(args.pipe[0]);
02001     MAKE_HARD(hs);
02002     return ret;
02003 }
02004 #endif
02005 
02006 int __real_pthread_cancel(pthread_t thread);
02007 RTAI_PROTO(int, __wrap_pthread_cancel,(pthread_t thread))
02008 {
02009     int hs, ret;
02010     hs = MAKE_SOFT();
02011     ret = __real_pthread_cancel(thread);
02012     MAKE_HARD(hs);
02013     return ret;
02014 }
02015 
02016 int __real_pthread_sigmask(int how, const sigset_t *newmask, sigset_t *oldmask);
02017 RTAI_PROTO(int, __wrap_pthread_sigmask,(int how, const sigset_t *newmask, sigset_t *oldmask))
02018 {
02019     return __real_pthread_sigmask(how, newmask, oldmask);
02020     int hs, ret;
02021     hs = MAKE_SOFT();
02022     ret = __real_pthread_sigmask(how, newmask, oldmask);
02023     MAKE_HARD(hs);
02024     return ret;
02025 }
02026 
02027 int __real_pthread_kill(pthread_t thread, int signo);
02028 RTAI_PROTO(int, __wrap_pthread_kill,(pthread_t thread, int signo))
02029 {
02030     int hs, ret;
02031     hs = MAKE_SOFT();
02032     ret = __real_pthread_kill(thread, signo);
02033     MAKE_HARD(hs);
02034     return ret;
02035 }
02036 
02037 
02038 int __real_sigwait(const sigset_t *set, int *sig);
02039 RTAI_PROTO(int, __wrap_sigwait,(const sigset_t *set, int *sig))
02040 {
02041     int hs, ret;
02042     hs = MAKE_SOFT();
02043     ret = __real_sigwait(set, sig);
02044     MAKE_HARD(hs);
02045     return ret;
02046 }
02047 
02048 void __real_pthread_testcancel(void);
02049 RTAI_PROTO(void, __wrap_pthread_testcancel,(void))
02050 {
02051     __real_pthread_testcancel();
02052     return;
02053     int oldtype, oldstate;
02054     pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
02055     pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
02056     if (oldstate != PTHREAD_CANCEL_DISABLE && oldtype != PTHREAD_CANCEL_DEFERRED) {
02057         MAKE_SOFT();
02058         rt_task_delete(rt_buddy());
02059         pthread_exit(NULL);
02060     }
02061     pthread_setcanceltype(oldtype, &oldtype);
02062     pthread_setcancelstate(oldstate, &oldstate);
02063 }
02064 
02065 int __real_pthread_yield(void);
02066 RTAI_PROTO(int, __wrap_pthread_yield,(void))
02067 {
02068     if (rt_is_hard_real_time(rt_buddy())) {
02069         struct { unsigned long dummy; } arg;
02070         rtai_lxrt(BIDX, SIZARG, YIELD, &arg);
02071         return 0;
02072     }
02073     return __real_pthread_yield();
02074 }
02075 
02076 void __real_pthread_exit(void *retval);
02077 RTAI_PROTO(void, __wrap_pthread_exit,(void *retval))
02078 {
02079     MAKE_SOFT();
02080     rt_task_delete(NULL);
02081     __real_pthread_exit(retval);
02082 }
02083 
02084 int __real_pthread_join(pthread_t thread, void **thread_return);
02085 RTAI_PROTO(int, __wrap_pthread_join,(pthread_t thread, void **thread_return))
02086 {
02087     int hs, ret;
02088     hs = MAKE_SOFT();
02089     ret = __real_pthread_join(thread, thread_return);
02090     MAKE_HARD(hs);
02091     return ret;
02092 }
02093 #endif
02094 
02095 /*
02096  * SPINLOCKS
02097  */
02098 
02099 #ifdef __USE_XOPEN2K
02100 
02101 #if 0
02102 #define ORIGINAL_TEST
02103 RTAI_PROTO(int, __wrap_pthread_spin_init, (pthread_spinlock_t *lock, int pshared))
02104 {
02105     return lock ? (((pid_t *)lock)[0] = 0) : EINVAL;
02106 }
02107 
02108 RTAI_PROTO(int, __wrap_pthread_spin_destroy, (pthread_spinlock_t *lock))
02109 {
02110     if (lock) {
02111         return ((pid_t *)lock)[0] ? EBUSY : (((pid_t *)lock)[0] = 0);
02112     }
02113     return EINVAL;
02114 }
02115 
02116 RTAI_PROTO(int, __wrap_pthread_spin_lock,(pthread_spinlock_t *lock))
02117 {
02118     if (lock) {
02119         while (atomic_cmpxchg(lock, 0, 1));
02120         return 0;
02121     }
02122     return EINVAL;
02123 }
02124 
02125 RTAI_PROTO(int, __wrap_pthread_spin_trylock,(pthread_spinlock_t *lock))
02126 {
02127     if (lock) {
02128         return atomic_cmpxchg(lock, 0, 1) ? EBUSY : 0;
02129     }
02130     return EINVAL;
02131 }
02132 
02133 RTAI_PROTO(int, __wrap_pthread_spin_unlock,(pthread_spinlock_t *lock))
02134 {
02135     if (lock) {
02136         return ((pid_t *)lock)[0] = 0;
02137     }
02138     return EINVAL;
02139 }
02140 #else
02141 static inline int _pthread_gettid_np(void)
02142 {
02143         struct { unsigned long dummy; } arg;
02144         return rtai_lxrt(BIDX, SIZARG, RT_GETTID, &arg).i[LOW];
02145 }
02146 
02147 RTAI_PROTO(int, __wrap_pthread_spin_init, (pthread_spinlock_t *lock, int pshared))
02148 {
02149     return lock ? (((pid_t *)lock)[0] = 0) : EINVAL;
02150 }
02151 
02152 RTAI_PROTO(int, __wrap_pthread_spin_destroy, (pthread_spinlock_t *lock))
02153 {
02154     if (lock) {
02155         return ((pid_t *)lock)[0] ? EBUSY : (((pid_t *)lock)[0] = 0);
02156     }
02157     return EINVAL;
02158 }
02159 
02160 RTAI_PROTO(int, __wrap_pthread_spin_lock,(pthread_spinlock_t *lock))
02161 {
02162     if (lock) {
02163         pid_t tid;
02164         if (((pid_t *)lock)[0] == (tid = _pthread_gettid_np())) {
02165             return EDEADLOCK;
02166         }
02167         while (atomic_cmpxchg((void *)lock, 0, tid));
02168         return 0;
02169     }
02170     return EINVAL;
02171 }
02172 
02173 RTAI_PROTO(int, __wrap_pthread_spin_trylock,(pthread_spinlock_t *lock))
02174 {
02175     if (lock) {
02176         return atomic_cmpxchg((void *)lock, 0, _pthread_gettid_np()) ? EBUSY : 0;
02177     }
02178     return EINVAL;
02179 }
02180 
02181 RTAI_PROTO(int, __wrap_pthread_spin_unlock,(pthread_spinlock_t *lock))
02182 {
02183     if (lock) {
02184 #if 0
02185         return ((pid_t *)lock)[0] = 0;
02186 #else
02187         return ((pid_t *)lock)[0] != _pthread_gettid_np() ? EPERM : (((pid_t *)lock)[0] = 0);
02188 #endif
02189     }
02190     return EINVAL;
02191 }
02192 #endif
02193 
02194 #endif
02195 
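The active spin-lock implementation above stores the caller's tid in the lock word with atomic_cmpxchg(), which is what lets it return EDEADLOCK on a recursive lock attempt and EPERM when a non-owner unlocks. Usage stays the plain POSIX pattern; a sketch (counter_lock and counter_inc are illustrative names):

#include <pthread.h>

static pthread_spinlock_t counter_lock;     /* pthread_spin_init(&counter_lock, PTHREAD_PROCESS_PRIVATE) first */
static volatile long counter;

static void counter_inc(void)
{
    pthread_spin_lock(&counter_lock);       /* spins until the CAS on the tid succeeds */
    counter++;
    pthread_spin_unlock(&counter_lock);     /* EPERM if the caller does not own the lock */
}
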
02196 /*
02197  * TIMINGS
02198  */
02199 
02200 RTAI_PROTO(int, __wrap_clock_getres, (clockid_t clockid, struct timespec *res))
02201 {
02202     if (clockid == CLOCK_MONOTONIC || clockid == CLOCK_REALTIME) {
02203         res->tv_sec = 0;
02204         if (!(res->tv_nsec = count2nano(1))) {
02205             res->tv_nsec = 1;
02206         }
02207         return 0;
02208     }
02209     errno = EINVAL;
02210     return -1;
02211 }
02212 
02213 RTAI_PROTO(int, __wrap_clock_gettime, (clockid_t clockid, struct timespec *tp))
02214 {
02215     if (clockid == CLOCK_MONOTONIC) {
02216         count2timespec(rt_get_time(), tp);
02217         return 0;
02218     } else if (clockid == CLOCK_REALTIME) {
02219         count2timespec(rt_get_real_time(), tp);
02220         return 0;
02221     }
02222     errno = EINVAL;
02223     return -1;
02224 }
02225 
02226 RTAI_PROTO(int, __wrap_clock_settime, (clockid_t clockid, const struct timespec *tp))
02227 {
02228     if (clockid == CLOCK_REALTIME) {
02229         int hs;
02230         hs = MAKE_SOFT();
02231         rt_gettimeorig(NULL);
02232         MAKE_HARD(hs);
02233         return 0;
02234     }
02235     errno = ENOTSUP;
02236     return -1;
02237 }
02238 
02239 RTAI_PROTO(int, __wrap_clock_nanosleep,(clockid_t clockid, int flags, const struct timespec *rqtp, struct timespec *rmtp))
02240 {
02241     int canc_type;
02242     RTIME expire;
02243 
02244     if (clockid != CLOCK_MONOTONIC && clockid != CLOCK_REALTIME) {
02245         return -ENOTSUP;
02246     }
02247 
02248     if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
02249         return -EINVAL;
02250     }
02251 
02252     pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &canc_type);
02253 
02254     expire = timespec2count(rqtp);
02255     if (clockid == CLOCK_MONOTONIC) {
02256         if (flags != TIMER_ABSTIME) {
02257             expire += rt_get_time();
02258         }
02259         rt_sleep_until(expire);
02260         expire -= rt_get_time();
02261     } else {
02262         if (flags != TIMER_ABSTIME) {
02263             expire += rt_get_real_time();
02264         }
02265         rt_sleep_until(expire);
02266         expire -= rt_get_real_time();
02267     }
02268     if (expire > 0) {
02269         if (rmtp) {
02270             count2timespec(expire, rmtp);
02271         }
02272         return  -EINTR;
02273     }
02274     
02275     pthread_setcanceltype(canc_type, NULL);
02276 
02277     return 0;
02278 }
02279 
02280 RTAI_PROTO(int, __wrap_nanosleep,(const struct timespec *rqtp, struct timespec *rmtp))
02281 {
02282     int canc_type;
02283     RTIME expire;
02284     if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
02285         return -EINVAL;
02286     }
02287 
02288     pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &canc_type);
02289 
02290     rt_sleep_until(expire = rt_get_time() + timespec2count(rqtp));
02291     if ((expire -= rt_get_time()) > 0) {
02292         if (rmtp) {
02293             count2timespec(expire, rmtp);
02294         }
02295         return -EINTR;
02296     }
02297 
02298     pthread_setcanceltype(canc_type, NULL);
02299 
02300     return 0;
02301 }
02302 
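__wrap_clock_nanosleep() above accepts TIMER_ABSTIME on both CLOCK_MONOTONIC and CLOCK_REALTIME, which is the usual way to build a drift-free periodic loop. A minimal sketch (PERIOD_NS and periodic_loop are illustrative; the calling thread is assumed to be scheduled by RTAI already):

#include <time.h>

#define PERIOD_NS  1000000L     /* 1 ms period, illustrative */

static void periodic_loop(int cycles)
{
    struct timespec t;

    clock_gettime(CLOCK_MONOTONIC, &t);
    while (cycles-- > 0) {
        /* advance the absolute wakeup time by one period */
        t.tv_nsec += PERIOD_NS;
        if (t.tv_nsec >= 1000000000L) {
            t.tv_nsec -= 1000000000L;
            t.tv_sec++;
        }
        clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t, NULL);
        /* ... one cycle of work ... */
    }
}
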
02303 /*
02304  * TIMER
02305  */
02306  
02307 static int support_posix_timer(void *data)
02308 {
02309     RT_TASK *task;
02310     struct rt_tasklet_struct usptasklet;
02311     struct data_stru { struct rt_tasklet_struct *tasklet; long signum; } data_struct;
02312     
02313     data_struct = *(struct data_stru *)data;
02314 
02315     if (!(task = rt_thread_init((unsigned long)data_struct.tasklet, 98, 0, SCHED_FIFO, 0xF))) {
02316         printf("CANNOT INIT POSIX TIMER SUPPORT TASKLET\n");
02317         return -1;
02318     } else {
02319         struct { struct rt_tasklet_struct *tasklet, *usptasklet; RT_TASK *task; } reg = { data_struct.tasklet, &usptasklet, task };
02320         rtai_lxrt(TASKLETS_IDX, sizeof(reg), REG_TASK, &reg);
02321     }
02322 
02323     mlockall(MCL_CURRENT | MCL_FUTURE);
02324     rt_make_hard_real_time();
02325     
02326     if (data_struct.signum) {
02327         while (1) {
02328             rt_task_suspend(task);
02329             if (usptasklet.handler) {
02330                 pthread_kill((pthread_t)usptasklet.data, data_struct.signum);
02331             } else {
02332                 break;
02333             }
02334         }
02335     } else {    
02336         while (1) { 
02337             rt_task_suspend(task);
02338             if (usptasklet.handler) {
02339                 usptasklet.handler(usptasklet.data);
02340             } else {
02341                 break;
02342             }
02343         }
02344     }
02345     
02346     rtai_sti();
02347     rt_make_soft_real_time();
02348     rt_task_delete(task);
02349 
02350     return 0;
02351 }
02352 
02353 RTAI_PROTO (int, __wrap_timer_create, (clockid_t clockid, struct sigevent *evp, timer_t *timerid))
02354 {
02355     void (*handler)(unsigned long) = ((void (*)(unsigned long))1);
02356     int pid = -1;
02357     unsigned long data = 0;
02358     struct { struct rt_tasklet_struct *tasklet; long signum; } data_supfun;
02359     
02360     if (clockid != CLOCK_MONOTONIC && clockid != CLOCK_REALTIME) {
02361         errno = ENOTSUP;
02362         return -1;
02363     }
02364         
02365     if (evp == NULL) {
02366         data_supfun.signum = SIGALRM;
02367     } else {
02368         if (evp->sigev_notify == SIGEV_SIGNAL) {
02369             data_supfun.signum = evp->sigev_signo;
02370             data = (unsigned long)evp->sigev_value.sival_ptr;
02371         } else if (evp->sigev_notify == SIGEV_THREAD) {
02372             data_supfun.signum = 0;
02373             data = (unsigned long)evp->sigev_value.sival_int;
02374             handler = (void (*)(unsigned long)) evp->_sigev_un._sigev_thread._function;
02375             pid = 1;
02376         }
02377     }
02378 
02379     struct { struct rt_tasklet_struct *timer; void (*handler)(unsigned long); unsigned long data; long pid; long thread; } arg = { NULL, handler, data, pid, 0 };
02380     arg.timer = (struct rt_tasklet_struct*)rtai_lxrt(TASKLETS_IDX, SIZARG, INIT, &arg).v[LOW];
02381     data_supfun.tasklet = arg.timer; 
02382     arg.thread = rt_thread_create((void *)support_posix_timer, &data_supfun, TASKLET_STACK_SIZE);
02383     *timerid = (timer_t)rtai_lxrt(TASKLETS_IDX, SIZARG, PTIMER_CREATE, &arg).i[LOW];
02384     
02385     return 0;
02386 }
02387 
02388 RTAI_PROTO (int, __wrap_timer_gettime, (timer_t timerid, struct itimerspec *value))
02389 {
02390     RTIME timer_times[2];
02391     
02392     struct { timer_t timer; RTIME *timer_times; } arg = { timerid, timer_times };
02393     rtai_lxrt(TASKLETS_IDX, SIZARG, PTIMER_GETTIME, &arg);
02394     
02395     count2timespec( timer_times[0], &(value->it_value) );
02396     count2timespec( timer_times[1], &(value->it_interval) );
02397     
02398     return 0;
02399 }
02400 
02401 RTAI_PROTO (int, __wrap_timer_settime, (timer_t timerid, int flags, const struct itimerspec *value,  struct itimerspec *ovalue))
02402 {
02403     if (ovalue != NULL) {
02404         __wrap_timer_gettime(timerid, ovalue);
02405     }
02406     struct { timer_t timer; const struct itimerspec *value; unsigned long data; long flags; } arg = { timerid, value, pthread_self(), flags};
02407     rtai_lxrt(TASKLETS_IDX, SIZARG, PTIMER_SETTIME, &arg);
02408     
02409     return 0;
02410 }
02411 
02412 RTAI_PROTO (int, __wrap_timer_getoverrun, (timer_t timerid))
02413 {
02414     struct { timer_t timer; } arg = { timerid };
02415     return rtai_lxrt(TASKLETS_IDX, SIZARG, PTIMER_OVERRUN, &arg).rt;
02416 }
02417 
02418 RTAI_PROTO (int, __wrap_timer_delete, (timer_t timerid))
02419 {
02420     int thread;
02421     
02422     struct { timer_t timer; long space;} arg_del = { timerid, 1 };
02423     if ((thread = rtai_lxrt(TASKLETS_IDX, sizeof(arg_del), PTIMER_DELETE, &arg_del).i[LOW])) {
02424         rt_thread_join(thread);
02425     }
02426     
02427     return 0;   
02428 }
02429 
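timer_create() above spawns a hard real-time support task (support_posix_timer()) that either raises the requested signal or runs the SIGEV_THREAD notification function on every expiry. A minimal sketch of a 10 ms periodic timer (tick, start_timer and the period are illustrative):

#include <signal.h>
#include <string.h>
#include <time.h>

static void tick(union sigval sv)
{
    (void)sv;
    /* invoked from the timer support task on every expiry */
}

static int start_timer(timer_t *tid)
{
    struct sigevent   ev;
    struct itimerspec its;

    memset(&ev, 0, sizeof(ev));
    ev.sigev_notify          = SIGEV_THREAD;
    ev.sigev_notify_function = tick;

    if (timer_create(CLOCK_MONOTONIC, &ev, tid))
        return -1;

    memset(&its, 0, sizeof(its));
    its.it_value.tv_nsec    = 10000000;     /* first expiry in 10 ms */
    its.it_interval.tv_nsec = 10000000;     /* then every 10 ms      */
    return timer_settime(*tid, 0, &its, NULL);
}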
02430 
02431 /*
02432  * FUNCTIONS (LIKELY) SAFELY USABLE IN HARD REAL TIME "AS THEY ARE",
02433  * BECAUSE THEY MAKE SENSE IN THE INITIALIZATION PHASE ONLY, I.E. BEFORE
02434  * GOING HARD REAL TIME
02435  */
02436 
02437 #define pthread_self_rt                  pthread_self
02438 #define pthread_equal_rt                 pthread_equal
02439 #define pthread_attr_init_rt             pthread_attr_init      
02440 #define pthread_attr_destroy_rt          pthread_attr_destroy
02441 #define pthread_attr_getdetachstate_rt   pthread_attr_getdetachstate
02442 #define pthread_attr_setschedpolicy_rt   pthread_attr_setschedpolicy
02443 #define pthread_attr_getschedpolicy_rt   pthread_attr_getschedpolicy 
02444 #define pthread_attr_setschedparam_rt    pthread_attr_setschedparam
02445 #define pthread_attr_getschedparam_rt    pthread_attr_getschedparam
02446 #define pthread_attr_setinheritsched_rt  pthread_attr_setinheritsched
02447 #define pthread_attr_getinheritsched_rt  pthread_attr_getinheritsched
02448 #define pthread_attr_setscope_rt         pthread_attr_setscope
02449 #define pthread_attr_getscope_rt         pthread_attr_getscope
02450 #ifdef __USE_UNIX98
02451 #define pthread_attr_setguardsize_rt     pthread_attr_setguardsize
02452 #define pthread_attr_getguardsize_rt     pthread_attr_getguardsize
02453 #endif
02454 #define pthread_attr_setstackaddr_rt     pthread_attr_setstackaddr
02455 #define pthread_attr_getstackaddr_rt     pthread_attr_getstackaddr
02456 #ifdef __USE_XOPEN2K
02457 #define pthread_attr_setstack_rt         pthread_attr_setstack
02458 #define pthread_attr_getstack_rt         pthread_attr_getstack
02459 #endif
02460 #define pthread_attr_setstacksize_rt     pthread_attr_setstacksize
02461 #define pthread_attr_getstacksize_rt     pthread_attr_getstacksize
02462 
02463 /*
02464  * WORKING FUNCTIONS USABLE IN HARD REAL TIME, THIS IS THE REAL STUFF
02465  */
02466 
02467 #define pthread_setcancelstate_rt  pthread_setcancelstate
02468 #define pthread_setcanceltype_rt   pthread_setcanceltype
02469 
02470 #ifdef __cplusplus
02471 }
02472 #endif /* __cplusplus */
02473 
02474 #endif /* !__KERNEL__ */
02475 
02476 #endif /* !_RTAI_POSIX_H_ */
