#include <linux/version.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/io.h>

#include <rtai_shm.h>

/* Map the single vmalloc'ed page at kernel address 'to' into the user VMA at 'from'. */
static __inline__ int vm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long to)
{
    vma->vm_flags |= VM_RESERVED;    /* keep the mapping out of swap-out handling */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14)
    return vm_insert_page(vma, from, vmalloc_to_page((void *)to));
#else
    return mm_remap_page_range(vma, from, kvirt_to_pa(to), PAGE_SIZE, PAGE_SHARED);
#endif
}

/* Map 'size' bytes of physically contiguous kernel memory at 'to' into the user VMA at 'from'. */
static __inline__ int km_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long to, unsigned long size)
{
    vma->vm_flags |= VM_RESERVED;
    return mm_remap_page_range(vma, from, virt_to_phys((void *)to), size, PAGE_SHARED);
}


/* Allocate 'size' bytes with vmalloc and mark every page reserved so it can be
 * mapped to user space; 'size' is expected to be a multiple of PAGE_SIZE. */
void *rvmalloc(unsigned long size)
{
    void *mem;
    unsigned long adr;

    if ((mem = vmalloc(size))) {
        adr = (unsigned long)mem;
        while (size > 0) {
            SetPageReserved(vmalloc_to_page((void *)adr));
            adr  += PAGE_SIZE;
            size -= PAGE_SIZE;
        }
    }
    return mem;
}

/* Undo rvmalloc(): clear the reserved bit on every page, then vfree the area. */
void rvfree(void *mem, unsigned long size)
{
    unsigned long adr;

    if ((adr = (unsigned long)mem)) {
        while (size > 0) {
            ClearPageReserved(vmalloc_to_page((void *)adr));
            adr  += PAGE_SIZE;
            size -= PAGE_SIZE;
        }
        vfree(mem);
    }
}


/* mmap() helper for rvmalloc'ed memory: map the requested, page-aligned window
 * of 'mem' (at most 'memsize' bytes) into the user VMA one page at a time,
 * since vmalloc memory is not physically contiguous. */
int rvmmap(void *mem, unsigned long memsize, struct vm_area_struct *vma)
{
    unsigned long pos, size, offset;
    unsigned long start = vma->vm_start;

    /* reject page offsets that would overflow when converted to bytes */
    if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
        return -EFAULT;
    }
    offset = vma->vm_pgoff << PAGE_SHIFT;
    size = vma->vm_end - start;
    if ((size + offset) > memsize) {
        return -EFAULT;
    }
    pos = (unsigned long)mem + offset;
    if (pos%PAGE_SIZE || start%PAGE_SIZE || size%PAGE_SIZE) {
        return -EFAULT;
    }
    while (size > 0) {
        if (vm_remap_page_range(vma, start, pos)) {
            return -EAGAIN;
        }
        start += PAGE_SIZE;
        pos   += PAGE_SIZE;
        size  -= PAGE_SIZE;
    }
    return 0;
}


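/*
 * Usage sketch (not part of the original file; all names below are
 * hypothetical): a character driver would typically back its mmap() file
 * operation with rvmalloc()/rvmmap() and release the buffer with rvfree(),
 * roughly as follows.
 */
#if 0
#define EXAMPLE_BUF_SIZE (16 * PAGE_SIZE)   /* page-aligned, as these helpers expect */
static void *example_buf;                   /* allocated with rvmalloc() at init time */

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
    return rvmmap(example_buf, EXAMPLE_BUF_SIZE, vma);
}

static int __init example_init(void)
{
    example_buf = rvmalloc(EXAMPLE_BUF_SIZE);
    return example_buf ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
    rvfree(example_buf, EXAMPLE_BUF_SIZE);
}
#endif

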
/* Allocate *msize bytes of physically contiguous memory (kmalloc up to
 * KMALLOC_LIMIT, __get_free_pages beyond it) with GFP flags 'suprt', and mark
 * the pages reserved for mmap'ing. *msize is shrunk by the distance from the
 * returned address to the next page boundary, so the caller keeps a consistent
 * (pointer, size) pair for rkmmap()/rkfree(). */
void *rkmalloc(int *msize, int suprt)
{
    unsigned long mem, adr, size;

    if (*msize <= KMALLOC_LIMIT) {
        mem = (unsigned long)kmalloc(*msize, suprt);
    } else {
        mem = (unsigned long)__get_free_pages(suprt, get_order(*msize));
    }
    if (mem) {
        adr  = PAGE_ALIGN(mem);
        size = *msize -= (adr - mem);
        while (size > 0) {
            SetPageReserved(virt_to_page(adr));
            adr  += PAGE_SIZE;
            size -= PAGE_SIZE;
        }
    }
    return (void *)mem;
}

/* Undo rkmalloc(): clear the reserved bit on every page from the first page
 * boundary onward, then release the memory through the allocator that provided it. */
void rkfree(void *mem, unsigned long size)
{
    unsigned long adr;

    if ((adr = (unsigned long)mem)) {
        unsigned long sz = size;
        adr = PAGE_ALIGN((unsigned long)mem);
        while (size > 0) {
            ClearPageReserved(virt_to_page(adr));
            adr  += PAGE_SIZE;
            size -= PAGE_SIZE;
        }
        if (sz <= KMALLOC_LIMIT) {
            kfree(mem);
        } else {
            free_pages((unsigned long)mem, get_order(sz));
        }
    }
}


/* mmap() helper for rkmalloc'ed memory: the buffer is physically contiguous,
 * so the whole page-aligned window can be remapped in a single call. */
int rkmmap(void *mem, unsigned long memsize, struct vm_area_struct *vma)
{
    unsigned long pos, size, offset;
    unsigned long start = vma->vm_start;

    /* reject page offsets that would overflow when converted to bytes */
    if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
        return -EFAULT;
    }
    offset = vma->vm_pgoff << PAGE_SHIFT;
    size = vma->vm_end - start;
    if ((size + offset) > memsize) {
        return -EFAULT;
    }
    pos = (unsigned long)mem + offset;
    if (pos%PAGE_SIZE || start%PAGE_SIZE || size%PAGE_SIZE) {
        return -EFAULT;
    }

    if (km_remap_page_range(vma, start, pos, size)) {
        return -EAGAIN;
    }
    return 0;
}
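

/*
 * Usage sketch for the kmalloc-backed path (not part of the original file;
 * names below are hypothetical): rkmalloc() may adjust the requested size, so
 * the same (pointer, size) pair must be reused for rkmmap() and rkfree().
 */
#if 0
static void *example_kbuf;
static int example_ksize = 2 * PAGE_SIZE;   /* page-aligned request */

static int example_kmmap(struct file *filp, struct vm_area_struct *vma)
{
    return rkmmap(example_kbuf, example_ksize, vma);
}

static int __init example_kinit(void)
{
    example_kbuf = rkmalloc(&example_ksize, GFP_KERNEL);
    return example_kbuf ? 0 : -ENOMEM;
}

static void __exit example_kexit(void)
{
    rkfree(example_kbuf, example_ksize);
}
#endif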