base/ipc/shm/kvmem.c

Go to the documentation of this file.
00001 /*
00002  * This program is free software; you can redistribute it and/or
00003  * modify it under the terms of the GNU General Public License as
00004  * published by the Free Software Foundation; either version 2 of the
00005  * License, or (at your option) any later version.
00006  *
00007  * This program is distributed in the hope that it will be useful,
00008  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00009  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00010  * GNU General Public License for more details.
00011  *
00012  * You should have received a copy of the GNU General Public License
00013  * along with this program; if not, write to the Free Software
00014  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00015  */
00016 
00017 #include <linux/slab.h>
00018 
00019 #include <rtai_shm.h>
00020 
/*
 * Map one page of vmalloc'ed kernel memory into a user vma.
 *
 * @vma:  target user virtual memory area
 * @from: user virtual address the page is mapped at (page aligned)
 * @to:   kernel virtual address (inside a vmalloc area) of the page
 *
 * Returns 0 on success, a negative errno otherwise.
 *
 * vmalloc memory is only virtually contiguous, so callers map it one
 * page at a time (see rvmmap()).
 */
static __inline__ int vm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long to)
{
    /* keep the vma out of swap and core dumps while user space holds it */
    vma->vm_flags |= VM_RESERVED;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14)
    /* >= 2.6.15: vm_insert_page() is the supported way to put a single
     * kernel page into a user vma */
    return vm_insert_page(vma, from, vmalloc_to_page((void *)to));
#else
    /* older kernels: translate to a physical address and remap one page */
    return mm_remap_page_range(vma, from, kvirt_to_pa(to), PAGE_SIZE, PAGE_SHARED);
#endif
}
00030 
00031 static __inline__ int km_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long to, unsigned long size)
00032 {
00033     vma->vm_flags |= VM_RESERVED;
00034     return mm_remap_page_range(vma, from, virt_to_phys((void *)to), size, PAGE_SHARED);
00035 }
00036 
00037 /* allocate user space mmapable block of memory in kernel space */
00038 void *rvmalloc(unsigned long size)
00039 {
00040     void *mem;
00041     unsigned long adr;
00042         
00043     if ((mem = vmalloc(size))) {
00044             adr = (unsigned long)mem;
00045         while (size > 0) {
00046 //          mem_map_reserve(virt_to_page(UVIRT_TO_KVA(adr)));
00047             SetPageReserved(vmalloc_to_page((void *)adr));
00048             adr  += PAGE_SIZE;
00049             size -= PAGE_SIZE;
00050         }
00051     }
00052     return mem;
00053 }
00054 
00055 void rvfree(void *mem, unsigned long size)
00056 {
00057         unsigned long adr;
00058         
00059     if ((adr = (unsigned long)mem)) {
00060         while (size > 0) {
00061 //          mem_map_unreserve(virt_to_page(UVIRT_TO_KVA(adr)));
00062             ClearPageReserved(vmalloc_to_page((void *)adr));
00063             adr  += PAGE_SIZE;
00064             size -= PAGE_SIZE;
00065         }
00066         vfree(mem);
00067     }
00068 }
00069 
00070 /* this function will map (fragment of) rvmalloc'ed memory area to user space */
00071 int rvmmap(void *mem, unsigned long memsize, struct vm_area_struct *vma)
00072 {
00073     unsigned long pos, size, offset;
00074     unsigned long start  = vma->vm_start;
00075 
00076     /* this is not time critical code, so we check the arguments */
00077     /* vma->vm_offset HAS to be checked (and is checked)*/
00078     if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
00079         return -EFAULT;
00080     }
00081     offset = vma->vm_pgoff << PAGE_SHIFT;
00082     size = vma->vm_end - start;
00083     if ((size + offset) > memsize) {
00084         return -EFAULT;
00085     }
00086     pos = (unsigned long)mem + offset;
00087     if (pos%PAGE_SIZE || start%PAGE_SIZE || size%PAGE_SIZE) {
00088         return -EFAULT;
00089     }
00090     while (size > 0) {
00091 //      if (mm_remap_page_range(vma, start, kvirt_to_pa(pos), PAGE_SIZE, PAGE_SHARED)) {
00092         if (vm_remap_page_range(vma, start, pos)) {
00093             return -EAGAIN;
00094         }
00095         start += PAGE_SIZE;
00096         pos   += PAGE_SIZE;
00097         size  -= PAGE_SIZE;
00098     }
00099     return 0;
00100 }
00101 
00102 /* allocate user space mmapable block of memory in kernel space */
00103 void *rkmalloc(int *msize, int suprt)
00104 {
00105     unsigned long mem, adr, size;
00106         
00107     if (*msize <= KMALLOC_LIMIT) {
00108         mem = (unsigned long)kmalloc(*msize, suprt);
00109     } else {
00110         mem = (unsigned long)__get_free_pages(suprt, get_order(*msize));
00111     }
00112     if (mem) {
00113         adr  = PAGE_ALIGN(mem);
00114         size = *msize -= (adr - mem);
00115         while (size > 0) {
00116 //          mem_map_reserve(virt_to_page(adr));
00117             SetPageReserved(virt_to_page(adr));
00118             adr  += PAGE_SIZE;
00119             size -= PAGE_SIZE;
00120         }
00121     }
00122     return (void *)mem;
00123 }
00124 
00125 void rkfree(void *mem, unsigned long size)
00126 {
00127         unsigned long adr;
00128         
00129     if ((adr = (unsigned long)mem)) {
00130         unsigned long sz = size;
00131         adr  = PAGE_ALIGN((unsigned long)mem);
00132         while (size > 0) {
00133 //          mem_map_unreserve(virt_to_page(adr));
00134             ClearPageReserved(virt_to_page(adr));
00135             adr  += PAGE_SIZE;
00136             size -= PAGE_SIZE;
00137         }
00138         if (sz <= KMALLOC_LIMIT) {
00139             kfree(mem);
00140         } else {
00141             free_pages((unsigned long)mem, get_order(sz));
00142         }
00143     }
00144 }
00145 
00146 /* this function will map an rkmalloc'ed memory area to user space */
00147 int rkmmap(void *mem, unsigned long memsize, struct vm_area_struct *vma)
00148 {
00149     unsigned long pos, size, offset;
00150     unsigned long start  = vma->vm_start;
00151 
00152     /* this is not time critical code, so we check the arguments */
00153     /* vma->vm_offset HAS to be checked (and is checked)*/
00154     if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
00155         return -EFAULT;
00156     }
00157     offset = vma->vm_pgoff << PAGE_SHIFT;
00158     size = vma->vm_end - start;
00159     if ((size + offset) > memsize) {
00160         return -EFAULT;
00161     }
00162     pos = (unsigned long)mem + offset;
00163     if (pos%PAGE_SIZE || start%PAGE_SIZE || size%PAGE_SIZE) {
00164         return -EFAULT;
00165     }
00166 //  if (mm_remap_page_range(vma, start, virt_to_phys((void *)pos), size, PAGE_SHARED)) {
00167     if (km_remap_page_range(vma, start, pos, size)) {
00168         return -EAGAIN;
00169     }
00170     return 0;
00171 }

Generated on Tue Feb 2 17:46:05 2010 for RTAI API by  doxygen 1.4.7