#include <linux/slab.h>

#include <rtai_shm.h>
00022 void *
rvmalloc(
unsigned long size)
00023 {
00024
void *mem;
00025
unsigned long adr;
00026
00027
if ((mem = vmalloc(size))) {
00028 adr = (
unsigned long)mem;
00029
while (size > 0) {
00030 mem_map_reserve(virt_to_page(__va(
kvirt_to_pa(adr))));
00031 adr += PAGE_SIZE;
00032 size -= PAGE_SIZE;
00033 }
00034 }
00035
return mem;
00036 }
00037
00038 void rvfree(
void *mem,
unsigned long size)
00039 {
00040
unsigned long adr;
00041
00042
if ((adr = (
unsigned long)mem)) {
00043
while (size > 0) {
00044 mem_map_unreserve(virt_to_page(__va(
kvirt_to_pa(adr))));
00045 adr += PAGE_SIZE;
00046 size -= PAGE_SIZE;
00047 }
00048 vfree(mem);
00049 }
00050 }
00051
00052
00053 int rvmmap(
void *mem,
unsigned long memsize,
struct vm_area_struct *vma) {
00054
unsigned long pos, size, offset;
00055
unsigned long start = vma->vm_start;
00056
00057
00058
00059
if (vma->vm_pgoff > (0x7FFFFFFF >> PAGE_SHIFT)) {
00060
return -EFAULT;
00061 }
00062 offset = vma->vm_pgoff << PAGE_SHIFT;
00063 size = vma->vm_end - start;
00064
if ((size + offset) > memsize) {
00065
return -EFAULT;
00066 }
00067 pos = (
unsigned long)mem + offset;
00068
if (pos%PAGE_SIZE || start%PAGE_SIZE || size%PAGE_SIZE) {
00069
return -EFAULT;
00070 }
00071
while (size > 0) {
00072
if (mm_remap_page_range(vma, start,
kvirt_to_pa(pos), PAGE_SIZE, PAGE_SHARED)) {
00073
return -EAGAIN;
00074 }
00075 start += PAGE_SIZE;
00076 pos += PAGE_SIZE;
00077 size -= PAGE_SIZE;
00078 }
00079
return 0;
00080 }
00081
00082
00083 void *
rkmalloc(
int *memsize,
int suprt)
00084 {
00085
unsigned long mem, adr, size;
00086
00087
if ((mem = (
unsigned long)kmalloc(*memsize, suprt))) {
00088 adr = PAGE_ALIGN(mem);
00089 size = *memsize -= (adr - mem);
00090
while (size > 0) {
00091 mem_map_reserve(virt_to_page(adr));
00092 adr += PAGE_SIZE;
00093 size -= PAGE_SIZE;
00094 }
00095 }
00096
return (
void *)mem;
00097 }
00098
00099 void rkfree(
void *mem,
unsigned long size)
00100 {
00101
unsigned long adr;
00102
00103
if ((adr = (
unsigned long)mem)) {
00104 adr = PAGE_ALIGN((
unsigned long)mem);
00105
while (size > 0) {
00106 mem_map_unreserve(virt_to_page(adr));
00107 adr += PAGE_SIZE;
00108 size -= PAGE_SIZE;
00109 }
00110 kfree(mem);
00111 }
00112 }
00113
00114
00115 int rkmmap(
void *mem,
unsigned long memsize,
struct vm_area_struct *vma) {
00116
unsigned long pos, size, offset;
00117
unsigned long start = vma->vm_start;
00118
00119
00120
00121
if (vma->vm_pgoff > (0x7FFFFFFF >> PAGE_SHIFT)) {
00122
return -EFAULT;
00123 }
00124 offset = vma->vm_pgoff << PAGE_SHIFT;
00125 size = vma->vm_end - start;
00126
if ((size + offset) > memsize) {
00127
return -EFAULT;
00128 }
00129 pos = (
unsigned long)mem + offset;
00130
if (pos%PAGE_SIZE || start%PAGE_SIZE || size%PAGE_SIZE) {
00131
return -EFAULT;
00132 }
00133
if (mm_remap_page_range(vma, start, virt_to_phys((
void *)pos), size, PAGE_SHARED)) {
00134
return -EAGAIN;
00135 }
00136
return 0;
00137 }