A brief analysis of how linked lists work in Linux-like systems
/*
 * An analysis of the linked-list structure used in Linux-like systems
 *
 * In Linux-like systems the linked list is generic and highly reusable, nothing like the
 * rigid list structures taught at university. The idea behind it is described in many
 * Linux texts; for the theory, see the book 《系统程序员成长之路》 by 李先静 (Li Xianjing).
 *
 * This article walks through the list code used in xenarm to show concretely how lists
 * in Linux-like systems are put together.
 *
 * Time being limited, I have only expanded the relevant macros and annotated and trimmed
 * the code; it has not been polished into a proper article. Apologies to the reader.
 */
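/*
 * A minimal sketch of the idea (not taken from the xenarm source): the data does not
 * carry its own next/prev pointers; instead every structure embeds a generic
 * struct list_head, and the enclosing structure is recovered from the embedded node
 * by pointer arithmetic (the list_entry macro expanded further below). The struct
 * my_item below is a made-up example.
 */
struct list_head {
    struct list_head *next, *prev;  /* doubly linked, circular */
};

struct my_item {
    int value;
    struct list_head node;          /* the node may sit anywhere in the struct, under any name */
};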

/*
 * The spinlock and the structure that gets linked into the list.
 */
spinlock_t consistent_lock;
struct arm_vm_region {
    struct list_head vm_list;
    unsigned long vm_start;
    unsigned long vm_end;
    struct page *vm_pages;
    int vm_active;
};

/*
 * Initialize a list head.
 * A list head is not an ordinary node: in this allocator it carries summary
 * information such as the overall address range the regions must fall inside.
 */
static struct arm_vm_region consistent_head = {    /* initialize the list head */
    .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
    .vm_start = CONSISTENT_BASE,
    .vm_end = CONSISTENT_END,
};
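/*
 * For reference, LIST_HEAD_INIT in the mainline Linux headers simply points both
 * fields of the node back at the node itself, i.e. an empty circular list (the
 * xenarm copy is presumably identical):
 *
 *     #define LIST_HEAD_INIT(name) { &(name), &(name) }
 */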

static struct arm_vm_region *arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
{
    /* the head holds the overall range the allocator is allowed to hand out */
    unsigned long addr = head->vm_start, end = head->vm_end - size;
    unsigned long flags;
    struct arm_vm_region *c, *new;

    new = xmalloc(struct arm_vm_region);
    if (!new)
        goto out;

    spin_lock_irqsave(&consistent_lock, flags);

    /*
     * Walk the list nodes looking for a gap large enough for this request.
     * The key is the list_for_each_entry macro: it is really just a for loop,
     * cleverly built on the list structure. Its definition is expanded below.
     */
    list_for_each_entry(c, &head->vm_list, vm_list) {
        if ((addr + size) < addr)          /* address overflow */
            goto nospc;
        if ((addr + size) <= c->vm_start)  /* the request fits in the gap before this region */
            goto found;
        addr = c->vm_end;                  /* next candidate start: the end of this region */
        if (addr > end)
            goto nospc;
    }
    /*
     * Note: if the list holds nothing but the head, the loop body never runs and we
     * fall straight through to 'found' below, which is exactly what we want.
     */
found:
    /*
     * Insert this entry _before_ the one we found.
     */
    list_add_tail(&new->vm_list, &c->vm_list);
    new->vm_start = addr;
    new->vm_end = addr + size;
    new->vm_active = 1;

    spin_unlock_irqrestore(&consistent_lock, flags);
    return new;

nospc:
    spin_unlock_irqrestore(&consistent_lock, flags);
    xfree(new);
out:
    return NULL;
}
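/*
 * For reference, the usual Linux implementation of list_add_tail (the xenarm copy
 * should be equivalent): the new node is spliced in between head->prev and head,
 * i.e. immediately before the node passed as 'head'. That is exactly why the code
 * above can use it to insert the new region in front of the region it found.
 */
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
    next->prev = new;
    new->next = next;
    new->prev = prev;
    prev->next = new;
}

static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
    __list_add(new, head->prev, head);
}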

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
    for (pos = list_entry((head)->next, typeof(*pos), member); \
         &pos->member != (head); \
         pos = list_entry(pos->member.next, typeof(*pos), member))
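/*
 * A minimal usage sketch (not part of the xenarm listing): walking every region
 * linked after consistent_head. Note that 'pos' iterates over the containing
 * arm_vm_region structures, not over the raw list_head nodes; a real caller would
 * also hold consistent_lock while walking the list.
 */
static void dump_vm_regions(void)
{
    struct arm_vm_region *pos;

    list_for_each_entry(pos, &consistent_head.vm_list, vm_list)
        printk("region %#lx - %#lx active=%d\n",
               pos->vm_start, pos->vm_end, pos->vm_active);
}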

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - (unsigned long)(&((type *)0)->member)))
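/*
 * How the cast trick works: &((type *)0)->member is the offset of 'member' within
 * 'type' (the same value as offsetof(type, member)), so subtracting that offset from
 * the address of the embedded list_head gives back the address of the enclosing
 * structure. A small hypothetical example with the structure defined above:
 *
 *     struct arm_vm_region *r = ...;
 *     struct list_head *node = &r->vm_list;
 *     struct arm_vm_region *back = list_entry(node, struct arm_vm_region, vm_list);
 *     // back == r
 *
 * Newer kernels express the same idea with the container_of() macro.
 */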

/*
 **********************************************************************************************
 */
static void *__dma_alloc(/*struct device *dev,*/ size_t size, dma_addr_t *handle/*, gfp_t gfp*/, pgprot_t prot)
{
    struct page_info *page;
    struct arm_vm_region *c;
    unsigned long order;
    unsigned long flags;

    /* bail out if consistent_init() has not set up the pte tables yet */
    if (!consistent_pte[0]) {
        printk(KERN_ERR "%s: not initialised\n", __func__);
        return NULL;
    }
    size = round_pgup(size);

    /* reject requests larger than the whole consistent window */
    if (size >= (CONSISTENT_END - CONSISTENT_BASE)) {
        printk(KERN_WARNING "coherent allocation too big "
               "(requested %#x)\n", size);
        goto no_page;
    }

    /* convert the byte count into a buddy-allocator order */
    order = get_order_from_bytes(size);

    local_irq_save(flags);
    page = alloc_heap_pages(0, order);  /* allocate the actual physical pages */
    local_irq_restore(flags);
    if (!page)
        goto no_page;

    c = arm_vm_region_alloc(&consistent_head, size,
                            /* gfp &*/ ~(__GFP_DMA | __GFP_HIGHMEM)); /* allocate a virtual region to map them into */
    if (c) {
        pte_t *pte;
        struct page_info *end = page + (1 << order);
        int idx = CONSISTENT_PTE_INDEX(c->vm_start);
        u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

        pte = consistent_pte[idx] + off;
        c->vm_pages = page;

        *handle = page_to_phys(page);//page_to_dma(/*dev,*/ page);

        do {
            *pte = l1e_from_paddr(page_to_phys(page), prot);//mk_pte(page, prot);

            page++;
            pte++;
            off++;
            if (off >= PTRS_PER_PTE) {
                off = 0;
                cpu_flush_cache_page((unsigned long)consistent_pte[idx]);
                pte = consistent_pte[++idx];
            }
        } while (size -= PAGE_SIZE);
        cpu_flush_cache_page((unsigned long)consistent_pte[idx]);
        /*
         * Free the otherwise unused pages.
         */
        while (page < end) { /* pages that were allocated but never mapped are released one at a time */
            free_heap_pages(0/*MEMZONE_DMADOM*/, page, 0);
            page++;
        }
        return (void *)c->vm_start;
    }
    /* the vm region could not be allocated, so release the pages */
    if (page)
        free_heap_pages(0/*MEMZONE_DMADOM*/, page, order);
no_page:
    printk("Not enough memory for DMA\n");
    *handle = ~0;
    return NULL;
}

pgprot_t pgprot_kernel;
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(/*struct device *dev, */size_t size, dma_addr_t *handle/*, gfp_t gfp*/)
{
    return __dma_alloc(/*dev, */size, handle, /*gfp,*/
                       pgprot_writecombine(pgprot_kernel));
}
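/*
 * A hypothetical usage sketch (not in the xenarm listing): a driver asks for one
 * write-combining page; 'virt' is the CPU-visible mapping inside the consistent
 * window and '*phys' is the physical address to program into the device.
 */
static void *alloc_device_buffer(dma_addr_t *phys)
{
    void *virt = dma_alloc_writecombine(PAGE_SIZE, phys);

    if (!virt)
        return NULL;    /* __dma_alloc failed and *phys was set to ~0 */
    return virt;
}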

/*
 * Initialise the consistent memory allocation.
 * The pgd entries covering the consistent window are set up here first.
 */
int consistent_init(void)
{
    pde_t *pgd;
    pte_t *pte;
    void *pt;
    int ret = 0, i = 0;
    u32 base = CONSISTENT_BASE;
    pgprot_kernel = PTE_TYPE_SMALL/* | PTE_BUFFERABLE | PTE_CACHEABLE*/ | PTE_SMALL_AP_UNO_SRW;//L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC;

    do {
        pt = alloc_xenheap_page();//pte_alloc_kernel(pgd, base);
        if (!pt) {  /* check the allocation before touching the page */
            printk(KERN_ERR "%s: no pte tables\n", __func__);
            ret = -ENOMEM;
            break;
        }
        clear_page(pt);
        idle_pg_table[pgd_index(base)] = l2e_from_page(virt_to_page(pt), PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO));
        cpu_flush_cache_page((unsigned long)&idle_pg_table[pgd_index(base)]);
        /* remember each pte table so DMA allocations can fill in entries later */
        consistent_pte[i++] = pt;
        base += (1 << PGDIR_SHIFT);
    } while (base < CONSISTENT_END);
    printk("consistent init ok\n");

    return ret;
}