From the Nand Driver to File Read/Write
Nand is a block device. Does a nand block device, then, like other block devices, go through the usual request-submit/request-serve ("C/S") round trip on every read and write?
On the Goldfish platform, let's start from nand driver registration and trace what a yaffs2 file read or write on top of nand actually looks like.
This post is mainly a record of questions I ran into while studying the code. As with earlier posts, it is mostly flow; for the theory behind it all, please Google. Some code below is repeated on purpose, so you don't wear your eyes out jumping back and forth.
The code is from the Android kernel 2.6.29. This record was written in a hurry, so mistakes in my understanding are inevitable; corrections are welcome.
Below is the execution flow of Android on the Goldfish platform:
<1>
377 static int __init init_mtdblock(void)
378 {
379 return register_mtd_blktrans(&mtdblock_tr);
380 }
Code snippet <1> registers mtdblock_tr, a struct mtd_blktrans_ops. This module is loaded during boot, and as the init function's name suggests, it targets MTD block devices. In Linux, Nand is classified as an MTD device: MTD merely wraps the nand device so that the upper layers see MTD instead of raw nand. Operating on Nand through MTD still goes through the nand driver's own functions, so don't treat MTD as anything mysterious. If you need more background, Google it; I took it far too seriously for a long time and never got it straight until I went and read the fucking code.
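To make that concrete, here is a minimal sketch of my own (not from the kernel tree) of what "operating nand through MTD" amounts to: grab the struct mtd_info and call through its function pointers. On Goldfish, mtd->read dispatches to goldfish_nand_read.

#include <linux/err.h>
#include <linux/mtd/mtd.h>

/* Minimal sketch: read 512 bytes from mtd device 0. mtd->read goes
 * straight into the nand driver's handler (goldfish_nand_read on this
 * platform); MTD itself adds no extra machinery. */
static int demo_mtd_read(void)
{
	struct mtd_info *mtd = get_mtd_device(NULL, 0);
	static u_char buf[512];
	size_t retlen;
	int ret;

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	ret = mtd->read(mtd, 0, sizeof(buf), &retlen, buf);
	put_mtd_device(mtd);
	return ret;
}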
The mtd_blktrans_ops structure looks like this:
<2>
32 struct mtd_blktrans_ops {
33 char *name;
34 int major;
35 int part_bits;
36 int blksize;
37 int blkshift;
38
39 /* Access functions */
40 int (*readsect)(struct mtd_blktrans_dev *dev,
41 unsigned long block, char *buffer);
42 int (*writesect)(struct mtd_blktrans_dev *dev,
43 unsigned long block, char *buffer);
44 int (*discard)(struct mtd_blktrans_dev *dev,
45 unsigned long block, unsigned nr_blocks);
46
47 /* Block layer ioctls */
48 int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);
49 int (*flush)(struct mtd_blktrans_dev *dev);
50
51 /* Called with mtd_table_mutex held; no race with add/remove */
52 int (*open)(struct mtd_blktrans_dev *dev);
53 int (*release)(struct mtd_blktrans_dev *dev);
54
55 /* Called on {de,}registration and on subsequent addition/removal
56 of devices, with mtd_table_mutex held. */
57 void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd);
58 void (*remove_dev)(struct mtd_blktrans_dev *dev);
59
60 struct list_head devs;
61 struct list_head list;
62 struct module *owner;
63
64 struct mtd_blkcore_priv *blkcore_priv;
65 };
Note struct mtd_blkcore_priv at line 64: it holds the read/write request queue. All MTD devices registered under the same translation layer share this single request queue.
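For reference, struct mtd_blkcore_priv is defined in drivers/mtd/mtd_blkdevs.c; quoted here from memory of the 2.6.29 source, so take the exact layout with a grain of salt:

struct mtd_blkcore_priv {
	struct task_struct *thread;	/* worker thread started in register_mtd_blktrans() */
	struct request_queue *rq;	/* the shared read/write request queue */
	spinlock_t queue_lock;		/* protects rq */
};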
init_mtdblock->register_mtd_blktrans
340 int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
341 {
342 int ret, i;
343
344 /* Register the notifier if/when the first device type is
345 registered, to prevent the link/init ordering from fucking
346 us over. */
347 if (!blktrans_notifier.list.next)
348 register_mtd_user(&blktrans_notifier);
349
350 tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);//essentially the request queue, plus its lock and worker thread
351 if (!tr->blkcore_priv)
352 return -ENOMEM;
353
354 mutex_lock(&mtd_table_mutex);
355
356 ret = register_blkdev(tr->major, tr->name);//register a block device major under tr->name ("mtdblock")
357 if (ret) {
358 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
359 tr->name, tr->major, ret);
360 kfree(tr->blkcore_priv);
361 mutex_unlock(&mtd_table_mutex);
362 return ret;
363 }
364 spin_lock_init(&tr->blkcore_priv->queue_lock);
365
366 tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
367 if (!tr->blkcore_priv->rq) {
368 unregister_blkdev(tr->major, tr->name);
369 kfree(tr->blkcore_priv);
370 mutex_unlock(&mtd_table_mutex);
371 return -ENOMEM;
372 }
373
374 tr->blkcore_priv->rq->queuedata = tr;
375 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
376 if (tr->discard)
377 blk_queue_set_discard(tr->blkcore_priv->rq,
378 blktrans_discard_request);
379
380 tr->blkshift = ffs(tr->blksize) - 1;
381
382 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
383 "%sd", tr->name);
384 if (IS_ERR(tr->blkcore_priv->thread)) {
385 blk_cleanup_queue(tr->blkcore_priv->rq);
386 unregister_blkdev(tr->major, tr->name);
387 kfree(tr->blkcore_priv);
388 mutex_unlock(&mtd_table_mutex);
389 return PTR_ERR(tr->blkcore_priv->thread);
390 }
391
392 INIT_LIST_HEAD(&tr->devs);
393 list_add(&tr->list, &blktrans_majors);
394
395 for (i=0; i<MAX_MTD_DEVICES; i++) {
396 if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
397 tr->add_mtd(tr, mtd_table[i]);//alloc_disk for every mtd device already present
398 }
399
400 mutex_unlock(&mtd_table_mutex);
401
402 return 0;
403 }
Line 356 is the important one: it registers the major under tr->name, which is why the block nodes that later appear under /dev/ are called mtdblockN. Why "mtdblock"? The second argument decides it. ^_^
Line 366, as mentioned above, sets up the read/write request queue.
Line 382 starts a kernel thread. Each time a request is submitted, the request_fn wakes this thread to process the queue. (A side question: when a kernel thread terminates, how do the resources it holds get released?)
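It is worth seeing once how the request_fn and this thread cooperate. Condensed from drivers/mtd/mtd_blkdevs.c (2.6.29), quoted from memory so details may differ slightly: the request_fn does nothing but wake the thread, and the thread drains the queue, handing each request to do_blktrans_request, which calls tr->readsect/tr->writesect once per sector.

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;

	/* the whole request_fn: just kick the worker thread */
	wake_up_process(tr->blkcore_priv->thread);
}

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res;

		req = elv_next_request(rq);
		if (!req) {
			/* queue empty: sleep until the request_fn wakes us */
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		/* loops over the sectors in req, calling tr->readsect()
		   or tr->writesect() for each tr->blksize-sized block */
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);
		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);
	return 0;
}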
At lines 395~398, no nand device actually gets added: it seems the goldfish_nand driver has not been loaded at this point, so mtd_table is still empty. register_mtd_blktrans's work therefore ends here; its only contribution so far is registering an mtd_blktrans_ops.
The MTD devices are only added later, when module_init(goldfish_nand_init) executes.
405 static int __init goldfish_nand_init(void)
406 {
407 return platform_driver_register(&goldfish_nand_driver);
408 }
After this, driver registration walks all the devices on the bus until one matches goldfish_nand. Some readers may wonder: why doesn't the device go looking for its driver at device-registration time? It does try, but the driver code hadn't been loaded yet at that point, so where would it find a driver?
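For reference, matching on the platform bus is nothing more than a name comparison; this is drivers/base/platform.c as I remember it in 2.6.29 (no id_table matching yet):

static int platform_match(struct device *dev, struct device_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);

	/* a platform device matches a driver purely by name */
	return (strcmp(pdev->name, drv->name) == 0);
}

Back to Goldfish: the listing below, goldfish_pdev_worker, is where the bus's freshly discovered devices actually get registered.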
58 static void goldfish_pdev_worker(struct work_struct *work)
59 {
60 int ret;
61 struct pdev_bus_dev *pos, *n;
62
63 list_for_each_entry_safe(pos, n, &pdev_bus_removed_devices, list) {
64 list_del(&pos->list);
65 platform_device_unregister(&pos->pdev);
66 kfree(pos);
67 }
68 list_for_each_entry_safe(pos, n, &pdev_bus_new_devices, list) {
69 list_del(&pos->list);
70 ret = platform_device_register(&pos->pdev);
71 if(ret) {
72 printk("goldfish_pdev_worker failed to register device, %s\n", pos->pdev.name);
73 }
74 else {
75 printk("goldfish_pdev_worker registered %s\n", pos->pdev.name);
76 }
77 list_add_tail(&pos->list, &pdev_bus_registered_devices);
78 }
79 }
Look at line 70: the device really is registered, and inside platform_device_register it really does try to match a driver; it just comes back empty-handed, since no driver has been loaded yet. From then on, every time a driver is registered, the bus searches for a matching device, and once one is found the driver's probe function gets called. For goldfish_nand_driver, that probe function is:
goldfish_nand_probe
315 static int goldfish_nand_probe(struct platform_device *pdev)
316 {
317 uint32_t num_dev;
318 int i;
319 int err;
320 uint32_t num_dev_working;
321 uint32_t version;
322 struct resource *r;
323 struct goldfish_nand *nand;
324 unsigned char __iomem *base;
325
326 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
327 if(r == NULL) {
328 err = -ENODEV;
329 goto err_no_io_base;
330 }
331
332 base = ioremap(r->start, PAGE_SIZE);
333 if(base == NULL) {
334 err = -ENOMEM;
335 goto err_ioremap;
336 }
337 version = readl(base + NAND_VERSION);
338 if(version != NAND_VERSION_CURRENT) {
339 printk("goldfish_nand_init: version mismatch, got %d, expected %d\n",
340 version, NAND_VERSION_CURRENT);
341 err = -ENODEV;
342 goto err_no_dev;
343 }
344 num_dev = readl(base + NAND_NUM_DEV);
345 if(num_dev == 0) {
346 err = -ENODEV;
347 goto err_no_dev;
348 }
349
350 nand = kzalloc(sizeof(*nand) + sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
351 if(nand == NULL) {
352 err = -ENOMEM;
353 goto err_nand_alloc_failed;
354 }
355 spin_lock_init(&nand->lock);
356 nand->base = base;
357 nand->mtd_count = num_dev;
358 platform_set_drvdata(pdev, nand);
359
360 num_dev_working = 0;
361 for(i = 0; i < num_dev; i++) {
362 err = goldfish_nand_init_device(nand, i);
363 if(err == 0)
364 num_dev_working++;
365 }
366 if(num_dev_working == 0) {
367 err = -ENODEV;
368 goto err_no_working_dev;
369 }
370 return 0;
371
372 err_no_working_dev:
373 kfree(nand);
374 err_nand_alloc_failed:
375 err_no_dev:
376 iounmap(base);
377 err_ioremap:
378 err_no_io_base:
379 return err;
380 }
Line 362: each enumerated nand is initialized by a call to goldfish_nand_init_device.
goldfish_nand_probe->goldfish_nand_init_device
248 static int goldfish_nand_init_device(struct goldfish_nand *nand, int id)
249 {
250 uint32_t name_len;
251 uint32_t result;
252 uint32_t flags;
253 unsigned long irq_flags;
254 unsigned char __iomem *base = nand->base;
255 struct mtd_info *mtd = &nand->mtd[id];
256 char *name;
257
258 spin_lock_irqsave(&nand->lock, irq_flags);
259 writel(id, base + NAND_DEV);
260 flags = readl(base + NAND_DEV_FLAGS);
261 name_len = readl(base + NAND_DEV_NAME_LEN);
262 mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
263 mtd->size = readl(base + NAND_DEV_SIZE_LOW);
264 mtd->size |= (uint64_t)readl(base + NAND_DEV_SIZE_HIGH) << 32;
265 mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
266 mtd->oobavail = mtd->oobsize;
267 mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
268 (mtd->writesize + mtd->oobsize) * mtd->writesize;
269 do_div(mtd->size, mtd->writesize + mtd->oobsize);
270 mtd->size *= mtd->writesize;
271 printk("goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
272 id, mtd->size, mtd->writesize, mtd->oobsize, mtd->erasesize);
273 spin_unlock_irqrestore(&nand->lock, irq_flags);
274
275 mtd->priv = nand;
276
277 mtd->name = name = kmalloc(name_len + 1, GFP_KERNEL);
278 if(name == NULL)
279 return -ENOMEM;
280
281 result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len, name);
282 if(result != name_len) {
283 kfree(mtd->name);
284 mtd->name = NULL;
285 printk("goldfish_nand_init_device failed to get dev name %d != %d\n",
286 result, name_len);
287 return -ENODEV;
288 }
289 ((char *) mtd->name)[name_len] = '\0';
290
291 /* Setup the MTD structure */
292 mtd->type = MTD_NANDFLASH;
293 mtd->flags = MTD_CAP_NANDFLASH;
294 if(flags & NAND_DEV_FLAG_READ_ONLY)
295 mtd->flags &= ~MTD_WRITEABLE;
296
297 mtd->owner = THIS_MODULE;
298 mtd->erase = goldfish_nand_erase;
299 mtd->read = goldfish_nand_read;
300 mtd->write = goldfish_nand_write;
301 mtd->read_oob = goldfish_nand_read_oob;
302 mtd->write_oob = goldfish_nand_write_oob;
303 mtd->block_isbad = goldfish_nand_block_isbad;
304 mtd->block_markbad = goldfish_nand_block_markbad;
305
306 if (add_mtd_device(mtd)) {
307 kfree(mtd->name);
308 mtd->name = NULL;
309 return -EIO;
310 }
311
312 return 0;
313 }
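One detail in this function deserves a pause: lines 263~270. The raw size the goldfish device reports apparently counts each page's spare (oob) area inline with its data, so the code rescales it to a data-only size: divide by (writesize + oobsize), multiply by writesize. A worked example with hypothetical numbers (plain userspace C, just to show the arithmetic):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical: 2048-byte pages, 64-byte spare, 4096 page+spare units */
	uint64_t writesize = 2048, oobsize = 64;
	uint64_t raw = 4096 * (writesize + oobsize);	/* as read from NAND_DEV_SIZE_{LOW,HIGH} */

	/* mirrors: do_div(mtd->size, writesize + oobsize); mtd->size *= writesize; */
	uint64_t data_only = raw / (writesize + oobsize) * writesize;

	printf("raw %llu bytes -> data-only %llu bytes (%llu pages)\n",
	       (unsigned long long)raw, (unsigned long long)data_only,
	       (unsigned long long)(data_only / writesize));
	return 0;
}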
Line 306 calls add_mtd_device:
goldfish_nand_probe->goldfish_nand_init_device->add_mtd_device
35 /**
36 * add_mtd_device - register an MTD device
37 * @mtd: pointer to new MTD device info structure
38 *
39 * Add a device to the list of MTD devices present in the system, and
40 * notify each currently active MTD 'user' of its arrival. Returns
41 * zero on success or 1 on failure, which currently will only happen
42 * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16)
43 */
44
45 int add_mtd_device(struct mtd_info *mtd)
46 {
47 int i;
48
49 BUG_ON(mtd->writesize == 0);
50 mutex_lock(&mtd_table_mutex);
51
52 for (i=0; i < MAX_MTD_DEVICES; i++)
53 if (!mtd_table[i]) {
54 struct mtd_notifier *not;
55
56 mtd_table[i] = mtd;
57 mtd->index = i;
58 mtd->usecount = 0;
59
60 if (is_power_of_2(mtd->erasesize))
61 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
62 else
63 mtd->erasesize_shift = 0;
64
65 if (is_power_of_2(mtd->writesize))
66 mtd->writesize_shift = ffs(mtd->writesize) - 1;
67 else
68 mtd->writesize_shift = 0;
69
70 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
71 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
72
73 /* Some chips always power up locked. Unlock them now */
74 if ((mtd->flags & MTD_WRITEABLE)
75 && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
76 if (mtd->unlock(mtd, 0, mtd->size))
77 printk(KERN_WARNING
78 "%s: unlock failed, "
79 "writes may not work\n",
80 mtd->name);
81 }
82
83 DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
84 /* No need to get a refcount on the module containing
85 the notifier, since we hold the mtd_table_mutex */
86 list_for_each_entry(not, &mtd_notifiers, list)
87 {
88 not->add(mtd);
89 }
90
91 mutex_unlock(&mtd_table_mutex);
92 /* We _know_ we aren't being removed, because
93 our caller is still holding us here. So none
94 of this try_ nonsense, and no bitching about it
95 either. :) */
96 __module_get(THIS_MODULE);
97 return 0;
98 }
99
100 mutex_unlock(&mtd_table_mutex);
101 return 1;
102 }
Note line 88: the not->add invoked here is blktrans_notify_add. Why that one? At init time, init_mtdblock called register_mtd_blktrans(&mtdblock_tr), and mtdblock_tr defines the operations for mtdblock, add_mtd among them:
362 static struct mtd_blktrans_ops mtdblock_tr = {
363 .name = "mtdblock",
364 .major = 31,
365 .part_bits = 0,
366 .blksize = 512,
367 .open = mtdblock_open,
368 .flush = mtdblock_flush,
369 .release = mtdblock_release,
370 .readsect = mtdblock_readsect,
371 .writesect = mtdblock_writesect,
372 .add_mtd = mtdblock_add_mtd,
373 .remove_dev = mtdblock_remove_dev,
374 .owner = THIS_MODULE,
375 };
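The readsect/writesect hooks in this table are where a block-layer request finally becomes an MTD call, and even here the path bottoms out in mtd->read. mtdblock_readsect (drivers/mtd/mtdblock.c, condensed from memory, so details may differ) just goes through a small write-back cache down to the MTD API:

static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
			     unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];

	/* do_cached_read() ends up in mtd->read(), i.e. the nand
	 * driver's own read handler (goldfish_nand_read here) */
	return do_cached_read(mtdblk, block << 9, 512, buf);
}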
register_mtd_blktrans, in turn, registered blktrans_notifier, a struct mtd_notifier, at init time. Why make it this roundabout? Mainly extensibility: any number of block translation layers can subscribe to MTD device arrivals this way.
335 static struct mtd_notifier blktrans_notifier = {
336 .add = blktrans_notify_add,
337 .remove = blktrans_notify_remove,
338 };
324 static void blktrans_notify_add(struct mtd_info *mtd)
325 {
326 struct mtd_blktrans_ops *tr;
327
328 if (mtd->type == MTD_ABSENT)
329 return;
330
331 list_for_each_entry(tr, &blktrans_majors, list)
332 tr->add_mtd(tr, mtd);
333 }
Back to the flow above: not->add(mtd) adds the mtd device; not->add calls tr->add_mtd, which for mtdblock_tr is mtdblock_add_mtd.
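Incidentally, this notifier pairing is also why load order does not matter: register_mtd_user replays ->add for every MTD device already present, and add_mtd_device (line 88 above) notifies every user already registered. From drivers/mtd/mtdcore.c (2.6.29, quoted from memory):

void register_mtd_user(struct mtd_notifier *new)
{
	int i;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);
	__module_get(THIS_MODULE);

	/* replay ->add() for devices that arrived before this user */
	for (i = 0; i < MAX_MTD_DEVICES; i++)
		if (mtd_table[i])
			new->add(mtd_table[i]);

	mutex_unlock(&mtd_table_mutex);
}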
blktrans_notify_add->mtdblock_add_mtd
337 static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
338 {
339 struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
340
341 if (!dev)
342 return;
343
344 dev->mtd = mtd;
345 dev->devnum = mtd->index;
346
347 dev->size = mtd->size >> 9;
348 dev->tr = tr;
349
350 if (!(mtd->flags & MTD_WRITEABLE))
351 dev->readonly = 1;
352
353 add_mtd_blktrans_dev(dev);
354 }
blktrans_notify_add->mtdblock_add_mtd->add_mtd_blktrans_dev
216 int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
217 {
218 struct mtd_blktrans_ops *tr = new->tr;
219 struct mtd_blktrans_dev *d;
220 int last_devnum = -1;
221 struct gendisk *gd;
222
223 if (mutex_trylock(&mtd_table_mutex)) {
224 mutex_unlock(&mtd_table_mutex);
225 BUG();
226 }
227
228 list_for_each_entry(d, &tr->devs, list) {
229 if (new->devnum == -1) {
230 /* Use first free number */
231 if (d->devnum != last_devnum+1) {
232 /* Found a free devnum. Plug it in here */
233 new->devnum = last_devnum+1;
234 list_add_tail(&new->list, &d->list);
235 goto added;
236 }
237 } else if (d->devnum == new->devnum) {
238 /* Required number taken */
239 return -EBUSY;
240 } else if (d->devnum > new->devnum) {
241 /* Required number was free */
242 list_add_tail(&new->list, &d->list);
243 goto added;
244 }
245 last_devnum = d->devnum;
246 }
247 if (new->devnum == -1)
248 new->devnum = last_devnum+1;
249
250 if ((new->devnum << tr->part_bits) > 256) {
251 return -EBUSY;
252 }
253
254 list_add_tail(&new->list, &tr->devs);
255 added:
256 mutex_init(&new->lock);
257 if (!tr->writesect)
258 new->readonly = 1;
259
260 gd = alloc_disk(1 << tr->part_bits);//ah, here is where the gendisk finally gets allocated
261 if (!gd) {
262 list_del(&new->list);
263 return -ENOMEM;
264 }
265 gd->major = tr->major;
266 gd->first_minor = (new->devnum) << tr->part_bits;
267 gd->fops = &mtd_blktrans_ops;
268
269 if (tr->part_bits)
270 if (new->devnum < 26)
271 snprintf(gd->disk_name, sizeof(gd->disk_name),
272 "%s%c", tr->name, 'a' + new->devnum);
273 else
274 snprintf(gd->disk_name, sizeof(gd->disk_name),
275 "%s%c%c", tr->name,
276 'a' - 1 + new->devnum / 26,
277 'a' + new->devnum % 26);
278 else
279 snprintf(gd->disk_name, sizeof(gd->disk_name),
280 "%s%d", tr->name, new->devnum);
281
282 /* 2.5 has capacity in units of 512 bytes while still
283 having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
284 set_capacity(gd, (new->size * tr->blksize) >> 9);
285
286 gd->private_data = new;
287 new->blkcore_priv = gd;
288 gd->queue = tr->blkcore_priv->rq;//the disk reuses tr's shared request queue
289
290 if (new->readonly)
291 set_disk_ro(gd, 1);
292
293 add_disk(gd);//register the disk with the block layer
294
295 return 0;
296 }
Line 288: the disk uses tr's queue, the one set up when register_mtd_blktrans(&mtdblock_tr) ran at init time (the relevant lines, repeated here so you don't have to scroll back):
364 spin_lock_init(&tr->blkcore_priv->queue_lock);
365
366 tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
The tr queue is entirely generic; only the request_fn differs from any other block device. Such a simple setup is almost anticlimactic. What is really unexpected, though, is that file reads and writes on mtd do not go through this request machinery at all. So what path do they take?
We know that in Linux, file I/O passes through several layers: the VFS at the top, then the concrete filesystem. It is the concrete filesystem that decides whether I/O is funneled through the request mechanism. So let's go straight to yaffs2's operations tables and see for ourselves whether a request is ever issued.
Since the Linux VFS has a page cache, and the page cache hangs off an address_space whose host is the inode, file reads and writes ultimately go through the address_space's operations table.
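As a quick reminder, the call path of an ordinary read() in 2.6.29 looks roughly like this (simplified, from memory); the concrete filesystem only plugs in at the bottom, at the readpage hook:

sys_read -> vfs_read -> file->f_op->read (do_sync_read) -> generic_file_aio_read -> do_generic_file_read (page-cache lookup) -> mapping->a_ops->readpage (the filesystem's hook)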
Here is yaffs2's address_space_operations structure. Whether it ever issues a request can be judged from its readpage alone:
270 static struct address_space_operations yaffs_file_address_operations = {
271 .readpage = yaffs_readpage,
272 .writepage = yaffs_writepage,
273 #if (YAFFS_USE_WRITE_BEGIN_END > 0)
274 .write_begin = yaffs_write_begin,
275 .write_end = yaffs_write_end,
276 #else
277 .prepare_write = yaffs_prepare_write,
278 .commit_write = yaffs_commit_write,
279 #endif
280 };
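yaffs_readpage is telling even without tracing it fully. Heavily condensed from the yaffs_fs.c of that era (from memory; exact names and locking elided), it fills the page by calling straight into the yaffs2 core. Note what is absent: no buffer heads, no submit_bio, no request queue.

static int yaffs_readpage_nolock(struct file *f, struct page *pg)
{
	yaffs_Object *obj = yaffs_DentryToObject(f->f_dentry);
	void *buf = kmap(pg);

	yaffs_GrossLock(obj->myDev);
	/* straight into the yaffs2 core, no block layer involved */
	yaffs_ReadDataFromFile(obj, buf,
			       (loff_t)pg->index << PAGE_CACHE_SHIFT,
			       PAGE_CACHE_SIZE);
	yaffs_GrossUnlock(obj->myDev);

	flush_dcache_page(pg);
	kunmap(pg);
	SetPageUptodate(pg);
	unlock_page(pg);
	return 0;
}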
Since yaffs_read mostly concerns yaffs2-internal flow, I won't trace it in detail. What is distinctive is that yaffs2 does not adopt the traditional page-cache notions for its own caching. The yaffs_Device structure has a member "yaffs_ChunkCache *srCache;" of this type:
111 /* Special sequence number for bad block that failed to be marked bad */
112 #define YAFFS_SEQUENCE_BAD_BLOCK 0xFFFF0000
113
114 /* ChunkCache is used for short read/write operations.*/
115 typedef struct {
116 struct yaffs_ObjectStruct *object;
117 int chunkId;
118 int lastUse;
119 int dirty;
120 int nBytes; /* Only valid if the cache is dirty */
121 int locked; /* Can't push out or flush while locked. */
122 #ifdef CONFIG_YAFFS_YAFFS2
123 __u8 *data;
124 #else
125 __u8 data[YAFFS_BYTES_PER_CHUNK];
126 #endif
127 } yaffs_ChunkCache;
Each time it needs to know whether a chunk is already cached, the lookup function is simply this:
4015 /* Find a cached chunk */
4016 static yaffs_ChunkCache *yaffs_FindChunkCache(const yaffs_Object *obj,
4017 int chunkId)
4018 {
4019 yaffs_Device *dev = obj->myDev;
4020 int i;
4021 if (dev->nShortOpCaches > 0) {
4022 for (i = 0; i < dev->nShortOpCaches; i++) {
4023 if (dev->srCache[i].object == obj &&
4024 dev->srCache[i].chunkId == chunkId) {
4025 dev->cacheHits++;
4026
4027 return &dev->srCache[i];
4028 }
4029 }
4030 }
4031 return NULL;
4032 }
As the lookup shows, it does not organize its cache with a hash or a radix_tree the way ext2's page cache is organized; it is a flat array scanned linearly, laid out the same way the nand device itself is. Perhaps that is one reason yaffs2 is so portable: it is tied neither to Linux nor to the presence of MTD; it can be ported into essentially any OS.
So, to return to the beginning: for yaffs2 file I/O, the alloc_disk and request_queue we watched being set up earlier never come into play at all. Reading and writing a Nand device is nowhere near as involved as for other block devices; that request_fn never even runs on this path. (It is not entirely wasted, though: accessing /dev/mtdblockN directly, say by putting ext2 on it, would still go through that queue.)
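To make the conclusion concrete: where yaffs2 finally touches the hardware is its MTD glue in yaffs_mtdif2.c. The sketch below is heavily condensed from memory (the real code also packs/unpacks tags and translates errors). It calls mtd->read_oob, i.e. goldfish_nand_read_oob here, directly; no gendisk or request queue anywhere on the path.

int nandmtd2_ReadChunkWithTagsFromNAND(yaffs_Device *dev, int chunkInNAND,
				       __u8 *data, yaffs_ExtendedTags *tags)
{
	struct mtd_info *mtd = (struct mtd_info *)dev->genericDevice;
	loff_t addr = (loff_t)chunkInNAND * dev->nDataBytesPerChunk;
	struct mtd_oob_ops ops;

	ops.mode = MTD_OOB_AUTO;
	ops.len = data ? dev->nDataBytesPerChunk : 0;
	ops.ooblen = mtd->oobavail;
	ops.ooboffs = 0;
	ops.datbuf = data;
	ops.oobbuf = (__u8 *)tags;	/* simplified: the real code unpacks tags */

	/* mtd->read_oob == goldfish_nand_read_oob on this platform */
	return mtd->read_oob(mtd, addr, &ops);
}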