A Brief Look at SD Card Read/Write Operations
When a read/write request gets serviced, and how, is decided entirely by the request queue. Taking the MMC card on the Goldfish platform as an example, let's look at how its request queue is set up:
597 struct mmc_blk_data *md;
598 int err;
599
600 char cap_str[10];
601
602 /*
603 * Check that the card supports the command class(es) we need.
604 */
605 if (!(card->csd.cmdclass & CCC_BLOCK_READ))
606 return -ENODEV;
607
608 md = mmc_blk_alloc(card);
mmc_blk_probe->mmc_blk_alloc()
510 struct mmc_blk_data *md;
511 int devidx, ret;
512
513 devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS); // find an index in dev_use that is not in use yet
514 if (devidx >= MMC_NUM_MINORS)
515 return ERR_PTR(-ENOSPC);
516 __set_bit(devidx, dev_use);
517
518 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
519 if (!md) {
520 ret = -ENOMEM;
521 goto out;
522 }
523
524
525 /*
526 * Set the read-only status based on the supported commands
527 * and the write protect switch.
528 */
529 md->read_only = mmc_blk_readonly(card);
530
531 md->disk = alloc_disk(1 << MMC_SHIFT); // this structure is extremely important
532 if (md->disk == NULL) {
533 ret = -ENOMEM;
534 goto err_kfree;
535 }
536
537 spin_lock_init(&md->lock);
538 md->usage = 1; // incremented on every get
539
540 ret = mmc_init_queue(&md->queue, card, &md->lock);
541 if (ret)
542 goto err_putdisk;
543
544 md->queue.issue_fn = mmc_blk_issue_rq;
545 md->queue.data = md;
546
547 md->disk->major = MMC_BLOCK_MAJOR; // submitted requests use this to locate the disk and are then queued on disk->queue
548 md->disk->first_minor = devidx << MMC_SHIFT;
549 md->disk->fops = &mmc_bdops;
550 md->disk->private_data = md;
551 md->disk->queue = md->queue.queue; // ah, so this is where the two are wired together
552 md->disk->driverfs_dev = &card->dev;
Line 531 allocates a disk with alloc_disk(); this is the generic block device structure (gendisk). Every request first resolves the corresponding device node under /dev/, locates the matching disk through its major and minor numbers, and is then queued on disk->queue. When a queued request actually gets executed is decided entirely by this request queue. In which ways? Let's keep reading; once the whole flow has been traced we will wrap up with a summary.
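To make the role of major, first_minor, fops and disk->queue more concrete, here is a minimal sketch of the same registration pattern for a hypothetical driver, written against the same era of the block API as the code above (the names sketchblk, sketch_request_fn and so on are made up for illustration, and error cleanup is omitted):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sketch_lock);
static struct gendisk *sketch_disk;
static struct request_queue *sketch_queue;
static int sketch_major;

/* request_fn: pop requests off the queue and complete them */
static void sketch_request_fn(struct request_queue *q)
{
	struct request *req;
	int ret;

	while ((req = elv_next_request(q)) != NULL) {
		/* a real driver would program the hardware here */
		do {
			ret = __blk_end_request(req, 0, blk_rq_cur_bytes(req));
		} while (ret);
	}
}

static struct block_device_operations sketch_fops = {
	.owner = THIS_MODULE,
};

static int __init sketch_init(void)
{
	sketch_major = register_blkdev(0, "sketchblk");	/* dynamic major */
	if (sketch_major <= 0)
		return -EIO;

	sketch_queue = blk_init_queue(sketch_request_fn, &sketch_lock);
	sketch_disk  = alloc_disk(16);			/* 16 minors */
	if (!sketch_queue || !sketch_disk)
		return -ENOMEM;				/* cleanup omitted in this sketch */

	sketch_disk->major       = sketch_major;	/* requests find the disk by major/minor */
	sketch_disk->first_minor = 0;
	sketch_disk->fops        = &sketch_fops;
	sketch_disk->queue       = sketch_queue;	/* same wiring as md->disk->queue above */
	snprintf(sketch_disk->disk_name, sizeof(sketch_disk->disk_name), "sketchblk0");
	set_capacity(sketch_disk, 0);
	add_disk(sketch_disk);				/* the device now shows up under /dev */
	return 0;
}
module_init(sketch_init);
MODULE_LICENSE("GPL");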
mmc_blk_probe->mmc_blk_alloc()->mmc_init_queue()
125 mq->card = card;
126 mq->queue = blk_init_queue(mmc_request, lock);
127 if (!mq->queue)
128 return -ENOMEM;
129
130 mq->queue->queuedata = mq;
131 mq->req = NULL;
132
133 blk_queue_prep_rq(mq->queue, mmc_prep_request);
134 blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
135 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
mmc_init_queue creates a request queue by calling the generic blk_init_queue(), and then sets the MMC-specific parameters on it (a sketch of a typical prep_rq_fn follows the list below).
The driver-specific parameters are:
queue->request_fn = mmc_request
queue->prep_rq_fn = mmc_prep_request
queue->ordered = QUEUE_ORDERED_DRAIN
queue->next_ordered = QUEUE_ORDERED_DRAIN
queue->prepare_flush_fn = NULL
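mmc_prep_request itself is not shown in this article. As a rough idea, a prep_rq_fn of this kernel era typically just validates the request and marks it as prepared; here is a minimal sketch (not the actual MMC code, and foo_prep_rq is an illustrative name):

#include <linux/blkdev.h>

/*
 * Sketch of a prep_rq_fn: reject anything that is not a normal
 * filesystem request, and mark the rest as prepared so the block
 * layer does not prepare the same request twice.
 */
static int foo_prep_rq(struct request_queue *q, struct request *req)
{
	if (!blk_fs_request(req))
		return BLKPREP_KILL;		/* drop unsupported requests */

	req->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;
}

/* installed at queue-init time, just like line 133 above: */
/* blk_queue_prep_rq(q, foo_prep_rq); */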
Besides these driver-specific parameters, a number of generic ones are set up as well, and they are just as indispensable. Let's keep going:
mmc_blk_probe->mmc_blk_alloc()->mmc_init_queue()->blk_init_queue
540 /**
541 * blk_init_queue - prepare a request queue for use with a block device
542 * @rfn: The function to be called to process requests that have been
543 * placed on the queue.
544 * @lock: Request queue spin lock
545 *
546 * Description:
547 * If a block device wishes to use the standard request handling procedures,
548 * which sorts requests and coalesces adjacent requests, then it must
549 * call blk_init_queue(). The function @rfn will be called when there
550 * are requests on the queue that need to be processed. If the device
551 * supports plugging, then @rfn may not be called immediately when requests
552 * are available on the queue, but may be called at some time later instead.
553 * Plugged queues are generally unplugged when a buffer belonging to one
554 * of the requests on the queue is needed, or due to memory pressure.
555 *
556 * @rfn is not required, or even expected, to remove all requests off the
557 * queue, but only as many as it can handle at a time. If it does leave
558 * requests on the queue, it is responsible for arranging that the requests
559 * get dealt with eventually.
560 *
561 * The queue spin lock must be held while manipulating the requests on the
562 * request queue; this lock will be taken also from interrupt context, so irq
563 * disabling is needed for it.
564 *
565 * Function returns a pointer to the initialized request queue, or %NULL if
566 * it didn't succeed.
567 *
568 * Note:
569 * blk_init_queue() must be paired with a blk_cleanup_queue() call
570 * when the block device is deactivated (such as at module unload).
571 **/
572
573 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
574 {
575 return blk_init_queue_node(rfn, lock, -1);
576 }
mmc_blk_probe->mmc_blk_alloc()->mmc_init_queue()->blk_init_queue->blk_init_queue_node
579 struct request_queue *
580 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
581 {
582 struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
583
584 if (!q)
585 return NULL;
586
587 q->node = node_id;
588 if (blk_init_free_list(q)) {
589 kmem_cache_free(blk_requestq_cachep, q);
590 return NULL;
591 }
592
593 /*
594 * if caller didn't supply a lock, they get per-queue locking with
595 * our embedded lock
596 */
597 if (!lock)
598 lock = &q->__queue_lock;
599
600 q->request_fn = rfn;
601 q->prep_rq_fn = NULL;
602 q->unplug_fn = generic_unplug_device;
603 q->queue_flags = QUEUE_FLAG_DEFAULT;
604 q->queue_lock = lock;
605
606 blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
607
608 blk_queue_make_request(q, __make_request);
609 blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
610
611 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
612 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
613
614 q->sg_reserved_size = INT_MAX;
615
616 blk_set_cmd_filter_defaults(&q->cmd_filter);
617
618 /*
619 * all done
620 */
621 if (!elevator_init(q, NULL)) {
622 blk_queue_congestion_threshold(q);
623 return q;
624 }
Here several generic parameters are defined as well:
queue->make_request_fn = __make_request, the completely generic implementation
queue->seg_boundary_mask, the segment-merging boundary, 0xFFFFFFFF by default
queue->max_segment_size, the maximum segment size, 2^16 bytes (64 KB)
queue->max_hw_segments, the maximum number of hardware segments (128)
queue->max_phys_segments, the maximum number of physical segments (128)
Line 608 calls blk_queue_make_request(), which configures a key data structure, queue->unplug_timer; this timer decides when requests actually get executed.
Line 616 sets up some flag bits that act as a command filter; they come into play while requests are executed. Line 621 initializes the I/O scheduler, which defaults to "anticipatory" here, although switching it to the no-op scheduler also works. So what is the relationship between a block device's requests and the queues inside the elevator (I/O scheduler)?
mmc_blk_probe->mmc_blk_alloc()->mmc_init_queue()->blk_init_queue->blk_init_queue_node->blk_queue_make_request
98 /**
99 * blk_queue_make_request - define an alternate make_request function for a device
100 * @q: the request queue for the device to be affected
101 * @mfn: the alternate make_request function
102 *
103 * Description:
104 * The normal way for &struct bios to be passed to a device
105 * driver is for them to be collected into requests on a request
106 * queue, and then to allow the device driver to select requests
107 * off that queue when it is ready. This works well for many block
108 * devices. However some block devices (typically virtual devices
109 * such as md or lvm) do not benefit from the processing on the
110 * request queue, and are served best by having the requests passed
111 * directly to them. This can be achieved by providing a function
112 * to blk_queue_make_request().
113 *
114 * Caveat:
115 * The driver that does this *must* be able to deal appropriately
116 * with buffers in "highmemory". This can be accomplished by either calling
117 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
118 * blk_queue_bounce() to create a buffer in normal memory.
119 **/
120 void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
121 {
122 /*
123 * set defaults
124 */
125 q->nr_requests = BLKDEV_MAX_RQ;
126 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
127 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
128 blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
129 blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
130
131 q->make_request_fn = mfn;
132 q->backing_dev_info.ra_pages =
133 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
134 q->backing_dev_info.state = 0;
135 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
136 blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
137 blk_queue_hardsect_size(q, 512);
138 blk_queue_dma_alignment(q, 511);
139 blk_queue_congestion_threshold(q);
140 q->nr_batching = BLK_BATCH_REQ;
141
142 q->unplug_thresh = 4; /* hmm */
143 q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
144 if (q->unplug_delay == 0)
145 q->unplug_delay = 1;
146
147 q->unplug_timer.function = blk_unplug_timeout;
148 q->unplug_timer.data = (unsigned long)q;
149
150 /*
151 * by default assume old behaviour and bounce for any highmem page
152 */
153 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
154 }
Whenever a request is inserted and the request queue is empty, the device is "plugged" via blk_plug_device():
__make_request
1248 if (!blk_queue_nonrot(q) && elv_queue_empty(q))
1249 blk_plug_device(q);
1250 add_request(q, req);
__make_request->blk_plug_device
205 /*
206 * "plug" the device if there are no outstanding requests: this will
207 * force the transfer to start only after we have put all the requests
208 * on the list.
209 *
210 * This is called with interrupts off and no requests on the queue and
211 * with the queue lock held.
212 */
213 void blk_plug_device(struct request_queue *q)
214 {
215 WARN_ON(!irqs_disabled());
216
217 /*
218 * don't plug a stopped queue, it must be paired with blk_start_queue()
219 * which will restart the queueing
220 */
221 if (blk_queue_stopped(q))
222 return;
223
224 if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
225 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
226 trace_block_plug(q);
227 }
228 }
Line 225's mod_timer() arms a timer. When the timer expires, its timer.function runs, namely blk_unplug_timeout():
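For readers who have not used kernel timers, the pattern here is the standard timer API of this era: arm a struct timer_list with mod_timer(), and its callback runs in softirq context on expiry. A minimal sketch pulling the arming and the callback together (my_timer, my_timeout and my_plug are illustrative names):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/blkdev.h>

static struct timer_list my_timer;

/* runs once the timer expires, mirroring what blk_unplug_timeout() does below */
static void my_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	kblockd_schedule_work(q, &q->unplug_work);	/* defer the real work */
}

static void my_timer_init(struct request_queue *q)
{
	setup_timer(&my_timer, my_timeout, (unsigned long)q);
}

static void my_plug(struct request_queue *q)
{
	/* (re)arm: fire q->unplug_delay jiffies (~3 ms) from now, cf. line 225 */
	mod_timer(&my_timer, jiffies + q->unplug_delay);
}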
316 void blk_unplug_timeout(unsigned long data)
317 {
318 struct request_queue *q = (struct request_queue *)data;
319
320 trace_block_unplug_timer(q);
321 kblockd_schedule_work(q, &q->unplug_work);
322 }
At line 321, the timer in turn schedules q->unplug_work. So when was q->unplug_work set up?
It was set up earlier, back in blk_init_queue_node (via blk_alloc_queue_node):
mmc_blk_probe->mmc_blk_alloc()->mmc_init_queue()->blk_init_queue->blk_init_queue_node
579 struct request_queue *
580 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
581 {
582 struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
mmc_blk_probe->mmc_blk_alloc()->mmc_init_queue()->blk_init_queue->blk_init_queue_node->blk_alloc_queue_node
508 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
509 {
510 struct request_queue *q;
511 int err;
512
513 q = kmem_cache_alloc_node(blk_requestq_cachep,
514 gfp_mask | __GFP_ZERO, node_id);
515 if (!q)
516 return NULL;
517
518 q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
519 q->backing_dev_info.unplug_io_data = q;
520 err = bdi_init(&q->backing_dev_info);
521 if (err) {
522 kmem_cache_free(blk_requestq_cachep, q);
523 return NULL;
524 }
525
526 init_timer(&q->unplug_timer);
527 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
528 INIT_LIST_HEAD(&q->timeout_list);
529 INIT_WORK(&q->unplug_work, blk_unplug_work);
530
531 kobject_init(&q->kobj, &blk_queue_ktype);
532
533 mutex_init(&q->sysfs_lock);
534 spin_lock_init(&q->__queue_lock);
535
536 return q;
537 }
Line 529 sets up queue->unplug_work. (Interestingly, line 527 sets up yet another timer, q->timeout; when is that one used?)
The work function of queue->unplug_work is blk_unplug_work:
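The INIT_WORK()/schedule pattern used here deserves a small, self-contained sketch: the work handler recovers its owning object with container_of(), exactly like blk_unplug_work() below (my_queue and my_unplug_work are illustrative names):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_queue {
	struct work_struct unplug_work;
	int id;
};

/* work handler: runs later in process context, from a kernel worker thread */
static void my_unplug_work(struct work_struct *work)
{
	struct my_queue *mq = container_of(work, struct my_queue, unplug_work);

	printk(KERN_INFO "unplug work for queue %d\n", mq->id);
}

static void my_queue_setup(struct my_queue *mq)
{
	INIT_WORK(&mq->unplug_work, my_unplug_work);	/* cf. line 529 above */
}

/* later, e.g. from the timer callback: schedule_work(&mq->unplug_work); */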
blk_unplug_timeout->blk_unplug_work
307 void blk_unplug_work(struct work_struct *work)
308 {
309 struct request_queue *q =
310 container_of(work, struct request_queue, unplug_work);
311
312 trace_block_unplug_io(q);
313 q->unplug_fn(q);
314 }
Line 313 in turn calls q->unplug_fn, i.e. generic_unplug_device (assigned in blk_init_queue_node).
blk_unplug_timeout->blk_unplug_work->generic_unplug_device
278 /**
279 * generic_unplug_device - fire a request queue
280 * @q: The &struct request_queue in question
281 *
282 * Description:
283 * Linux uses plugging to build bigger requests queues before letting
284 * the device have at them. If a queue is plugged, the I/O scheduler
285 * is still adding and merging requests on the queue. Once the queue
286 * gets unplugged, the request_fn defined for the queue is invoked and
287 * transfers started.
288 **/
289 void generic_unplug_device(struct request_queue *q)
290 {
291 if (blk_queue_plugged(q)) {
292 spin_lock_irq(q->queue_lock);
293 __generic_unplug_device(q);
294 spin_unlock_irq(q->queue_lock);
295 }
296 }
blk_unplug_timeout->blk_unplug_work->generic_unplug_device->__generic_unplug_device
268 void __generic_unplug_device(struct request_queue *q)
269 {
270 if (unlikely(blk_queue_stopped(q)))
271 return;
272 if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
273 return;
274
275 q->request_fn(q);
276 }
The request_fn invoked on line 275 is, for MMC, mmc_request:
blk_unplug_timeout->blk_unplug_work->generic_unplug_device->__generic_unplug_device->mmc_request
81 /*
82 * Generic MMC request handler. This is called for any queue on a
83 * particular host. When the host is not busy, we look for a request
84 * on any queue on this host, and attempt to issue it. This may
85 * not be the queue we were asked to process.
86 */
87 static void mmc_request(struct request_queue *q)
88 {
89 struct mmc_queue *mq = q->queuedata;
90 struct request *req;
91 int ret;
92
93 if (!mq) {
94 printk(KERN_ERR "MMC: killing requests for dead queue\n");
95 while ((req = elv_next_request(q)) != NULL) {
96 do {
97 ret = __blk_end_request(req, -EIO,
98 blk_rq_cur_bytes(req));
99 } while (ret);
100 }
101 return;
102 }
103
104 if (!mq->req)
105 wake_up_process(mq->thread); // hand the actual transfer over to a kernel thread
106 }
At line 105, the MMC layer simply wakes up mq->thread, which was created in mmc_init_queue:
mmc_init_queue
202 init_MUTEX(&mq->thread_sem);
203
204 mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
205 if (IS_ERR(mq->thread)) {
206 ret = PTR_ERR(mq->thread);
207 goto free_bounce_sg;
208 }
blk_unplug_timeout->blk_unplug_work->generic_unplug_device->__generic_unplug_device->mmc_request->mmc_queue_thread
44 static int mmc_queue_thread(void *d)
45 {
46 struct mmc_queue *mq = d;
47 struct request_queue *q = mq->queue;
48
49 current->flags |= PF_MEMALLOC;
50
51 down(&mq->thread_sem);
52 do {
53 struct request *req = NULL;
54
55 spin_lock_irq(q->queue_lock);
56 set_current_state(TASK_INTERRUPTIBLE);
57 if (!blk_queue_plugged(q))
58 req = elv_next_request(q);
59 mq->req = req;
60 spin_unlock_irq(q->queue_lock);
61
62 if (!req) {
63 if (kthread_should_stop()) {
64 set_current_state(TASK_RUNNING);
65 break;
66 }
67 up(&mq->thread_sem);
68 schedule();
69 down(&mq->thread_sem);
70 continue;
71 }
72 set_current_state(TASK_RUNNING);
73
74 mq->issue_fn(mq, req);
75 } while (1);
76 up(&mq->thread_sem);
77
78 return 0;
79 }
80
What this thread does is take a request off the request queue and execute it via mq->issue_fn. mq->issue_fn was set in mmc_blk_probe->mmc_blk_alloc to mmc_blk_issue_rq:
mmc_queue_thread->mmc_blk_issue_rq:
264 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
265 {
266 struct mmc_blk_data *md = mq->data;
267 struct mmc_card *card = md->queue.card;
268 struct mmc_blk_request brq;
269 int ret = 1, disable_multi = 0;
270
271 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
272 if (mmc_bus_needs_resume(card->host)) {
273 mmc_resume_bus(card->host);
274 mmc_blk_set_blksize(md, card);
275 }
276 #endif
277
278 mmc_claim_host(card->host);
279
280 do {
281 struct mmc_command cmd;
282 u32 readcmd, writecmd, status = 0;
283
284 memset(&brq, 0, sizeof(struct mmc_blk_request));
285 brq.mrq.cmd = &brq.cmd;
286 brq.mrq.data = &brq.data;
287
288 brq.cmd.arg = req->sector;
289 if (!mmc_card_blockaddr(card))
290 brq.cmd.arg <<= 9;
291 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
292 brq.data.blksz = 512;
293 brq.stop.opcode = MMC_STOP_TRANSMISSION;
294 brq.stop.arg = 0;
295 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
296 brq.data.blocks = req->nr_sectors;
297
298 /*
299 * The block layer doesn't support all sector count
300 * restrictions, so we need to be prepared for too big
301 * requests.
302 */
303 if (brq.data.blocks > card->host->max_blk_count)
304 brq.data.blocks = card->host->max_blk_count;
305
306 /*
307 * After a read error, we redo the request one sector at a time
308 * in order to accurately determine which sectors can be read
309 * successfully.
310 */
311 if (disable_multi && brq.data.blocks > 1)
312 brq.data.blocks = 1;
313
314 if (brq.data.blocks > 1) {
315 /* SPI multiblock writes terminate using a special
316 * token, not a STOP_TRANSMISSION request.
317 */
318 if (!mmc_host_is_spi(card->host)
319 || rq_data_dir(req) == READ)
320 brq.mrq.stop = &brq.stop;
321 readcmd = MMC_READ_MULTIPLE_BLOCK;
322 writecmd = MMC_WRITE_MULTIPLE_BLOCK;
323 } else {
324 brq.mrq.stop = NULL;
325 readcmd = MMC_READ_SINGLE_BLOCK;
326 writecmd = MMC_WRITE_BLOCK;
327 }
328
329 if (rq_data_dir(req) == READ) {
330 brq.cmd.opcode = readcmd;
331 brq.data.flags |= MMC_DATA_READ;
332 } else {
333 brq.cmd.opcode = writecmd;
334 brq.data.flags |= MMC_DATA_WRITE;
335 }
336
337 mmc_set_data_timeout(&brq.data, card);
338
339 brq.data.sg = mq->sg;
340 brq.data.sg_len = mmc_queue_map_sg(mq);
341
342 /*
343 * Adjust the sg list so it is the same size as the
344 * request.
345 */
346 if (brq.data.blocks != req->nr_sectors) {
347 int i, data_size = brq.data.blocks << 9;
348 struct scatterlist *sg;
349
350 for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
351 data_size -= sg->length;
352 if (data_size <= 0) {
353 sg->length += data_size;
354 i++;
355 break;
356 }
357 }
358 brq.data.sg_len = i;
359 }
360
361 mmc_queue_bounce_pre(mq);
362
363 mmc_wait_for_req(card->host, &brq.mrq);
364
365 mmc_queue_bounce_post(mq);
366
367 /*
368 * Check for errors here, but don't jump to cmd_err
369 * until later as we need to wait for the card to leave
370 * programming mode even when things go wrong.
371 */
372 if (brq.cmd.error || brq.data.error || brq.stop.error) {
373 if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
374 /* Redo read one sector at a time */
375 printk(KERN_WARNING "%s: retrying using single "
376 "block read\n", req->rq_disk->disk_name);
377 disable_multi = 1;
378 continue;
379 }
380 status = get_card_status(card, req);
381 } else if (disable_multi == 1) {
382 disable_multi = 0;
383 }
384
385 if (brq.cmd.error) {
386 printk(KERN_ERR "%s: error %d sending read/write "
387 "command, response %#x, card status %#x\n",
388 req->rq_disk->disk_name, brq.cmd.error,
389 brq.cmd.resp[0], status);
390 }
391
392 if (brq.data.error) {
393 if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
394 /* 'Stop' response contains card status */
395 status = brq.mrq.stop->resp[0];
396 printk(KERN_ERR "%s: error %d transferring data,"
397 " sector %u, nr %u, card status %#x\n",
398 req->rq_disk->disk_name, brq.data.error,
399 (unsigned)req->sector,
400 (unsigned)req->nr_sectors, status);
401 }
402
403 if (brq.stop.error) {
404 printk(KERN_ERR "%s: error %d sending stop command, "
405 "response %#x, card status %#x\n",
406 req->rq_disk->disk_name, brq.stop.error,
407 brq.stop.resp[0], status);
408 }
409
410 if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
411 do {
412 int err;
413
414 cmd.opcode = MMC_SEND_STATUS;
415 cmd.arg = card->rca << 16;
416 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
417 err = mmc_wait_for_cmd(card->host, &cmd, 5);
418 if (err) {
419 printk(KERN_ERR "%s: error %d requesting status\n",
420 req->rq_disk->disk_name, err);
421 goto cmd_err;
422 }
423 /*
424 * Some cards mishandle the status bits,
425 * so make sure to check both the busy
426 * indication and the card state.
427 */
428 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
429 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
430
431 #if 0
432 if (cmd.resp[0] & ~0x00000900)
433 printk(KERN_ERR "%s: status = %08x\n",
434 req->rq_disk->disk_name, cmd.resp[0]);
435 if (mmc_decode_status(cmd.resp))
436 goto cmd_err;
437 #endif
438 }
439
440 if (brq.cmd.error || brq.stop.error || brq.data.error) {
441 if (rq_data_dir(req) == READ) {
442 /*
443 * After an error, we redo I/O one sector at a
444 * time, so we only reach here after trying to
445 * read a single sector.
446 */
447 spin_lock_irq(&md->lock);
448 ret = __blk_end_request(req, -EIO, brq.data.blksz);
449 spin_unlock_irq(&md->lock);
450 continue;
451 }
452 goto cmd_err;
453 }
454
455 /*
456 * A block was successfully transferred.
457 */
458 spin_lock_irq(&md->lock);
459 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
460 spin_unlock_irq(&md->lock);
461 } while (ret);
462
463 mmc_release_host(card->host);
464
465 return 1;
466
467 cmd_err:
468 /*
469 * If this is an SD card and we're writing, we can first
470 * mark the known good sectors as ok.
471 *
472 * If the card is not SD, we can still ok written sectors
473 * as reported by the controller (which might be less than
474 * the real number of written sectors, but never more).
475 */
476 if (mmc_card_sd(card)) {
477 u32 blocks;
478
479 blocks = mmc_sd_num_wr_blocks(card);
480 if (blocks != (u32)-1) {
481 spin_lock_irq(&md->lock);
482 ret = __blk_end_request(req, 0, blocks << 9);
483 spin_unlock_irq(&md->lock);
484 }
485 } else {
486 spin_lock_irq(&md->lock);
487 ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
488 spin_unlock_irq(&md->lock);
489 }
490
491 mmc_release_host(card->host);
492
493 spin_lock_irq(&md->lock);
494 while (ret)
495 ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
496 spin_unlock_irq(&md->lock);
497
498 return 0;
499 }
500
What this function does is claim (lock) the MMC host and then drive the host through the transfer. The most important step is line 363: mmc_wait_for_req(card->host, &brq.mrq).
mmc_queue_thread->mmc_blk_issue_rq->mmc_wait_for_req
186 /**
187 * mmc_wait_for_req - start a request and wait for completion
188 * @host: MMC host to start command
189 * @mrq: MMC request to start
190 *
191 * Start a new MMC custom command request for a host, and wait
192 * for the command to complete. Does not attempt to parse the
193 * response.
194 */
195 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
196 {
197 DECLARE_COMPLETION_ONSTACK(complete);
198
199 mrq->done_data = &complete;
200 mrq->done = mmc_wait_done;
201
202 mmc_start_request(host, mrq); // this may take a long time
203
204 wait_for_completion(&complete); // wait until the data transfer completes; the completion is also signalled from interrupt context
205 }
206
207 EXPORT_SYMBOL(mmc_wait_for_req);
mmc_start_request() kicks off the transfer. When does it finish? That is what the completion is for: when the data transfer ends, the interrupt handler calls complete(), which increments the done counter inside the completion; once this thread sees that, wait_for_completion() returns and the request is finished.
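The done callback itself is not shown in this article; in this kernel it simply calls complete(). The general pattern can be sketched as follows (my_request, my_done and my_transfer are illustrative names, not the real MMC code):

#include <linux/completion.h>

struct my_request {
	struct completion *done_data;
	void (*done)(struct my_request *rq);
	int result;
};

/* called from the controller's interrupt handler once the transfer
 * finishes, the counterpart of mrq->done() in mmc_wait_for_req() */
static void my_done(struct my_request *rq)
{
	complete(rq->done_data);	/* wakes up the waiter below */
}

static int my_transfer(struct my_request *rq)
{
	DECLARE_COMPLETION_ONSTACK(done);

	rq->done_data = &done;
	rq->done = my_done;
	rq->result = 0;			/* the interrupt path would fill this in */

	/* kick off the hardware transfer here (asynchronous) */

	wait_for_completion(&done);	/* sleep until my_done() runs */
	return rq->result;
}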
Note: the formatting was refreshed on 2012-08-24 to remove the extra blank lines that had crept in between the code lines. This write-up was put together back in my student days; the analysis is fairly shallow and the code was not read in great detail, so please bear with the places where it falls short :)