How the system control register (cp15 c1) is set up in the kernel
The c1 register, the System Control Register, is what controls the core's basic behaviour (MMU, caches, exception vectors and so on), so it matters a great deal. Although every bit has a #define in arch/arm/include/asm/system.h, the kernel mostly ignores those names and writes magic numbers directly, which makes the code hard to read. So let us trace from the very beginning how the c1 register gets set.
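As a reading aid for the assembly below, here is a minimal C sketch (not kernel code; the constant names merely mirror the CR_* defines mentioned above, with bit positions taken from the ARMv7 SCTLR description) showing how a magic number such as 0x5000 decomposes into named bits:

    #include <stdio.h>

    /* Bit positions of the cp15 c1 System Control Register (SCTLR), ARMv7.
     * Names follow the CR_* defines in arch/arm/include/asm/system.h. */
    #define CR_M   (1u << 0)   /* MMU enable                            */
    #define CR_A   (1u << 1)   /* alignment fault checking              */
    #define CR_C   (1u << 2)   /* data/unified cache enable             */
    #define CR_Z   (1u << 11)  /* branch prediction enable              */
    #define CR_I   (1u << 12)  /* instruction cache enable              */
    #define CR_V   (1u << 13)  /* high exception vectors (0xFFFF0000)   */
    #define CR_RR  (1u << 14)  /* round-robin cache replacement         */
    #define CR_U   (1u << 22)  /* unaligned access enable               */
    #define CR_TRE (1u << 28)  /* TEX remap enable                      */

    int main(void)
    {
        /* The decompressor's magic number, written with names instead: */
        printf("0x5000 == CR_I | CR_RR : %s\n",
               0x5000u == (CR_I | CR_RR) ? "yes" : "no");
        printf("MMU + caches on       : 0x%08x\n", CR_M | CR_C | CR_I);
        return 0;
    }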
Start with arch/arm/boot/compressed/head.S, at cache_on:
495  * On entry,
496  *  r4 = kernel execution address
497  *  r7 = architecture number
498  *  r8 = atags pointer
499  * On exit,
500  *  r0, r1, r2, r3, r9, r10, r12 corrupted
501  * This routine must preserve:
502  *  r4, r7, r8
503  */
504                 .align  5
505 cache_on:       mov     r3, #8                  @ cache_on function
506                 b       call_cache_fn
713  * Here follow the relocatable cache support functions for the
714  * various processors.  This is a generic hook for locating an
715  * entry and jumping to an instruction at the specified offset
716  * from the start of the block.  Please note this is all position
717  * independent code.
718  *
719  *  r1  = corrupted
720  *  r2  = corrupted
721  *  r3  = block offset
722  *  r9  = corrupted
723  *  r12 = corrupted
724  */
725
726 call_cache_fn:  adr     r12, proc_types
727 #ifdef CONFIG_CPU_CP15
728                 mrc     p15, 0, r9, c0, c0      @ get processor ID
729 #else
730                 ldr     r9, =CONFIG_PROCESSOR_ID
731 #endif
732 1:              ldr     r1, [r12, #0]           @ get value
733                 ldr     r2, [r12, #4]           @ get mask
734                 eor     r1, r1, r9              @ (real ^ match)
735                 tst     r1, r2                  @       & mask
736  ARM(           addeq   pc, r12, r3     )       @ call cache function
737  THUMB(         addeq   r12, r3         )
738  THUMB(         moveq   pc, r12         )       @ call cache function
739                 add     r12, r12, #PROC_ENTRY_SIZE
740                 b       1b
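The loop above scans the proc_types table: each entry starts with an expected processor-ID value and a mask, followed by a block of branch instructions, and the eor/tst pair tests whether (value ^ cpu_id) & mask == 0 before jumping r3 bytes (8 for cache_on) into the matching entry. Roughly the same logic in C, purely as an illustration (the struct layout and names here are invented, not the kernel's):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-in for one proc_types entry: an ID value, a mask,
     * and the per-entry cache functions reached via the block offset in r3. */
    struct proc_type {
        uint32_t value;            /* expected processor ID bits  */
        uint32_t mask;             /* which ID bits to compare    */
        void (*cache_on)(void);    /* offset #8 in the real table */
        void (*cache_off)(void);
        void (*cache_flush)(void);
    };

    /* Equivalent of the 1: ldr/ldr/eor/tst/addeq loop in call_cache_fn. */
    static void call_cache_fn(const struct proc_type *table, size_t nr,
                              uint32_t cpu_id)
    {
        for (size_t i = 0; i < nr; i++) {
            /* eor r1, r1, r9 ; tst r1, r2 : match if the masked IDs agree */
            if (((table[i].value ^ cpu_id) & table[i].mask) == 0) {
                table[i].cache_on();   /* addeq pc, r12, r3 with r3 = 8 */
                return;
            }
        }
    }

For an ARMv7 core, the cache_on slot of the matching entry lands in __armv7_mmu_cache_on: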
639 __armv7_mmu_cache_on:
640                 mov     r12, lr
641 #ifdef CONFIG_MMU
642                 mrc     p15, 0, r11, c0, c1, 4  @ read ID_MMFR0
643                 tst     r11, #0xf               @ VMSA
644                 blne    __setup_mmu             @ set up a temporary page table; decompression wants speed too
645                 mov     r0, #0
646                 mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
647                 tst     r11, #0xf               @ VMSA
648                 mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
649 #endif
650                 mrc     p15, 0, r0, c1, c0, 0   @ read control reg
651                 orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement (0b101)
652                 orr     r0, r0, #0x003c         @ write buffer (0b111100)
653 #ifdef CONFIG_MMU
654 #ifdef CONFIG_CPU_ENDIAN_BE8
655                 orr     r0, r0, #1 << 25        @ big-endian page tables
656 #endif
657                 orrne   r0, r0, #1              @ MMU enabled
658                 movne   r1, #-1
659                 mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
660                 mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
661 #endif
662                 mcr     p15, 0, r0, c7, c5, 4   @ ISB
The System Control register is a bit field (one bit per feature), and the steps above affect it as follows:
Line 650 reads the current (reset) value of the register.
Line 651, 0x5000: RR=1 and I=1, enabling the I-cache with round-robin replacement; V stays 0, so the vector base address is 0x00000000.
Line 652, 0x003C: C=1 plus the legacy write-buffer bits.
Lines 653-657 (the CONFIG_MMU block): M=1, turning the MMU on. From here until arch/arm/kernel/head.S reprograms the register, it holds this value:
0b 0000 0000 1100 0101 0101 0000 0111 1101  (0x00C5507D)
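A quick sketch (plain user-space C, only to check the arithmetic) confirming that the binary value quoted above really contains the bits set at lines 651, 652 and in the CONFIG_MMU block:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* The value quoted above, held in r0 until head.S reprograms the register. */
        uint32_t r0 = 0x00C5507D;      /* 0b 0000 0000 1100 0101 0101 0000 0111 1101 */

        uint32_t icache_rr = 0x5000;   /* line 651: I-cache enable, RR replacement   */
        uint32_t wb_bits   = 0x003c;   /* line 652: legacy cache/write-buffer bits   */
        uint32_t mmu       = 0x0001;   /* CONFIG_MMU block: M = 1                    */

        printf("contains 0x5000: %s\n", (r0 & icache_rr) == icache_rr ? "yes" : "no");
        printf("contains 0x003c: %s\n", (r0 & wb_bits)   == wb_bits   ? "yes" : "no");
        printf("MMU bit set:     %s\n", (r0 & mmu) ? "yes" : "no");

        /* Print which bit positions are set, for comparison with the SCTLR layout. */
        for (int bit = 31; bit >= 0; bit--)
            if (r0 & (1u << bit))
                printf("bit %d set\n", bit);
        return 0;
    }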
The code above runs before the kernel zImage is decompressed. After decompression, execution continues in arch/arm/kernel/head.S.
arch/arm/kernel/head.S calls the architecture-specific setup function; for ARMv7 that is __v7_setup in arch/arm/mm/proc-v7.S.
The c1-related code there is:
257             mrc     p15, 0, r0, c1, c0, 0   @ read control register
258             adr     r5, v7_crval
259             ldmia   r5, {r5, r6}
The value of v7_crval is:
161     /*   AT
162      *  TFR   EV X F   I D LR    S
163      * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
164      * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx  < forced
165      *    1    0 110       0011 1100 .111 1101  < we want
166      */
167             .align  2
168             .type   v7_crval, #object
169 v7_crval:
170             crval   clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
So after line 259, r5 holds clear = 0x0120c302
and r6 holds mmuset = 0x10c03c7d.
Then, in arch/arm/mm/proc-v7.S:
268             bic     r0, r0, r5              @ clear bits them
269             orr     r0, r0, r6              @ set them
Comparing the value that was read back with the bits to clear and the bits to set, the bit-level computation goes like this:
  0b 0000 0000 1100 0101 0101 0000 0111 1101   r0 (value read back)
  0b 0000 0001 0010 0000 1100 0011 0000 0010   r5, bic
  -------------------------------------------
  0b 0000 0000 1100 0101 0001 0000 0111 1101   r0 -- only the RR bit was cleared
  0b 0001 0000 1100 0000 0011 1100 0111 1101   r6, orr
  -------------------------------------------
  0b 0001 0000 1100 0101 0011 1100 0111 1101   final value
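The same computation in C, as a sanity check (values taken from v7_crval and from the register value left behind by the decompressor):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t r0     = 0x00C5507D;  /* control register value left by the decompressor */
        uint32_t clear  = 0x0120C302;  /* r5: bits that __v7_setup clears (bic)            */
        uint32_t mmuset = 0x10C03C7D;  /* r6: bits that __v7_setup sets (orr)              */

        r0 &= ~clear;                  /* bic r0, r0, r5  -> 0x00C5107D (only RR cleared)  */
        r0 |= mmuset;                  /* orr r0, r0, r6  -> 0x10C53C7D                    */

        printf("final SCTLR value: 0x%08X\n", (unsigned int)r0);  /* expected: 0x10C53C7D */
        return 0;
    }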
The bits that end up set:
M(0): MMU enable bit.
C(2): Cache enable bit; a global enable for the data and unified caches.
SW(10): SWP/SWPB enable bit; enables the SWP and SWPB instructions (SWP swaps a word between a register and memory).
Z(11): Branch prediction enable bit.
I(12): Instruction cache enable bit.
V(13): Exception base address = 0xFFFF0000; this setting is referred to as high vectors.
U(22): Unaligned access support for loads and stores of single 16-bit halfwords and 32-bit words.
TRE(28): TEX remap enable.
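If you want to look at these bits on a live ARMv7 machine, the register is only readable in privileged mode; below is a hypothetical sketch of a tiny kernel module that dumps it (the module name and printed fields are made up for illustration, but the mrc encoding is the same one used in the assembly above):

    /* Hypothetical kernel-module sketch: read cp15 c1 (SCTLR) and print it.
     * Must run in privileged mode; userspace cannot execute mrc on cp15. */
    #include <linux/module.h>
    #include <linux/kernel.h>

    static int __init sctlr_dump_init(void)
    {
        unsigned long sctlr;

        /* Same encoding as the assembly above: mrc p15, 0, <Rt>, c1, c0, 0 */
        asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));

        pr_info("SCTLR = 0x%08lx (M=%lu C=%lu Z=%lu I=%lu V=%lu)\n",
                sctlr,
                (sctlr >> 0) & 1, (sctlr >> 2) & 1, (sctlr >> 11) & 1,
                (sctlr >> 12) & 1, (sctlr >> 13) & 1);
        return 0;
    }

    static void __exit sctlr_dump_exit(void) { }

    module_init(sctlr_dump_init);
    module_exit(sctlr_dump_exit);
    MODULE_LICENSE("GPL");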
Back in arch/arm/kernel/head.S, after __v7_setup returns, __enable_mmu is called to make a few config-dependent adjustments.
It then loads TTBR0 and the domain access control register, and branches to __turn_mmu_on, which actually turns the MMU on.
406 __enable_mmu:
407 #if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
408         orr     r0, r0, #CR_A
409 #else
410         bic     r0, r0, #CR_A           @ alignment abort enable?
411 #endif
412 #ifdef CONFIG_CPU_DCACHE_DISABLE
413         bic     r0, r0, #CR_C
414 #endif
415 #ifdef CONFIG_CPU_BPREDICT_DISABLE
416         bic     r0, r0, #CR_Z
417 #endif
418 #ifdef CONFIG_CPU_ICACHE_DISABLE
419         bic     r0, r0, #CR_I
420 #endif
421 #ifdef CONFIG_ARM_LPAE
422         mov     r5, #0
423         mcrr    p15, 0, r4, r5, c2      @ load TTBR0
424 #else
425         mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
426                       domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
427                       domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
428                       domain_val(DOMAIN_IO, DOMAIN_CLIENT))
429         mcr     p15, 0, r5, c3, c0, 0   @ load domain access register
430         mcr     p15, 0, r4, c2, c0, 0   @ load page table pointer TTBR0
431 #endif
432         b       __turn_mmu_on
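The non-LPAE branch builds the Domain Access Control register value with the domain_val() macro. A sketch of that computation, assuming the usual numbering from arch/arm/include/asm/domain.h (DOMAIN_KERNEL and DOMAIN_TABLE share domain 0, DOMAIN_USER is 1, DOMAIN_IO is 2; treat these numeric values as an assumption, not a quote from the source above):

    #include <stdio.h>

    /* Domain numbers and access types, assumed to follow arch/arm/include/asm/domain.h. */
    #define DOMAIN_KERNEL   0
    #define DOMAIN_TABLE    0
    #define DOMAIN_USER     1
    #define DOMAIN_IO       2

    #define DOMAIN_NOACCESS 0
    #define DOMAIN_CLIENT   1   /* accesses checked against page-table permissions */
    #define DOMAIN_MANAGER  3   /* accesses never permission-checked               */

    /* Each domain owns a 2-bit field in the Domain Access Control register. */
    #define domain_val(dom, type)  ((type) << (2 * (dom)))

    int main(void)
    {
        unsigned long r5 = domain_val(DOMAIN_USER,   DOMAIN_MANAGER) |
                           domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
                           domain_val(DOMAIN_TABLE,  DOMAIN_MANAGER) |
                           domain_val(DOMAIN_IO,     DOMAIN_CLIENT);

        /* manager for domains 0 and 1, client for domain 2 -> 0x1f */
        printf("DACR value loaded at line 429: 0x%08lx\n", r5);
        return 0;
    }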
435 /*
436  * Enable the MMU.  This completely changes the structure of the visible
437  * memory space.  You will not be able to trace execution through this.
438  * If you have an enquiry about this, *please* check the linux-arm-kernel
439  * mailing list archives BEFORE sending another post to the list.
440  *
441  *  r0  = cp#15 control register
442  *  r1  = machine ID
443  *  r2  = atags or dtb pointer
444  *  r9  = processor ID
445  *  r13 = *virtual* address to jump to upon completion
446  *
447  * other registers depend on the function called upon completion
448  */
449         .align  5
450         .pushsection    .idmap.text, "ax"
451 ENTRY(__turn_mmu_on)
452         mov     r0, r0                  @ nop
453         instr_sync                      @ isb
454         mcr     p15, 0, r0, c1, c0, 0   @ write control reg
455         mrc     p15, 0, r3, c0, c0, 0   @ read id reg
456         instr_sync                      @ isb
457         mov     r3, r3
458         mov     r3, r13
459         mov     pc, r3
460 __turn_mmu_on_end:
461 ENDPROC(__turn_mmu_on)
This write-up is a bit scattered; if anything is unclear, feel free to get in touch and discuss.