
A guided reading of the Linux boot process (using ARM as an example), Part 1

==============================================

This article is original content from this site. Reprints are welcome; please credit the source: http://www.cnblogs.com/gdt-a20

==============================================

Taking ARM as the example, let's walk through the kernel startup process.

Kernel version: linux-3.2.tar.gz

1. arch/arm/kernel/head.S

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
        .arm

        __HEAD                          @ #define __HEAD .section ".head.text","ax"
ENTRY(stext)

 THUMB( adr     r9, BSYM(1f)    )       @ Kernel is always entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )

        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
                                        @ and irqs disabled
                                        @ (mask IRQ and FIQ, enter SVC mode)
        mrc     p15, 0, r9, c0, c0      @ get processor id (CPU ID into r9)
        bl      __lookup_processor_type @ r5=procinfo r9=cpuid, see #1 below
        movs    r10, r5                 @ invalid processor (r5=0)? r5 == 0 means the lookup failed
 THUMB( it      eq )                    @ force fixup-able long branch encoding
        beq     __error_p               @ yes, error 'p'

#ifndef CONFIG_XIP_KERNEL
        adr     r3, 2f
        ldmia   r3, {r4, r8}
        sub     r4, r3, r4              @ (PHYS_OFFSET - PAGE_OFFSET)
        add     r8, r8, r4              @ PHYS_OFFSET
#else
        ldr     r8, =PHYS_OFFSET        @ always constant in this case,
                                        @ #define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
#endif
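
The non-XIP branch just above computes PHYS_OFFSET at run time: adr yields the physical (run-time) address of the literal pool at label 2, while the first word stored there holds its link-time (virtual) address and the second word holds PAGE_OFFSET. A rough C rendering of the arithmetic, with hypothetical variable names (the contents of label 2 are not shown in the excerpt above):

#include <stdint.h>

/* Hypothetical stand-ins for what head.S reads at run time. */
extern uintptr_t runtime_addr_of_2f;   /* what "adr r3, 2f" yields (physical)       */
extern uintptr_t linktime_addr_of_2f;  /* the ".long ." word stored at 2f (virtual) */
extern uintptr_t page_offset;          /* the PAGE_OFFSET word stored after it      */

/* r8 = PHYS_OFFSET: where RAM actually begins on this board. */
uintptr_t compute_phys_offset(void)
{
        /* sub r4, r3, r4 : physical minus virtual = PHYS_OFFSET - PAGE_OFFSET */
        uintptr_t phys_minus_virt = runtime_addr_of_2f - linktime_addr_of_2f;

        /* add r8, r8, r4 : PAGE_OFFSET + (PHYS_OFFSET - PAGE_OFFSET) */
        return page_offset + phys_minus_virt;
}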

#1: arch/arm/kernel/head-common.S

/*
 * Read processor ID register (CP#15, CR0), and look up in the linker-built
 * supported processor list.  Note that we can't use the absolute addresses
 * for the __proc_info lists since we aren't running with the MMU on
 * (and therefore, we are not in the correct address space).  We have to
 * calculate the offset.
 *
 * r9 = cpuid
 * Returns:
 *      r3, r4, r6 corrupted
 *      r5 = proc_info pointer in physical address space
 *      r9 = cpuid (preserved)
 */
        __CPUINIT
__lookup_processor_type:
        adr     r3, __lookup_processor_type_data @ PC-relative: physical address of the data below
        ldmia   r3, {r4 - r6}           @ r4 = ".", r5 = __proc_info_begin, r6 = __proc_info_end
        sub     r3, r3, r4              @ get offset between virt&phys (run-time minus link-time address)
        add     r5, r5, r3              @ convert virt addresses to
        add     r6, r6, r3              @ physical address space
1:      ldmia   r5, {r3, r4}            @ value, mask: load this entry's cpu_val and cpu_mask
        and     r4, r4, r9              @ mask wanted bits of the CPU ID
        teq     r3, r4
        beq     2f                      @ match: return; otherwise try the next entry
        add     r5, r5, #PROC_INFO_SZ   @ sizeof(proc_info_list)
        cmp     r5, r6
        blo     1b
        mov     r5, #0                  @ unknown processor: return with r5 == 0
2:      mov     pc, lr
ENDPROC(__lookup_processor_type)

/*
 * Look in <asm/procinfo.h> for information about the __proc_info structure.
 */
        .align  2
        .type   __lookup_processor_type_data, %object
__lookup_processor_type_data:
        .long   .
        .long   __proc_info_begin
        .long   __proc_info_end
        .size   __lookup_processor_type_data, . - __lookup_processor_type_data

__error_p:
#ifdef CONFIG_DEBUG_LL
        adr     r0, str_p1
        bl      printascii
        mov     r0, r9
        bl      printhex8
        adr     r0, str_p2
        bl      printascii
        b       __error
str_p1: .asciz  "\nError: unrecognized/unsupported processor variant (0x"
str_p2: .asciz  ").\n"
        .align
#endif
ENDPROC(__error_p)

__error:
#ifdef CONFIG_ARCH_RPC
/*
 * Turn the screen red on a error - RiscPC only.
 */
        mov     r0, #0x02000000
        mov     r3, #0x11
        orr     r3, r3, r3, lsl #8
        orr     r3, r3, r3, lsl #16
        str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
#endif
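
Conceptually, __lookup_processor_type is a linear search over the proc_info_list entries that the linker gathers between __proc_info_begin and __proc_info_end, matching (cpuid & cpu_mask) == cpu_val. A minimal C sketch of that search; only the first two fields of struct proc_info_list are shown (see arch/arm/include/asm/procinfo.h), everything else is simplified:

#include <stddef.h>

/* First two fields of struct proc_info_list (arch/arm/include/asm/procinfo.h);
 * the remaining fields (mmu flags, per-CPU init function, ...) are omitted. */
struct proc_info_list {
        unsigned int cpu_val;   /* expected CPU ID bits         */
        unsigned int cpu_mask;  /* which CPU ID bits to compare */
        /* ... */
};

/* Linker-provided bounds of the proc_info section. */
extern struct proc_info_list __proc_info_begin[], __proc_info_end[];

/* Returns the matching entry, or NULL for an unknown processor (r5 == 0). */
struct proc_info_list *lookup_processor_type(unsigned int cpuid)
{
        struct proc_info_list *p;

        for (p = __proc_info_begin; p < __proc_info_end; p++)
                if ((cpuid & p->cpu_mask) == p->cpu_val)
                        return p;
        return NULL;
}

The physical/virtual offset adjustment in the assembly exists only because the MMU is still off; the sketch ignores that detail.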



Continuing in arch/arm/kernel/head.S:

/*
 * r1 = machine no, r2 = atags or dtb,
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 */
        bl      __vet_atags             @ see #1 below, head-common.S
#ifdef CONFIG_SMP_ON_UP
        bl      __fixup_smp             @ not covered here
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
        bl      __fixup_pv_table        @ not covered here
#endif
        bl      __create_page_tables    @ see #2 below

#1: arch/arm/kernel/head-common.S

/* Determine validity of the r2 atags pointer.  The heuristic requires
 * that the pointer be aligned, in the first 16k of physical RAM and
 * that the ATAG_CORE marker is first and present.  If CONFIG_OF_FLATTREE
 * is selected, then it will also accept a dtb pointer.  Future revisions
 * of this function may be more lenient with the physical address and
 * may also be able to move the ATAGS block if necessary.
 *
 * Returns:
 *      r2 either valid atags pointer, valid dtb pointer, or zero
 *      r5, r6 corrupted
 */
__vet_atags:
        tst     r2, #0x3                @ aligned? bail out if not word-aligned
        bne     1f

        ldr     r5, [r2, #0]            @ first word of the block into r5
#ifdef CONFIG_OF_FLATTREE
        ldr     r6, =OF_DT_MAGIC        @ is it a DTB? check for the DTB magic
        cmp     r5, r6
        beq     2f
#endif
        cmp     r5, #ATAG_CORE_SIZE     @ is first tag ATAG_CORE? (must be first)
        cmpne   r5, #ATAG_CORE_SIZE_EMPTY
        bne     1f
        ldr     r5, [r2, #4]
        ldr     r6, =ATAG_CORE
        cmp     r5, r6
        bne     1f

2:      mov     pc, lr                  @ atag/dtb pointer is ok

1:      mov     r2, #0
        mov     pc, lr
ENDPROC(__vet_atags)
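
In C terms the check is: the pointer must be word-aligned, and the block it points at must either start with the DTB magic (when CONFIG_OF_FLATTREE is enabled) or with an ATAG_CORE tag of the expected size. A hedged sketch; the constant values below are my reading of head-common.S and <asm/setup.h> and are not shown in the excerpt:

#include <stdint.h>

#define ATAG_CORE            0x54410001u
#define ATAG_CORE_SIZE       5u          /* words: 2-word header + 3-word tag_core      */
#define ATAG_CORE_SIZE_EMPTY 2u          /* header only                                 */
#define OF_DT_MAGIC          0xedfe0dd0u /* 0xd00dfeed as stored on a little-endian CPU */

/* Returns p if it looks like a valid atags block or dtb, otherwise NULL (r2 := 0). */
const uint32_t *vet_atags(const uint32_t *p)
{
        if ((uintptr_t)p & 0x3)                  /* must be word aligned         */
                return 0;
#ifdef CONFIG_OF_FLATTREE
        if (p[0] == OF_DT_MAGIC)                 /* device tree blob?            */
                return p;
#endif
        if (p[0] != ATAG_CORE_SIZE &&            /* first tag must have the size */
            p[0] != ATAG_CORE_SIZE_EMPTY)        /* of an ATAG_CORE ...          */
                return 0;
        if (p[1] != ATAG_CORE)                   /* ... and the ATAG_CORE ID     */
                return 0;
        return p;
}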

#2: __create_page_tables

/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *      r0, r3, r5-r7 corrupted
 *      r4 = physical page table address
 */
__create_page_tables:
        pgtbl   r4, r8                  @ page table address
                @ #define PG_DIR_SIZE 0x4000
                @ #define PMD_ORDER   2
                @ .macro pgtbl, rd, phys
                @     add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
                @ .endm
                @ i.e. r4 = PHYS_OFFSET + TEXT_OFFSET - PG_DIR_SIZE
                @ (cf. vmlinux.lds.S and TEXT_OFFSET; the kernel text loads at
                @  0x30008000 here?  PG_DIR_SIZE = 0x4000 = 16K)

        /*
         * Clear the swapper page table
         */
        mov     r0, r4                  @ r0 = page table base
        mov     r3, #0                  @ r3 = 0
        add     r6, r0, #PG_DIR_SIZE    @ r6 = end of the page directory
1:      str     r3, [r0], #4            @ clear to zero, four words per pass
        str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        teq     r0, r6
        bne     1b                      @ loop until the end is reached

        ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags: load the section flags

        /*
         * Create identity mapping to cater for __enable_mmu.
         * This identity mapping will be removed by paging_init().
         */
        adr     r0, __enable_mmu_loc
        ldmia   r0, {r3, r5, r6}
        sub     r0, r0, r3              @ virt->phys offset: r0 is the run-time address, r3 the link address
        add     r5, r5, r0              @ physical address of __enable_mmu
        add     r6, r6, r0              @ physical address of __enable_mmu_end
        mov     r5, r5, lsr #SECTION_SHIFT  @ convert to 1MB section numbers
        mov     r6, r6, lsr #SECTION_SHIFT

1:      orr     r3, r7, r5, lsl #SECTION_SHIFT  @ flags + kernel base: map from __enable_mmu onwards,
                                                @ one 1MB section at a time, identity-mapped
        str     r3, [r4, r5, lsl #PMD_ORDER]    @ identity mapping: the index is the physical section
                                                @ number, so each section maps back to itself
        cmp     r5, r6                          @ until __enable_mmu_end
        addlo   r5, r5, #1                      @ next section
        blo     1b

        /*
         * Now setup the pagetables for our kernel direct
         * mapped region.
         */
        mov     r3, pc                  @ current PC into r3
        mov     r3, r3, lsr #SECTION_SHIFT      @ round down to a 1MB section
        orr     r3, r7, r3, lsl #SECTION_SHIFT  @ kernel mapping: the 0xC... virtual addresses map
                                                @ to the physical address the kernel is running at
        add     r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
        ldr     r6, =(KERNEL_END - 1)
        add     r0, r0, #1 << PMD_ORDER
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:      cmp     r0, r6
        add     r3, r3, #1 << SECTION_SHIFT
        strls   r3, [r0], #1 << PMD_ORDER

Further down in head.S sit the literal words referenced via __enable_mmu_loc above:

        .ltorg
        .align
__enable_mmu_loc:
        .long   .
        .long   __enable_mmu
        .long   __enable_mmu_end
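
With 1MB sections and one word per first-level entry, every mapping built here boils down to: store (flags | physical section base) into the table slot indexed by the virtual section number. A rough C sketch of the identity map and the kernel direct map; all names are illustrative, the real code works directly in registers:

#include <stdint.h>

#define SECTION_SHIFT 20                    /* 1MB sections */
#define SECTION_SIZE  (1u << SECTION_SHIFT)

/* Hypothetical helper standing in for the register-level code. */
void map_section(uint32_t *pgd, uint32_t virt, uint32_t phys, uint32_t mmuflags)
{
        /* one word per 1MB of virtual space: index = virt >> 20 */
        pgd[virt >> SECTION_SHIFT] = (phys & ~(SECTION_SIZE - 1)) | mmuflags;
}

void create_initial_mappings(uint32_t *pgd, uint32_t mmuflags,
                             uint32_t enable_mmu_phys,
                             uint32_t enable_mmu_end_phys,
                             uint32_t kernel_virt_start,
                             uint32_t kernel_virt_end,
                             uint32_t kernel_phys_start)
{
        uint32_t va, pa;

        /* 1:1 (identity) map covering __enable_mmu..__enable_mmu_end, so the
         * code survives the instant the MMU is switched on.  paging_init()
         * removes it later. */
        for (pa = enable_mmu_phys; pa <= enable_mmu_end_phys; pa += SECTION_SIZE)
                map_section(pgd, pa, pa, mmuflags);

        /* Direct map of the kernel image: 0xC... virtual addresses point at
         * wherever the kernel is actually running (derived from the PC in
         * the assembly). */
        for (va = kernel_virt_start, pa = kernel_phys_start;
             va < kernel_virt_end; va += SECTION_SIZE, pa += SECTION_SIZE)
                map_section(pgd, va, pa, mmuflags);
}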

 

/*
 * Then map boot params address in r2 or
 * the first 1MB of ram if boot params address is not specified.
 * (Map the section holding the boot parameters: r2 = physical address
 *  of the boot params, r8 = phys_offset.)
 */
        mov     r0, r2, lsr #SECTION_SHIFT
        movs    r0, r0, lsl #SECTION_SHIFT      @ round r0 down to a 1MB boundary, setting the flags
        moveq   r0, r8                          @ if zero (no boot params given), use r8,
                                                @ i.e. the start of RAM (0x3------ here)
        sub     r3, r0, r8                      @ offset of the params section from the start of RAM
                                                @ (zero if no boot params were specified)
        add     r3, r3, #PAGE_OFFSET            @ offset + PAGE_OFFSET = virtual address of the params
        add     r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)   @ index into the page table for that address
        orr     r6, r7, r0
        str     r6, [r3]

Since kernel 3.0 the boot code selectively double-maps only the __enable_mmu code and the boot params area, rather than mechanically double-mapping the first 8MB as older kernels did.

        mov     pc, lr
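
The fragment above maps the 1MB section that contains the boot parameters (or, if r2 is zero, the first 1MB of RAM) at its corresponding kernel virtual address. A small self-contained C sketch under the same illustrative naming as before:

#include <stdint.h>

#define SECTION_SHIFT 20
#define SECTION_SIZE  (1u << SECTION_SHIFT)

/* Map the 1MB section holding the boot params (atags/dtb).  All names are
 * illustrative; the kernel does this in a handful of instructions. */
void map_boot_params(uint32_t *pgd, uint32_t mmuflags,
                     uint32_t atags_phys,    /* r2                              */
                     uint32_t phys_offset,   /* r8: start of RAM                */
                     uint32_t page_offset)   /* PAGE_OFFSET, often 0xC0000000   */
{
        /* round down to the section holding the params; if no pointer was
         * passed (r2 == 0), fall back to the first 1MB of RAM */
        uint32_t phys = atags_phys & ~(SECTION_SIZE - 1);
        if (phys == 0)
                phys = phys_offset;

        /* its kernel virtual address: PAGE_OFFSET + (phys - phys_offset) */
        uint32_t virt = page_offset + (phys - phys_offset);

        /* one first-level entry per 1MB of virtual space */
        pgd[virt >> SECTION_SHIFT] = phys | mmuflags;
}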


Back in arch/arm/kernel/head.S:

        /*
         * The following calls CPU specific code in a position independent
         * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
         * xxx_proc_info structure selected by __lookup_processor_type
         * above.  On return, the CPU will be ready for the MMU to be
         * turned on, and r0 will hold the CPU control register value.
         */
        ldr     r13, =__mmap_switched           @ address to jump to after
                                                @ mmu has been enabled
        adr     lr, BSYM(1f)                    @ return (PIC) address
        mov     r8, r4                          @ set TTBR1 to swapper_pg_dir
 ARM(   add     pc, r10, #PROCINFO_INITFUNC     )
 THUMB( add     r12, r10, #PROCINFO_INITFUNC    )
 THUMB( mov     pc, r12                         )
1:      b       __enable_mmu

Entering __enable_mmu:

/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 *
 * r0  = cp#15 control register
 * r1  = machine ID
 * r2  = atags or dtb pointer
 * r4  = page table pointer
 * r9  = processor ID
 * r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
        orr     r0, r0, #CR_A
#else
        bic     r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
        bic     r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
#endif
        mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
                      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
        b       __turn_mmu_on
ENDPROC(__enable_mmu)
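
Before branching to __turn_mmu_on, r5 is loaded with the domain access control value: each of the 16 ARM domains owns a 2-bit field in that register, and domain_val(dom, type) just shifts the access type into that domain's field. A sketch of the arithmetic; the domain numbers below are what I understand <asm/domain.h> to define, so treat them as assumptions:

/* 2-bit access types in the DACR (CP15 c3). */
#define DOMAIN_NOACCESS 0
#define DOMAIN_CLIENT   1
#define DOMAIN_MANAGER  3

/* Domain numbers (values assumed from arch/arm/include/asm/domain.h). */
#define DOMAIN_KERNEL   0
#define DOMAIN_TABLE    0
#define DOMAIN_USER     1
#define DOMAIN_IO       2

/* Each domain owns a 2-bit field in the 32-bit register. */
#define domain_val(dom, type)   ((type) << (2 * (dom)))

/* The value head.S loads into r5 and then into CP15 c3: kernel, table and
 * user domains as manager (no permission checks yet), I/O as client. */
unsigned int initial_dacr(void)
{
        return domain_val(DOMAIN_USER,   DOMAIN_MANAGER) |
               domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
               domain_val(DOMAIN_TABLE,  DOMAIN_MANAGER) |
               domain_val(DOMAIN_IO,     DOMAIN_CLIENT);
}
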
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 * r0  = cp#15 control register
 * r1  = machine ID
 * r2  = atags or dtb pointer
 * r9  = processor ID
 * r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
        .align  5
__turn_mmu_on:
        mov     r0, r0
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
        mrc     p15, 0, r3, c0, c0, 0           @ read id reg
        mov     r3, r3
        mov     r3, r13                         @ jump back through r13 = __mmap_switched
        mov     pc, r3
__enable_mmu_end:
ENDPROC(__turn_mmu_on)


arch/arm/kernel/head-common.S:

/*
 * The following fragment of code is executed with the MMU on in MMU mode,
 * and uses absolute addresses; this is not position independent.
 *
 * r0  = cp#15 control register
 * r1  = machine ID
 * r2  = atags/dtb pointer
 * r9  = processor ID
 */
        __INIT
__mmap_switched:
        adr     r3, __mmap_switched_data

        ldmia   r3!, {r4, r5, r6, r7}
        cmp     r4, r5                          @ Copy data segment if needed
1:      cmpne   r5, r6
        ldrne   fp, [r4], #4
        strne   fp, [r5], #4
        bne     1b

        mov     fp, #0                          @ Clear BSS (and zero fp)
1:      cmp     r6, r7
        strcc   fp, [r6], #4
        bcc     1b

 ARM(   ldmia   r3, {r4, r5, r6, r7, sp})
 THUMB( ldmia   r3, {r4, r5, r6, r7}    )
 THUMB( ldr     sp, [r3, #16]           )
        str     r9, [r4]                        @ Save processor ID
        str     r1, [r5]                        @ Save machine type
        str     r2, [r6]                        @ Save atags pointer
        bic     r4, r0, #CR_A                   @ Clear 'A' bit
        stmia   r7, {r0, r4}                    @ Save control register values
        b       start_kernel
ENDPROC(__mmap_switched)

From here on everything runs at absolute (virtual) addresses: the code copies the .data segment if needed, clears .bss, saves a few values for later use, and branches to start_kernel, entering the C environment.
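
If C were usable this early, __mmap_switched would read roughly as below. The linker symbols match what __mmap_switched_data points at (__data_loc/_sdata for the .data copy, __bss_start/_end for the clear), and processor_id, __machine_arch_type and __atags_pointer are the kernel globals being filled in; the function itself and its argument names are illustrative:

#include <stdint.h>

/* Linker symbols (their addresses matter, not their values). */
extern uint32_t __data_loc[], _sdata[], __bss_start[], _end[];

/* Globals the boot code fills in for start_kernel(). */
extern unsigned int processor_id;
extern unsigned int __machine_arch_type;
extern unsigned int __atags_pointer;

void start_kernel(void);

void mmap_switched(unsigned int cpuid, unsigned int machine_nr,
                   unsigned int atags)
{
        uint32_t *src = __data_loc, *dst = _sdata, *p;

        /* Copy .data from its load address if it is not already in place
         * (only an XIP kernel actually needs this copy). */
        if (src != dst)
                while (dst < __bss_start)
                        *dst++ = *src++;

        /* Clear .bss so C code starts with zeroed static data. */
        for (p = __bss_start; p < _end; p++)
                *p = 0;

        processor_id        = cpuid;      /* r9 */
        __machine_arch_type = machine_nr; /* r1 */
        __atags_pointer     = atags;      /* r2 */

        /* The real code also switches sp to the init task's stack and saves
         * the control register values into cr_alignment; omitted here. */
        start_kernel();                   /* never returns */
}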







 


