Understanding Kdump (Executing Part)

The capture kernel is brought up when a panic happens. This post analyzes where in the kernel source that happens and walks the routines along the path.
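
As a side note, the whole path below can be exercised deliberately: writing 'c' to /proc/sysrq-trigger forces a crash that ends up in panic() and boots the capture kernel. A minimal user-space sketch (my addition, not from the kernel source; needs root and a loaded capture kernel):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sysrq-trigger", "w");

        if (!f) {
                perror("/proc/sysrq-trigger");
                return 1;
        }
        fputc('c', f);          /* 'c' = crash: triggers the panic path */
        fclose(f);
        return 0;
}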

void panic(const char *fmt, ...)
{
        static DEFINE_SPINLOCK(panic_lock);
        static char buf[1024];
        va_list args;
        long i, i_next = 0;
        int state = 0;

        /*
         * It's possible to come here directly from a panic-assertion and
         * not have preempt disabled. Some functions called from here want
         * preempt to be disabled. No point enabling it later though...
         *
         * Only one CPU is allowed to execute the panic code from here. For
         * multiple parallel invocations of panic, all other CPUs either
         * stop themself or will wait until they are stopped by the 1st CPU
         * with smp_send_stop().
         */
        if (!spin_trylock(&panic_lock))
                panic_smp_self_stop();

        console_verbose();
        bust_spinlocks(1);
        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
        /*
         * Avoid nested stack-dumping if a panic occurs during oops processing
         */
        if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
                dump_stack();
#endif

        /*
         * If we have crashed and we have a crash kernel loaded let it handle
         * everything else.
         * Do we want to call this before we try to display a message?
         */
        crash_kexec(NULL);
panic->crash_kexec:
void crash_kexec(struct pt_regs *regs)
{
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                }
                mutex_unlock(&kexec_mutex);
        }
}

As described in the previous post (the loading part), when a capture kernel is loaded into memory, kexec_crash_image is filled in. crash_kexec() then performs four steps, each corresponding to one line of the function body:
1. crash_setup_regs(). Saves the registers at the panic spot into fixed_regs. regs is the parameter of crash_kexec(); when it is reached from an exception handler it carries the faulting context, while panic() passes NULL and the current CPU's registers are captured instead.
2. crash_save_vmcoreinfo(). Records the current time into vmcoreinfo_data[], then copies the data into vmcoreinfo_note[] as an ELF note. Its address can be obtained by reading /sys/kernel/vmcoreinfo.
3. machine_crash_shutdown(). Saves fixed_regs into crash_notes[], which can be read through /sys/devices/system/cpu/cpu0/crash_notes, then disables interrupts via machine_kexec_mask_interrupts(). (Both sysfs files are meant for user space; see the sketch after this list.)
4. machine_kexec(). Copies the relocate_new_kernel code into reboot_code_buffer, then hands control to reboot_code_buffer.
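
The two sysfs files mentioned in steps 2 and 3 are how user space locates these notes; tools like makedumpfile read them when assembling the dump. A minimal sketch (my addition, not from the post) that simply prints their contents:

#include <stdio.h>

static void show(const char *path)
{
        char line[128];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return;
        }
        if (fgets(line, sizeof(line), f))
                printf("%s: %s", path, line);
        fclose(f);
}

int main(void)
{
        show("/sys/kernel/vmcoreinfo");                   /* step 2: note address and size */
        show("/sys/devices/system/cpu/cpu0/crash_notes"); /* step 3: crash_notes address */
        return 0;
}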

panic->crash_kexec->machine_kexec
void machine_kexec(struct kimage *image)
{
        unsigned long page_list;
        unsigned long reboot_code_buffer_phys;
        void *reboot_code_buffer;

        page_list = image->head & PAGE_MASK;

        /* we need both effective and real address here */
        reboot_code_buffer_phys =
            page_to_pfn(image->control_code_page) << PAGE_SHIFT;
        reboot_code_buffer = page_address(image->control_code_page);

        /* Prepare parameters for reboot_code_buffer */
        kexec_start_address = image->start;
        kexec_indirection_page = page_list;
        kexec_mach_type = machine_arch_type;
        kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;

        /* copy our kernel relocation code to the control code page */
        memcpy(reboot_code_buffer,
               relocate_new_kernel, relocate_new_kernel_size);

        flush_icache_range((unsigned long) reboot_code_buffer,
                           (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
        printk(KERN_INFO "Bye!\n");

        if (kexec_reinit)
                kexec_reinit();

        soft_restart(reboot_code_buffer_phys);
}
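
A side note on the kexec_boot_atags line above: image->start sits KEXEC_ARM_ZIMAGE_OFFSET (0x8000, i.e. 32KB) into the reserved region, and the ATAGs sit KEXEC_ARM_ATAGS_OFFSET (0x1000, i.e. 4KB) into it, so the subtraction recovers the region base. A small sketch of the arithmetic; the 0x48000000 base is made up here, the real one comes from the crashkernel= boot parameter:

#include <stdio.h>

#define KEXEC_ARM_ZIMAGE_OFFSET 0x8000  /* zImage entry: 32KB into the region */
#define KEXEC_ARM_ATAGS_OFFSET  0x1000  /* ATAGs: 4KB into the region */

int main(void)
{
        unsigned long base  = 0x48000000UL;                     /* assumed */
        unsigned long start = base + KEXEC_ARM_ZIMAGE_OFFSET;   /* image->start */
        unsigned long atags = start - KEXEC_ARM_ZIMAGE_OFFSET
                                    + KEXEC_ARM_ATAGS_OFFSET;   /* as in machine_kexec() */

        printf("entry (pc) = %#lx\n", start);   /* base + 32KB */
        printf("atags (r2) = %#lx\n", atags);   /* base + 4KB */
        return 0;
}
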
panic->crash_kexec->machine_kexec->soft_restart

void soft_restart(unsigned long addr)
{
        u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

        /* Disable interrupts first */
        local_irq_disable();
        local_fiq_disable();

        /* Disable the L2 if we're the last man standing. */
        if (num_online_cpus() == 1)
                outer_disable();

        /* Change to the new stack and continue with the reset. */
        call_with_stack(__soft_restart, (void *)addr, (void *)stack);

        /* Should never get here. */
        BUG();
}

panic->crash_kexec->machine_kexec->soft_restart->call_with_stack

/*
 * void call_with_stack(void (*fn)(void *), void *arg, void *sp)
 *
 * Change the stack to that pointed at by sp, then invoke fn(arg) with
 * the new stack.
 */
ENTRY(call_with_stack)
        str     sp, [r2, #-4]!
        str     lr, [r2, #-4]!

        mov     sp, r2
        mov     r2, r0
        mov     r0, r1

        adr     lr, BSYM(1f)
        mov     pc, r2

1:      ldr     lr, [sp]
        ldr     sp, [sp, #4]
        mov     pc, lr
ENDPROC(call_with_stack)

panic->crash_kexec->machine_kexec->soft_restart->call_with_stack->__soft_restart

static void __soft_restart(void *addr)
{
        phys_reset_t phys_reset;

        /* Take out a flat memory mapping. */
        setup_mm_for_reboot();

        /* Clean and invalidate caches */
        flush_cache_all();

        /* Turn off caching */
        cpu_proc_fin();

        /* Push out any further dirty data, and ensure cache is empty */
        flush_cache_all();

        /* Switch to the identity mapping. */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset((unsigned long)addr);

        /* Should never get here. */
        BUG();
}
panic->crash_kexec->machine_kexec->soft_restart->call_with_stack->__soft_restart->cpu_v7_reset

/*
 *      cpu_v7_reset(loc)
 *
 *      Perform a soft reset of the system. Put the CPU into the
 *      same state as it would be if it had been reset, and branch
 *      to what would be the reset vector.
 *
 *      - loc   - location to jump to for soft reset
 *
 *      This code must be executed using a flat identity mapping with
 *      caches disabled.
 */
        .align  5
        .pushsection    .idmap.text, "ax"
ENTRY(cpu_v7_reset)
        mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x1                    @ ...............m
 THUMB( bic     r1, r1, #1 << 30 )              @ SCTLR.TE (Thumb exceptions)
        mcr     p15, 0, r1, c1, c0, 0           @ disable MMU
        isb
        mov     pc, r0
ENDPROC(cpu_v7_reset)

panic->crash_kexec->machine_kexec->soft_restart->call_with_stack->__soft_restart->cpu_v7_reset->relocate_new_kernel

  7         .globl relocate_new_kernel
  8 relocate_new_kernel:
  9
 10         ldr     r0,kexec_indirection_page       @ image->head, 0 for kdump
 11         ldr     r1,kexec_start_address          @ image->start: reserved memory start + 32KB
 12
 13         /*
 14          * If there is no indirection page (we are doing crashdumps)
 15          * skip any relocation.
 16          */
 17         cmp     r0, #0
 18         beq     2f
 19
 20 0:      /* top, read another word for the indirection page */
 21         ldr     r3, [r0],#4
 22
 23         /* Is it a destination page. Put destination address to r4 */
 24         tst     r3,#1,0
 25         beq     1f
 26         bic     r4,r3,#1
 27         b       0b
 28 1:
 29         /* Is it an indirection page */
 30         tst     r3,#2,0
 31         beq     1f
 32         bic     r0,r3,#2
 33         b       0b
 34 1:
 35
 36         /* are we done ? */
 37         tst     r3,#4,0
 38         beq     1f
 39         b       2f
 40
 41 1:
 42         /* is it source ? */
 43         tst     r3,#8,0
 44         beq     0b
 45         bic r3,r3,#8
 46         mov r6,#1024
 47 9:
 48         ldr r5,[r3],#4
 49         str r5,[r4],#4
 50         subs r6,r6,#1
 51         bne 9b
 52         b 0b
 53
 54 2:
 55         /* Jump to relocated kernel */
 56         mov lr,r1
 57         mov r0,#0
 58         ldr r1,kexec_mach_type
 59         ldr r2,kexec_boot_atags
 60  ARM(   mov pc, lr      )
 61  THUMB( bx lr           )

For the kdump path, the code in relocate_new_kernel that actually executes is Line 10~18 and Line 54~60: kexec_indirection_page (image->head) is 0 for a crash kernel, so the relocation loop is skipped entirely.
When the PC jumps to the capture kernel's start address, the registers hold:
r0: 0
r1: MACH_TYPE_INTEGRATOR (kexec_mach_type)
r2: reserved memory start + 4KB (kexec_boot_atags; KEXEC_ARM_ATAGS_OFFSET is 0x1000)
PC: reserved memory start + 32KB (image->start; KEXEC_ARM_ZIMAGE_OFFSET is 0x8000)

In short, before the soft reboot the system will:

1. Disable IRQs and FIQs (in soft_restart).
2. Turn off the caches (in __soft_restart).
3. Disable the MMU (in cpu_v7_reset).

Meanwhile, cache flushes are necessary along the way so that all dirty data reaches memory before the caches are turned off; otherwise the capture kernel could see stale data.

NOTE: the ATAGs, the elfcorehdr, and the capture kernel image were all loaded earlier by sys_kexec_load.
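
To make that concrete, here is a hypothetical sketch of how a loader such as kexec-tools might hand those three segments to sys_kexec_load. The 0x48000000 base, the offsets within the region, and the buffer contents are all placeholders; the KEXEC_ON_CRASH flag is what makes the kernel store the image in kexec_crash_image rather than the normal kexec slot:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

#define CRASH_BASE 0x48000000UL         /* placeholder; from crashkernel= */

int main(void)
{
        /* Placeholders: the real buffers hold the zImage, the ATAG list
         * and the ELF core header describing the old kernel's memory. */
        static char zimage[64 * 4096], atags[4096], elfcorehdr[4096];

        struct kexec_segment segs[] = {
                { zimage,     sizeof(zimage),     (void *)(CRASH_BASE + 0x8000),   sizeof(zimage) },
                { atags,      sizeof(atags),      (void *)(CRASH_BASE + 0x1000),   sizeof(atags) },
                { elfcorehdr, sizeof(elfcorehdr), (void *)(CRASH_BASE + 0x100000), sizeof(elfcorehdr) },
        };

        /* entry is the zImage start; KEXEC_ON_CRASH marks this load as
         * the crash kernel, so it only runs when crash_kexec() fires. */
        if (syscall(SYS_kexec_load, CRASH_BASE + 0x8000, 3UL, segs,
                    KEXEC_ON_CRASH | KEXEC_ARCH_DEFAULT) != 0) {
                perror("kexec_load");
                return 1;
        }
        return 0;
}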
