Linux interrupts, part 2 (s5pv210)

Date: 2021-10-02 01:02:42

Exception vector initialization

In the start_kernel() function in init/main.c:

asmlinkage void __init start_kernel(void)
{
char * command_line;
extern struct kernel_param __start___param[], __stop___param[];

smp_setup_processor_id();

/*
* Need to run as early as possible, to initialize the
* lockdep hash:
*/
lockdep_init();
debug_objects_early_init();

/*
* Set up the the initial canary ASAP:
*/
boot_init_stack_canary();

cgroup_init_early();

local_irq_disable();
early_boot_irqs_off();
early_init_irq_lock_class();

/*
* Interrupts are still disabled. Do necessary setups, then
* enable them
*/
lock_kernel();
tick_init();
boot_cpu_init();
page_address_init();
printk(KERN_NOTICE "%s", linux_banner);
setup_arch(&command_line);
mm_init_owner(&init_mm, &init_task);
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
smp_prepare_boot_cpu();/* arch-specific boot-cpu hooks */

build_all_zonelists(NULL);
page_alloc_init();

printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
parse_early_param();
parse_args("Booting kernel", static_command_line, __start___param,
__stop___param - __start___param,
&unknown_bootoption);
/*
* These use large bootmem allocations and must precede
* kmem_cache_init()
*/
pidhash_init();
vfs_caches_init_early();
sort_main_extable();
trap_init();
mm_init();
/*
* Set up the scheduler prior starting any interrupts (such as the
* timer interrupt). Full topology setup happens at smp_init()
* time - but meanwhile we still have a functioning scheduler.
*/
sched_init();
/*
* Disable preemption - early bootup scheduling is extremely
* fragile until we cpu_idle() for the first time.
*/
preempt_disable();
if (!irqs_disabled()) {
printk(KERN_WARNING "start_kernel(): bug: interrupts were "
"enabled *very* early, fixing it\n");
local_irq_disable();
}
rcu_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
prio_tree_init();
init_timers();
hrtimers_init();
softirq_init();
timekeeping_init();
time_init();
profile_init();
if (!irqs_disabled())
printk(KERN_CRIT "start_kernel(): bug: interrupts were "
"enabled early\n");
early_boot_irqs_on();
local_irq_enable();

/* Interrupts are enabled now so all GFP allocations are safe. */
gfp_allowed_mask = __GFP_BITS_MASK;

kmem_cache_init_late();

/*
* HACK ALERT! This is early. We're enabling the console before
* we've done PCI setups etc, and console_init() must be aware of
* this. But we do want output early, in case something goes wrong.
*/
console_init();
if (panic_later)
panic(panic_later, panic_param);

lockdep_info();

/*
* Need to run this when irqs are enabled, because it wants
* to self-test [hard/soft]-irqs on/off lock inversion bugs
* too:
*/
locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start && !initrd_below_start_ok &&
page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
"disabling it.\n",
page_to_pfn(virt_to_page((void *)initrd_start)),
min_low_pfn);
initrd_start = 0;
}
#endif
page_cgroup_init();
enable_debug_pagealloc();
kmemtrace_init();
kmemleak_init();
debug_objects_mem_init();
idr_init_cache();
setup_per_cpu_pageset();
numa_policy_init();
if (late_time_init)
late_time_init();
sched_clock_init();
calibrate_delay();
pidmap_init();
anon_vma_init();
#ifdef CONFIG_X86
if (efi_enabled)
efi_enter_virtual_mode();
#endif
thread_info_cache_init();
cred_init();
fork_init(totalram_pages);
proc_caches_init();
buffer_init();
key_init();
security_init();
dbg_late_init();
vfs_caches_init(totalram_pages);
signals_init();
/* rootfs populating might need page-writeback */
page_writeback_init();
#ifdef CONFIG_PROC_FS
proc_root_init();
#endif
cgroup_init();
cpuset_init();
taskstats_init_early();
delayacct_init();

check_bugs();

acpi_early_init(); /* before LAPIC and SMP init */
sfi_init_late();

ftrace_init();

/* Do the rest non-__init'ed, we're now alive */
rest_init();
}
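Of the calls above, the ones that matter for interrupt setup are setup_arch() (which records the machine-specific IRQ init hook and installs the exception vectors), early_irq_init() (which initializes the irq_desc array) and init_IRQ(). On ARM, init_IRQ() is where control passes to the machine-specific interrupt-controller initialization. Roughly — this is a sketch from memory of this kernel generation, not copied verbatim from arch/arm/kernel/irq.c:

#include <linux/init.h>
#include <linux/irq.h>

extern void (*init_arch_irq)(void);	/* set to mdesc->init_irq in setup_arch() */

void __init init_IRQ(void)
{
	int irq;

	/* mark every descriptor as not requestable/probeable yet; the
	   platform's init_irq hook will clear these flags for the IRQs
	   it actually supports */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;

	/* hand over to the board/SoC interrupt controller setup */
	init_arch_irq();
}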

Next, setup_arch(&command_line).

This function lives in arch/arm/kernel/setup.c. The customize_machine() arch_initcall quoted first is related: it runs much later, during do_initcalls(), and invokes whatever setup_arch() stores in the init_machine pointer.

static int __init customize_machine(void)
{
/* customizes platform devices, or adds new ones */
if (init_machine)
init_machine();
return 0;
}
arch_initcall(customize_machine);

void __init setup_arch(char **cmdline_p)
{
struct tag *tags = (struct tag *)&init_tags;
struct machine_desc *mdesc;
char *from = default_command_line;

unwind_init();

setup_processor();
mdesc = setup_machine(machine_arch_type);
machine_name = mdesc->name;

if (mdesc->soft_reboot)
reboot_setup("s");

if (__atags_pointer)
tags = phys_to_virt(__atags_pointer);
else if (mdesc->boot_params)
tags = phys_to_virt(mdesc->boot_params);

/*
* If we have the old style parameters, convert them to
* a tag list.
*/
if (tags->hdr.tag != ATAG_CORE)
convert_to_tag_list(tags);
if (tags->hdr.tag != ATAG_CORE)
tags = (struct tag *)&init_tags;

if (mdesc->fixup)
mdesc->fixup(mdesc, tags, &from, &meminfo);

if (tags->hdr.tag == ATAG_CORE) {
if (meminfo.nr_banks != 0)
squash_mem_tags(tags);
save_atags(tags);
parse_tags(tags);
}

init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;

/* parse_early_param needs a boot_command_line */
strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

/* populate cmd_line too for later use, preserving boot_command_line */
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;

parse_early_param();

paging_init(mdesc);
request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
smp_init_cpus();
#endif

cpu_init();
tcm_init();

/*
* Set up various architecture-specific pointers
*/
init_arch_irq = mdesc->init_irq;
system_timer = mdesc->timer;
init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
#endif
early_trap_init();
}
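The mdesc used above comes from the board's machine descriptor, declared with the MACHINE_START macro in the board file (for an SMDKV210 board that is arch/arm/mach-s5pv210/mach-smdkv210.c); setup_machine(machine_arch_type) looks it up. The hooks copied into init_arch_irq, system_timer and init_machine all originate there. A sketch of what such a descriptor looks like — field values and function names are from memory of board files of this era and are illustrative only:

#include <asm/mach/arch.h>
#include <asm/mach-types.h>

/* the board's own functions / objects (names illustrative) */
extern void s5pv210_init_irq(void);
extern void smdkv210_map_io(void);
extern void smdkv210_machine_init(void);
extern struct sys_timer s3c24xx_timer;

MACHINE_START(SMDKV210, "SMDKV210")
	.boot_params	= S5P_PA_SDRAM + 0x100,	/* physical address of the ATAG list */
	.init_irq	= s5pv210_init_irq,	/* copied to init_arch_irq above, called from init_IRQ() */
	.map_io		= smdkv210_map_io,
	.init_machine	= smdkv210_machine_init,/* called later via the customize_machine arch_initcall */
	.timer		= &s3c24xx_timer,	/* copied to system_timer above */
MACHINE_END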

setup_arch() ends by calling early_trap_init(), defined in arch/arm/kernel/traps.c:

void __init early_trap_init(void)
{
unsigned long vectors = CONFIG_VECTORS_BASE;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start;

/*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

/*
* Copy signal return handlers into the vector page, and
* set sigreturn to be a pointer to these.
*/
memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
sizeof(sigreturn_codes));
memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,
sizeof(syscall_restart_code));

flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}

This function copies the exception vector table to 0x00000000 or 0xffff0000; in this configuration it goes to 0xffff0000 (the CPU fetches exception vectors from the high address when the V bit of the CP15 control register is set).

CONFIG_VECTORS_BASE=0xffff0000

The definition can be found in include/generated/autoconf.h:

...
#define CONFIG_RT_MUTEXES 1
#define CONFIG_VECTORS_BASE 0xffff0000
#define CONFIG_NETFILTER_XT_TARGET_MARK 1
#define CONFIG_VIDEO_CX231XX_DVB_MODULE 1
...

In arch/arm/kernel/entry-armv.S:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:


__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
* MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
* Basically to switch modes, we *HAVE* to clobber one register... brain
* damage alert! I don't think that we can execute any code in here in any
* other mode than FIQ... Ok you can switch to another mode, but you can't
* get out of that mode without clobbering one register.
*/
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
* Address exception handler
*-----------------------------------------------------------------------------
* These aren't too critical.
* (they're not supposed to happen, and won't happen in 32-bit data mode).
*/

vector_addrexcptn:
	b	vector_addrexcptn

/*
* We group all the following data together to optimise
* for CPUs with separate I & D caches.
*/
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:


__kuser_helper_start:

/*
* Reference prototype:
*
*void __kernel_memory_barrier(void)
*
* Input:
*
*lr = return address
*
* Output:
*
*none
*
* Clobbered:
*
*none
*
* Definition and user space usage example:
*
*typedef void (__kernel_dmb_t)(void);
*#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
*
* Apply any needed memory barrier to preserve consistency with data modified
* manually and __kuser_cmpxchg usage.
*
* This could be used as follows:
*
* #define __kernel_dmb() \
* asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
* : : : "r0", "lr","cc" )
*/

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb
	usr_ret	lr

	.align	5

/*
* Reference prototype:
*
*int __kernel_cmpxchg(int oldval, int newval, int *ptr)
*
* Input:
*
*r0 = oldval
*r1 = newval
*r2 = ptr
*lr = return address
*
* Output:
*
*r0 = returned value (zero or non-zero)
*C flag = set if r0 == 0, clear if r0 != 0
*
* Clobbered:
*
*r3, ip, flags
*
* Definition and user space usage example:
*
*typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
*#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
*
* Atomically store newval in *ptr if *ptr is equal to oldval for user space.
* Return zero if *ptr was changed or non-zero if no exchange happened.
* The C flag is also set if *ptr was changed to allow for assembly
* optimization in the calling code.
*
* Notes:
*
* - This routine already includes memory barriers as needed.
*
* For example, a user space atomic_add implementation could look like this:
*
* #define atomic_add(ptr, val) \
*({ register unsigned int *__ptr asm("r2") = (ptr); \
* register unsigned int __result asm("r1"); \
* asm volatile ( \
* "1: @ atomic_add\n\t" \
* "ldrr0, [r2]\n\t" \
* "movr3, #0xffff0fff\n\t" \
* "addlr, pc, #4\n\t" \
* "addr1, r0, %2\n\t" \
* "addpc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
* "bcc1b" \
* : "=&r" (__result) \
* : "r" (__ptr), "rIL" (val) \
* : "r0","r3","ip","lr","cc","memory" ); \
* __result; })
*/

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

/*
* Poor you. No fast solution possible...
* The kernel itself must perform the operation.
* A special ghost syscall is used for that (see traps.c).
*/
	stmfd	sp!, {r7, lr}
	ldr	r7, =1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

/*
* The only thing that can break atomicity in this cmpxchg
* implementation is either an IRQ or a data abort exception
* causing another process/thread to be scheduled in the middle
* of the critical sequence. To prevent this, code is added to
* the IRQ and data abort exception handlers to set the pc back
* to the beginning of the critical section if it is found to be
* within that critical section (see kuser_cmpxchg_fixup).
*/
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
	b	__kuser_memory_barrier
#else
	usr_ret	lr
#endif

#endif

	.align	5

/*
* Reference prototype:
*
*int __kernel_get_tls(void)
*
* Input:
*
*lr = return address
*
* Output:
*
*r0 = TLS value
*
* Clobbered:
*
*none
*
* Definition and user space usage example:
*
*typedef int (__kernel_get_tls_t)(void);
*#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
*
* Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
*
* This could be used as follows:
*
* #define __kernel_get_tls() \
*({ register unsigned int __val asm("r0"); \
* asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
* : "=r" (__val) : : "lr","cc" ); \
* __val; })
*/

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
#else
	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
#endif
	usr_ret	lr

	.rep	5
	.word	0				@ pad up to __kuser_helper_version
	.endr

/*
* Reference declaration:
*
*extern unsigned int __kernel_helper_version;
*
* Definition and user space usage example:
*
*#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
*
* User space may read this to determine the curent number of helpers
* available.
*/

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
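To see why these helpers are copied into the vector page, here is a compact C version of the usage pattern given in the comments above: user space (typically the C library) calls the helper through its fixed address, which is guaranteed never to change. This is simply the typedef/#define from the __kernel_cmpxchg comment wrapped in a function; the address 0xffff0fc0 comes from that comment.

/* user-space side, not kernel code */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

/* returns 0 if *ptr was atomically changed from expected to newval */
static int try_cmpxchg(int *ptr, int expected, int newval)
{
	return __kernel_cmpxchg(expected, newval, ptr);
}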

The vector_stub macro definition (also in entry-armv.S):

/*
* Vector stubs.
*
* This code is copied to 0xffff0200 so we can use branches in the
* vectors, rather than ldr's. Note that this code must not
* exceed 0x300 bytes.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*
* SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler.
*/
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

The line

memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);

copies the region from __stubs_start to __stubs_end — the branch stubs produced by expanding the macro analysed above — to address vectors + 0x200.

Now for stubs_offset:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

Quoting (and tidying up) an explanation found online: when the assembler encounters a B instruction, it turns the target label into an offset relative to the current PC (within ±32 MB) and writes that offset into the instruction encoding. As the code above shows, both the vector table and the stubs are relocated at run time, so if the vector table were still written as "b vector_irq", execution could not reach the relocated vector_irq — the encoded offset would still be the link-time one. The offset written into the instruction therefore has to describe the post-relocation layout.

Call the IRQ entry in the original vector table irq_PC. Its offset within the vector table is irq_PC - __vectors_start, and vector_irq's offset within the stubs is vector_irq - __stubs_start; neither offset changes when the code is copied. After the copy, the vector table sits at 0xffff0000 and the stubs at 0xffff0200, so the distance from the IRQ entry to the relocated vector_irq is 0x200 plus vector_irq's offset within the stubs, minus the entry's offset within the vector table:

0x200 + (vector_irq - __stubs_start) - (irq_PC - __vectors_start)
    = (vector_irq - irq_PC) + (__vectors_start + 0x200 - __stubs_start)

The first parenthesis is exactly what the assembler already encodes for "b vector_irq"; the remaining term, __vectors_start + 0x200 - __stubs_start, is what has to be added on top — and that is precisely how entry-armv.S defines stubs_offset.
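The arithmetic is easier to see with concrete numbers. The small user-space C program below re-derives it with made-up link addresses (the addresses are assumptions purely for illustration; only the relations between them matter): the displacement the assembler encodes for "b vector_irq + stubs_offset" equals the displacement actually needed after both regions have been copied to 0xffff0000 and 0xffff0200.

#include <stdio.h>

int main(void)
{
	/* made-up link-time addresses, for illustration only */
	unsigned long vectors_start = 0xc0028000;		/* __vectors_start */
	unsigned long stubs_start   = 0xc0028300;		/* __stubs_start */
	unsigned long vector_irq    = stubs_start + 0x80;	/* vector_irq label */
	unsigned long irq_entry     = vectors_start + 0x18;	/* the W(b) IRQ slot */

	unsigned long stubs_offset  = vectors_start + 0x200 - stubs_start;

	/* what the assembler encodes for "b vector_irq + stubs_offset"
	   (measured from the branch instruction; the PC+8 pipeline bias
	   cancels out because it appears on both sides) */
	long encoded = (long)(vector_irq + stubs_offset) - (long)irq_entry;

	/* where the two pieces end up after early_trap_init() copies them */
	unsigned long entry_at_run = 0xffff0000 + (irq_entry - vectors_start);
	unsigned long stub_at_run  = 0xffff0200 + (vector_irq - stubs_start);
	long needed = (long)stub_at_run - (long)entry_at_run;

	printf("encoded=%ld needed=%ld\n", encoded, needed);	/* always equal */
	return 0;
}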

When an interrupt occurs, the CPU jumps to the IRQ slot of the relocated vector table, which branches into the stub; the stub then decides which __irq_* handler to enter according to the mode that was interrupted:
	W(b)	vector_irq + stubs_offset

	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f






Let's look at __irq_usr (the excerpt below starts with the tail of __dabt_usr, which immediately precedes it in entry-armv.S):

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry				@ macro
	kuser_cmpxchg_check

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler				@ macro
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
#endif

	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, BSYM(1b)
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_local_timer
#endif
#endif

	.endm
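get_irqnr_preamble and get_irqnr_and_base are platform macros (from the platform's entry-macro.S); on s5pv210 they query the VIC (PL192 vectored interrupt controllers) to turn a pending interrupt into the Linux IRQ number handed to asm_do_IRQ. The real code is assembly, but its job can be sketched in C as follows — the register offset and the "32 sources per VIC" layout are assumptions based on the standard VIC, not lifted from the platform sources:

#include <linux/io.h>
#include <linux/bitops.h>

/* C sketch of what get_irqnr_and_base must accomplish on a VIC-based SoC */
#define VIC_IRQ_STATUS	0x00	/* pending-IRQ status register (assumed PL192 layout) */

static int vic_pending_irq(void __iomem *vic_base[], int nr_vics)
{
	int i;

	for (i = 0; i < nr_vics; i++) {
		unsigned long status = readl(vic_base[i] + VIC_IRQ_STATUS);

		if (status)
			return i * 32 + __ffs(status);	/* 32 sources per VIC */
	}
	return -1;	/* nothing pending: the "1:" loop in irq_handler falls through */
}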

In arch/arm/kernel/irq.c:


/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
* own 'handler'
*/
asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);

irq_enter();

/*
* Some hardware gives randomly wrong interrupts. Rather
* than crashing, do something sensible.
*/
if (unlikely(irq >= NR_IRQS)) {
if (printk_ratelimit())
printk(KERN_WARNING "Bad IRQ%u\n", irq);
ack_bad_irq(irq);
} else {
generic_handle_irq(irq);
}

/* AT91 specific workaround */
irq_finish(irq);

irq_exit();
set_irq_regs(old_regs);
}

In include/linux/irq.h:


static inline void generic_handle_irq(unsigned int irq)
{
	generic_handle_irq_desc(irq, irq_to_desc(irq));
}

static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
	desc->handle_irq(irq, desc);
#else
	if (likely(desc->handle_irq))
		desc->handle_irq(irq, desc);
	else
		__do_IRQ(irq);
#endif
}
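desc->handle_irq is the per-IRQ flow handler (handle_level_irq, handle_edge_irq, ...) that the interrupt-controller setup code installs together with a struct irq_chip. A hedged sketch of that binding using the generic-irq API of this kernel generation — the chip name and the range of 32 IRQs are illustrative, not the actual s5pv210 code:

#include <linux/irq.h>
#include <asm/mach/irq.h>

static struct irq_chip example_chip;	/* .ack/.mask/.unmask filled in by the controller driver */

static void __init example_init_irq(void)
{
	unsigned int irq;

	for (irq = 0; irq < 32; irq++) {
		/* after this, generic_handle_irq(irq) ends up in handle_level_irq() */
		set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);		/* ARM-specific: allow request_irq() */
	}
}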

In kernel/irq/handle.c:

/**
* __do_IRQ - original all in one highlevel IRQ handler
* @irq:the interrupt number
*
* __do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*
* This is the original x86 implementation which is used for every
* interrupt type.
*/
unsigned int __do_IRQ(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned int status;

kstat_incr_irqs_this_cpu(irq, desc);

if (CHECK_IRQ_PER_CPU(desc->status)) {
irqreturn_t action_ret;

/*
* No locking required for CPU-local interrupts:
*/
if (desc->chip->ack)
desc->chip->ack(irq);
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
}
desc->chip->end(irq);
return 1;
}

raw_spin_lock(&desc->lock);
if (desc->chip->ack)
desc->chip->ack(irq);
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
*/
status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
status |= IRQ_PENDING; /* we _want_ to handle it */

/*
* If the IRQ is disabled for whatever reason, we cannot
* use the action we have.
*/
action = NULL;
if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
action = desc->action;
status &= ~IRQ_PENDING; /* we commit to handling */
status |= IRQ_INPROGRESS; /* we are handling it */
}
desc->status = status;

/*
* If there is no IRQ handler or it was disabled, exit early.
* Since we set PENDING, if another processor is handling
* a different instance of this same irq, the other processor
* will take care of it.
*/
if (unlikely(!action))
goto out;

/*
* Edge triggered interrupts need to remember
* pending events.
* This applies to any hw interrupts that allow a second
* instance of the same irq to arrive while we are in do_IRQ
* or in the handler. But the code here only handles the _second_
* instance of the irq, not the third or fourth. So it is mostly
* useful for irq hardware that does not mask cleanly in an
* SMP environment.
*/
for (;;) {
irqreturn_t action_ret;

raw_spin_unlock(&desc->lock);

action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);

raw_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;

out:
/*
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
*/
desc->chip->end(irq);
raw_spin_unlock(&desc->lock);

return 1;
}
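The desc->chip->ack and desc->chip->end calls above are supplied by the interrupt-controller driver through a struct irq_chip; in this kernel generation the callbacks take the IRQ number directly. A minimal sketch for a memory-mapped controller — the base pointer and register offsets are illustrative assumptions, not a real driver:

#include <linux/irq.h>
#include <linux/io.h>

static void __iomem *intc_base;			/* ioremap()ed elsewhere (assumed) */
#define INT_ENABLE_SET		0x10		/* illustrative register offsets */
#define INT_ENABLE_CLEAR	0x14

static void example_ack_irq(unsigned int irq)
{
	/* mask the source; for a level-triggered line this also "acks" it */
	writel(1 << (irq & 31), intc_base + INT_ENABLE_CLEAR);
}

static void example_unmask_irq(unsigned int irq)
{
	writel(1 << (irq & 31), intc_base + INT_ENABLE_SET);
}

static struct irq_chip example_intc_chip = {
	.name	= "example-intc",
	.ack	= example_ack_irq,
	.mask	= example_ack_irq,
	.unmask	= example_unmask_irq,
	.end	= example_unmask_irq,		/* __do_IRQ() re-enables the line when done */
};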

/**
* handle_IRQ_event - irq action chain handler
* @irq:the interrupt number
* @action:the interrupt action chain for this irq
*
* Handles the action chain of an irq event
*/
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
irqreturn_t ret, retval = IRQ_NONE;
unsigned int status = 0;

do {
trace_irq_handler_entry(irq, action);
ret = action->handler(irq, action->dev_id);
trace_irq_handler_exit(irq, action, ret);

switch (ret) {
case IRQ_WAKE_THREAD:
/*
* Set result to handled so the spurious check
* does not trigger.
*/
ret = IRQ_HANDLED;

/*
* Catch drivers which return WAKE_THREAD but
* did not set up a thread function
*/
if (unlikely(!action->thread_fn)) {
warn_no_thread(irq, action);
break;
}

/*
* Wake up the handler thread for this
* action. In case the thread crashed and was
* killed we just pretend that we handled the
* interrupt. The hardirq handler above has
* disabled the device interrupt, so no irq
* storm is lurking.
*/
if (likely(!test_bit(IRQTF_DIED,
&action->thread_flags))) {
set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
wake_up_process(action->thread);
}

/* Fall through to add to randomness */
case IRQ_HANDLED:
status |= action->flags;
break;

default:
break;
}

retval |= ret;
action = action->next;
} while (action);

if (status & IRQF_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();

return retval;
}
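action->handler, called in the loop above, is simply the function a driver passed to request_irq(); the action chain exists because several drivers may share one line (IRQF_SHARED). A hedged sketch of the driver side — the IRQ number 16 and all names are made up for illustration:

#include <linux/interrupt.h>

static int example_cookie;		/* dev_id so the ISR (and free_irq) can identify us */

static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* check/clear the device's interrupt status here; return IRQ_NONE
	   if the device did not actually raise this shared interrupt */
	return IRQ_HANDLED;
}

static int __init example_init(void)
{
	return request_irq(16, example_isr, IRQF_SHARED, "example-dev", &example_cookie);
}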

Interrupt registration ↓ (covered in the next part)