A spinlock walkthrough
==============================================
This article is original to this site; reposting is welcome! Please credit the source: http://www.cnblogs.com/gdt-a20
==============================================
Let's take a look at spinlock, using ARM as the example architecture.
I. First, the structures:
include/linux/spinlock_types.h
typedef struct spinlock {
	union {
		struct raw_spinlock rlock;

#ifdef CONFIG_DEBUG_LOCK_ALLOC		/* we ignore the debug parts */
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
		struct {
			u8 __padding[LOCK_PADSIZE];
			struct lockdep_map dep_map;
		};
#endif
	};
} spinlock_t;
typedef struct raw_spinlock {
	arch_spinlock_t raw_lock;	/* the arch-specific structure */
#ifdef CONFIG_GENERIC_LOCKBREAK
	unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned int magic, owner_cpu;
	void *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} raw_spinlock_t;
arch/arm/include/asm/spinlock_types.h
typedef struct {
	volatile unsigned int lock;
} arch_spinlock_t;
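
Before tracing the implementation, here is a minimal usage sketch (hypothetical driver-style code, not taken from the kernel sources quoted here) of the API these structures back:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* statically initialized spinlock_t */
static int shared_counter;

static void bump_counter(void)
{
	spin_lock(&my_lock);		/* disables preemption, then spins if contended */
	shared_counter++;		/* critical section: at most one CPU at a time */
	spin_unlock(&my_lock);
}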
II. spinlock operations
1. Acquiring the lock: include/linux/spinlock.h
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

#define raw_spin_lock(lock)	_raw_spin_lock(lock)
kernel/spinlock.c
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
include/linux/spinlock_api_smp.h
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();	/* preemption point: turn preemption off */
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
include/linux/preempt.h
#ifdef CONFIG_PREEMPT_COUNT	/* only effective with preempt counting; otherwise no-ops */

#define preempt_disable() \
do { \
	inc_preempt_count(); /* bump the preemption count */ \
	barrier(); \
} while (0)

#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
	do { preempt_count() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
	do { preempt_count() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

#define preempt_disable()		do { } while (0)
#define preempt_enable_no_resched()	do { } while (0)
#define preempt_enable()		do { } while (0)

#define preempt_disable_notrace()		do { } while (0)
#define preempt_enable_no_resched_notrace()	do { } while (0)
#define preempt_enable_notrace()		do { } while (0)

#endif /* CONFIG_PREEMPT_COUNT */
#define inc_preempt_count() add_preempt_count(1)
# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
#define preempt_count()	(current_thread_info()->preempt_count)
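
Note that preempt_count() is a counter, not a flag, so disable/enable pairs nest. A small sketch of the semantics (hypothetical function, assuming CONFIG_PREEMPT_COUNT is set):

#include <linux/preempt.h>

static void nesting_demo(void)
{
	preempt_disable();	/* count 0 -> 1: preemption now off        */
	preempt_disable();	/* count 1 -> 2: nested call is safe       */
	preempt_enable();	/* count 2 -> 1: preemption still off      */
	preempt_enable();	/* count 1 -> 0: resched check may run now */
}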
include/linux/compiler-gcc.h
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/*
 If an asm sequence modifies memory but GCC cannot tell, because nothing in
 the output operands describes the write, you add "memory" to the clobber
 list. This tells GCC that memory has been changed: before the asm statement
 it will emit the instructions needed to write back any variable values it
 had cached in registers as an optimization, and it will re-read those
 variables from memory if they are used again afterwards.
*/
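
For illustration (a hypothetical snippet built on the barrier() macro above, not kernel code): barrier() emits no CPU instruction, but the "memory" clobber forbids the compiler from caching memory values across it:

static int flag;

static void wait_for_flag(void)
{
	while (!flag)		/* without the barrier, GCC could hoist the load */
		barrier();	/* of 'flag' out of the loop; the "memory"       */
				/* clobber forces a fresh read each iteration    */
}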
About LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock):
include/linux/lockdep.h
#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)
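
That is the plain variant: it ignores the trylock argument and simply takes the lock. With CONFIG_LOCK_STAT enabled, the same header instruments the contended path, roughly as follows (quoted from memory, so treat it as a sketch):

#define LOCK_CONTENDED(_lock, try, lock) \
do { \
	if (!try(_lock)) { \
		lock_contended(&(_lock)->dep_map, _RET_IP_); \
		lock(_lock); \
	} \
	lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)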
include/linux/spinlock.h
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}
include/linux/compiler.h

# define __acquire(x) __context__(x,1)
// This is a sparse annotation: it is designed to test that the entry and exit
// contexts match, and that no path through a function is ever entered with
// conflicting contexts.
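
For illustration, a hypothetical pair of helpers annotated for sparse: __acquires()/__releases() declare the context change, and sparse warns if any code path leaves a function with the annotation unbalanced:

/* Hypothetical helpers; sparse checks the lock context is balanced. */
static void my_lock_helper(spinlock_t *lock) __acquires(lock)
{
	spin_lock(lock);	/* context depth 0 -> 1, matching __acquires */
}

static void my_unlock_helper(spinlock_t *lock) __releases(lock)
{
	spin_unlock(lock);	/* context depth 1 -> 0, matching __releases */
}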
About arch_spin_lock, in arch/arm/include/asm/spinlock.h:
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(		/* loop until the lock is taken */
"1:	ldrex	%0, [%1]\n"		/* exclusive-load lock->lock     */
"	teq	%0, #0\n"		/* already held?                 */
	WFE("ne")			/* if held, wait for an event    */
"	strexeq	%0, %2, [%1]\n"		/* free: try to store 1          */
"	teqeq	%0, #0\n"		/* did the exclusive store work? */
"	bne	1b"			/* no: retry from the top        */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}
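
In C terms, the ldrex/strex pair behaves like a compare-and-swap loop. A userspace analogue using C11 atomics (illustration only, not kernel code; on ARM a compare-exchange compiles down to exactly such an ldrex/strex loop):

#include <stdatomic.h>

typedef struct { atomic_uint lock; } demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *l)
{
	unsigned int expected;

	do {
		expected = 0;	/* the lock must read as free (0)...   */
	} while (!atomic_compare_exchange_weak(&l->lock, &expected, 1));
				/* ...or we retry, like "bne 1b" above */
	/* a successful exchange orders the critical section,
	   playing the role of the trailing smp_mb() */
}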
#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
2. While we are here, a look at #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock),
which ultimately boils down to:
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();	/* disable local interrupts */
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
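
Why this matters: if an interrupt handler takes the same lock, process context must disable local interrupts first, otherwise an IRQ arriving on the same CPU would spin forever on a lock that CPU already holds. A hypothetical driver-style sketch:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);
static int dev_state;

static irqreturn_t my_irq_handler(int irq, void *data)
{
	spin_lock(&dev_lock);		/* IRQs are already off in here */
	dev_state++;
	spin_unlock(&dev_lock);
	return IRQ_HANDLED;
}

static void process_context_update(void)
{
	spin_lock_irq(&dev_lock);	/* local_irq_disable() + lock    */
	dev_state = 0;			/* safe against my_irq_handler   */
	spin_unlock_irq(&dev_lock);
}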
Thanks