菜菜CPP日记
分支预测建议:
http://www.cppblog.com/mysileng/archive/2014/09/29/208454.html
/*
 * Branch-prediction hints (GCC/Clang builtins).
 * likely(x): the condition is expected to be true most of the time.
 * unlikely(x): the condition is expected to be false most of the time.
 * The !!(x) normalizes any truthy value to exactly 0 or 1 before it is
 * handed to __builtin_expect.
 */
#ifndef likely
#define likely(x)   __builtin_expect(!!(x), 1)
#endif

#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
内存对齐:
align(d, a) 把 d 按 a 向上取整(注意:a 必须是 2 的幂,而不仅仅是 2 的倍数,否则位运算技巧不成立)。
设 n = ceil(d / a),则 (n - 1) * a < d <= n * a,
表达式的结果就是 n * a:按 a 对齐时至少要申请这么大的空间才能把 d 放进去。
/*
 * Alignment helpers. `a` must be a power of two.
 *
 * align(d, a)     — round the integer d up to the next multiple of a.
 * align_ptr(p, a) — round the pointer p up to the next a-byte boundary,
 *                   returning it as uint8_t*.
 *
 * Fix over the original: every use of the macro parameter `a` is now
 * parenthesized, so compound arguments such as align(x, y + 1) or
 * align(x, 1 << k) expand correctly.
 */
#define align_ptr(p, a)                                                     \
    ((uint8_t *)(((uintptr_t)(p) + ((uintptr_t)(a) - 1)) &                  \
                 ~((uintptr_t)(a) - 1)))
#define align(d, a) (((d) + ((a) - 1)) & ~((a) - 1))
属性暗示:
http://www.cnblogs.com/sunyubo/archive/2010/12/20/2282084.html
/*
 * Discard the value of an expression while silencing "unused value" /
 * warn_unused_result diagnostics.
 *
 * Fix over the original: the body is wrapped in do { } while (0) instead
 * of a bare { } block, so `if (c) ignore(x); else ...` parses correctly.
 */
#define ignore(exp) \
    do { int ignore_ __attribute__((unused)) = (exp); } while (0)

/* Memory-alignment demo: a normally padded struct ... */
struct my_unpacked_struct {
    char c;
    int i;
};

/* ... versus a packed struct: __packed__ removes padding between the
 * members of my_packed_struct itself (the embedded struct keeps its own
 * internal layout). */
struct my_packed_struct {
    char c;
    int i;
    struct my_unpacked_struct s;
} __attribute__((__packed__));
汇编与锁:
http://blog.csdn.net/lu_ming/article/details/4984115
http://blog.csdn.net/maotianwang/article/details/9154159
// x86 (32-bit operand) atomic primitives and a spin lock built on them.
// All inline assembly below is x86/x86-64 specific.
//
// Fixes over the original:
//  - broken comment `/ 32位系统` repaired;
//  - atomic32_inc/atomic32_dec had invalid parameter lists
//    (`volatile int32_t *value *v`) — corrected to `volatile int32_t *v`;
//  - atomic_cmp_set listed *lock only as an input even though cmpxchg
//    writes it — now a "+m" output;
//  - unlock() is now a do-while(0) macro so it is if/else safe.

/* Atomically add i to *v. No return value. */
static __inline__ void
atomic32_add(volatile int32_t *v, int i)
{
    __asm__ __volatile__("lock; addl %1, %0"
                         : "+m" (*v)
                         : "r" (i)
                         : "cc", "memory");
}

/* Atomically add diff to *value and return the NEW value.
 * xadd leaves the old value in `diff`, so old-increment + increment
 * reconstructs the post-add result. */
static __inline__ int32_t
atomic32_add_return(volatile int32_t *value, int32_t diff)
{
    int32_t old = diff;
    __asm__ volatile("lock; xaddl %0, %1"
                     : "+r" (diff), "+m" (*value)
                     :
                     : "cc", "memory");
    return diff + old;
}

/* Atomically increment *v. */
static __inline__ void
atomic32_inc(volatile int32_t *v)
{
    __asm__ __volatile__("lock; incl %0"
                         : "+m" (*v)
                         :
                         : "cc", "memory");
}

/* Atomically decrement *v. */
static __inline__ void
atomic32_dec(volatile int32_t *v)
{
    __asm__ __volatile__("lock; decl %0"
                         : "+m" (*v)
                         :
                         : "cc", "memory");
}

/* Compare-and-swap: if *lock == old, set it to `set`.
 * Returns non-zero (1) on success, 0 if *lock did not equal old. */
static __inline__ int32_t
atomic_cmp_set(volatile int32_t *lock, int32_t old, int32_t set)
{
    uint8_t res;
    __asm__ volatile("lock; cmpxchgl %3, %1; sete %0"
                     : "=a" (res), "+m" (*lock)
                     : "a" (old), "r" (set)
                     : "cc", "memory");
    return res;
}

/* Single non-blocking acquisition attempt; non-zero on success.
 * The plain read first avoids a bus-locked op when the lock is held. */
#define trylock(lock) (*(lock) == 0 && atomic_cmp_set(lock, 0, 1))

/* Release: compiler barrier, then clear the lock word.
 * NOTE(review): this is a compiler-only fence; sufficient on x86's
 * TSO model for a plain store release — confirm before porting. */
#define unlock(lock) do { __asm__ ("" ::: "memory"); *(lock) = 0; } while (0)

/* Spin until the lock is acquired, with exponential backoff built from
 * PAUSE instructions (.byte 0xf3, 0x90 encodes `pause`). */
static __inline__ void
spin_lock(volatile int32_t *lock)
{
    int i, n;

    for (;;) {
        if (*lock == 0 && atomic_cmp_set(lock, 0, 1)) {
            return;
        }
        for (n = 1; n < 1024; n <<= 1) {
            for (i = 0; i < n; i++) {
                __asm__ (".byte 0xf3, 0x90"); /* pause */
            }
            if (*lock == 0 && atomic_cmp_set(lock, 0, 1)) {
                return;
            }
        }
        /* TODO(review): consider sched_yield() here once the backoff
         * budget is exhausted, as the original author hinted. */
    }
}

#define spin_unlock unlock