Scope-based resource management for the kernel（基于作用域的内核资源管理）

参考

常用的API

FREE

kfree

/* Auto-free a kmalloc'd pointer on scope exit; skips NULL and ERR_PTR values. */
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

kvfree

DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))

put_task

DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))

free_cpumask_var

DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));

bitmap

DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T))

fput

DEFINE_FREE(fput, struct file *, if (!IS_ERR_OR_NULL(_T)) fput(_T))

put_device

DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))

device_del

DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))

firmware

DEFINE_FREE(firmware, struct firmware *, release_firmware(_T))

free_percpu

DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))

CLASS

find_get_task

/*
 * Constructor: find_get_task(pid) returns a referenced task_struct (or NULL).
 * Destructor: drops the reference with put_task_struct() only if non-NULL.
 */
DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

fd/fd_raw

/* Pair fdget()/fdget_raw() with fdput() automatically at end of scope. */
DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd)

get_unused_fd

/*
 * Reserve an fd number; on scope exit put_unused_fd() runs only when the
 * reservation succeeded (_T >= 0), so error returns need no cleanup.
 */
DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
	     get_unused_fd_flags(flags), unsigned flags)

Guard/Lock

raw_spinlock

/* guard(raw_spinlock)(&lock): raw_spin_lock on entry, raw_spin_unlock on exit. */
DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
		    raw_spin_lock(_T->lock),
		    raw_spin_unlock(_T->lock))

raw_spinlock_try

DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))

raw_spinlock_nested

DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
		    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
		    raw_spin_unlock(_T->lock))

raw_spinlock_irq

DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
		    raw_spin_lock_irq(_T->lock),
		    raw_spin_unlock_irq(_T->lock))

raw_spinlock_irq_try

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))

raw_spinlock_irqsave

/*
 * irqsave variant: the saved interrupt flags live in the guard object itself
 * (the trailing "unsigned long flags" member), restored on scope exit.
 */
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
		    raw_spin_lock_irqsave(_T->lock, _T->flags),
		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

raw_spinlock_irqsave_try

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
			 raw_spin_trylock_irqsave(_T->lock, _T->flags))

spinlock

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
		    spin_lock(_T->lock),
		    spin_unlock(_T->lock))

spinlock_try

DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))

spinlock_irq

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
		    spin_lock_irq(_T->lock),
		    spin_unlock_irq(_T->lock))

spinlock_irq_try

DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
			 spin_trylock_irq(_T->lock))

spinlock_irqsave

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
		    spin_lock_irqsave(_T->lock, _T->flags),
		    spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

spinlock_irqsave_try

DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
			 spin_trylock_irqsave(_T->lock, _T->flags))

read_lock

DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
		    read_lock(_T->lock),
		    read_unlock(_T->lock))

read_lock_irq

DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
		    read_lock_irq(_T->lock),
		    read_unlock_irq(_T->lock))

read_lock_irqsave

DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
		    read_lock_irqsave(_T->lock, _T->flags),
		    read_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

write_lock

DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
		    write_lock(_T->lock),
		    write_unlock(_T->lock))

write_lock_irq

DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
		    write_lock_irq(_T->lock),
		    write_unlock_irq(_T->lock))

write_lock_irqsave

DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
		    write_lock_irqsave(_T->lock, _T->flags),
		    write_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

mutex

/*
 * mutex guard plus two conditional variants: _try uses mutex_trylock();
 * _intr treats mutex_lock_interruptible() == 0 as successful acquisition
 * (for use with scoped_cond_guard()).
 */
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)

irq、irqsave

/*
 * Argument-less guards: guard(irq)() disables/enables local interrupts;
 * guard(irqsave)() additionally saves/restores the flags in the guard object.
 */
DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
DEFINE_LOCK_GUARD_0(irqsave,
		    local_irq_save(_T->flags),
		    local_irq_restore(_T->flags),
		    unsigned long flags)

rwsem

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)

DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))

local_lock

DEFINE_GUARD(local_lock, local_lock_t __percpu*,
	     local_lock(_T),
	     local_unlock(_T))
DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
	     local_lock_irq(_T),
	     local_unlock_irq(_T))
DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
		    local_lock_irqsave(_T->lock, _T->flags),
		    local_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)
DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
	     local_lock_nested_bh(_T),
	     local_unlock_nested_bh(_T))

rtnl

DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock())

core_lock

DEFINE_LOCK_GUARD_1(core_lock, int,
		    sched_core_lock(*_T->lock, &_T->flags),
		    sched_core_unlock(*_T->lock, &_T->flags),
		    unsigned long flags)

rq_lock

DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
		    rq_lock(_T->lock, &_T->rf),
		    rq_unlock(_T->lock, &_T->rf),
		    struct rq_flags rf)

rq_lock_irq

DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
		    rq_lock_irq(_T->lock, &_T->rf),
		    rq_unlock_irq(_T->lock, &_T->rf),
		    struct rq_flags rf)

rq_lock_irqsave

DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
		    rq_lock_irqsave(_T->lock, &_T->rf),
		    rq_unlock_irqrestore(_T->lock, &_T->rf),
		    struct rq_flags rf)

double_raw_spinlock

/*
 * Two-lock guard: like DEFINE_LOCK_GUARD_1 but the guard object carries a
 * second "lock2" pointer, and the constructor takes both locks. The _lock
 * expression sees both via _T->lock and _T->lock2.
 */
#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...)				\
__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__)			\
static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2)	\
{ class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t;			\
  _lock; return _t; }

/*
 * Acquire two raw spinlocks in ascending address order (the swap guarantees
 * l1 <= l2), giving a consistent lock ordering; the second acquisition is
 * annotated SINGLE_DEPTH_NESTING for lockdep.
 */
static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

/* Release both locks; unlike locking, release order does not matter. */
static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	raw_spin_unlock(l1);
	raw_spin_unlock(l2);
}

DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t,
		    double_raw_lock(_T->lock, _T->lock2),
		    double_raw_unlock(_T->lock, _T->lock2))

double_rq_lock

DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
		    double_rq_lock(_T->lock, _T->lock2),
		    double_rq_unlock(_T->lock, _T->lock2))

task_rq_lock

/*
 * Guard over a task_struct: the constructor stores the runqueue returned by
 * task_rq_lock() in the guard's rq member (plus the rq_flags), and the
 * destructor passes both back to task_rq_unlock().
 */
DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
		    _T->rq = task_rq_lock(_T->lock, &_T->rf),
		    task_rq_unlock(_T->rq, _T->lock, &_T->rf),
		    struct rq *rq; struct rq_flags rf)

cpus_read_lock

DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())

device

/* Non-blocking attempt on the device's mutex; nonzero on success. */
static inline int device_trylock(struct device *dev)
{
	return mutex_trylock(&dev->mutex);
}

/* Release the device's mutex; pairs with device_lock()/device_trylock(). */
static inline void device_unlock(struct device *dev)
{
	mutex_unlock(&dev->mutex);
}

DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))

task_lock

DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))

pci_dev

DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))

disable_irq

DEFINE_LOCK_GUARD_1(disable_irq, int,
		    disable_irq(*_T->lock), enable_irq(*_T->lock))

HARDIRQ

/*
 * Simulate entering hardirq context: disable local interrupts, enter irq
 * context, tell lockdep the handler may be threaded, and warn if in_irq()
 * does not report hardirq context afterwards.
 */
#define HARDIRQ_ENTER()				\
	local_irq_disable();			\
	__irq_enter();				\
	lockdep_hardirq_threaded();		\
	WARN_ON(!in_irq());

/* Leave the simulated hardirq context and re-enable local interrupts. */
#define HARDIRQ_EXIT()				\
	__irq_exit();				\
	local_irq_enable();

DEFINE_LOCK_GUARD_0(HARDIRQ, HARDIRQ_ENTER(), HARDIRQ_EXIT())

NOTTHREADED_HARDIRQ

DEFINE_LOCK_GUARD_0(NOTTHREADED_HARDIRQ,
	do {
		local_irq_disable();
		__irq_enter();
		WARN_ON(!in_irq());
	} while(0), HARDIRQ_EXIT())

SOFTIRQ

DEFINE_LOCK_GUARD_0(SOFTIRQ, SOFTIRQ_ENTER(), SOFTIRQ_EXIT())

RCU

/* Define RCU guards, should go away when RCU has its own guard definitions */
DEFINE_LOCK_GUARD_0(RCU, rcu_read_lock(), rcu_read_unlock())
DEFINE_LOCK_GUARD_0(RCU_BH, rcu_read_lock_bh(), rcu_read_unlock_bh())
DEFINE_LOCK_GUARD_0(RCU_SCHED, rcu_read_lock_sched(), rcu_read_unlock_sched())

srcu

/*
 * SRCU read-side guard: the index returned by srcu_read_lock() is kept in
 * the guard's idx member and handed back to srcu_read_unlock() on exit.
 */
DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
		    _T->idx = srcu_read_lock(_T->lock),
		    srcu_read_unlock(_T->lock, _T->idx),
		    int idx)

preempt

DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())

示例

guard

guard(mutex)(&data->mutex);
guard(mutex)(&reset_gpio_lookup_mutex);
guard(rwsem_write)(&pwrseq->rw_lock);
guard(rwsem_read)(&pwrseq->rw_lock);
guard(raw_spinlock)(&dl_b->lock);
guard(raw_spinlock_irq)(&p->pi_lock);
guard(raw_spinlock_irqsave)(&priv->lock);
guard(preempt)();
guard(rcu)();
guard(srcu)(&lr->gdev->srcu);
guard(task_rq_lock)(p); // struct task_struct *p
guard(rq_lock_irqsave)(rq);  // struct rq *rq
guard(irq)();
guard(double_rq_lock)(dst, src); // struct rq *dst, *src
guard(core_lock)(&cpu); // unsigned int cpu
guard(cpus_read_lock)();

CLASS

/*
 * State carried by the gpio_chip_guard class below: the owning gpio_device,
 * the chip pointer obtained under SRCU, and the SRCU read-lock index needed
 * to release the read-side critical section.
 */
struct gpio_chip_guard {
	struct gpio_device *gdev;
	struct gpio_chip *gc;
	int idx;
};

/*
 * Example of a DEFINE_CLASS whose constructor is a statement expression:
 * it takes the SRCU read lock on desc->gdev->srcu, dereferences the chip
 * pointer under that lock, and returns the populated guard struct. The
 * destructor unlocks with the stored idx. Note _T is the struct itself
 * here (member access via '.', not '->').
 */
DEFINE_CLASS(gpio_chip_guard,
	     struct gpio_chip_guard,
	     srcu_read_unlock(&_T.gdev->srcu, _T.idx),
	     ({
		struct gpio_chip_guard _guard;

		_guard.gdev = desc->gdev;
		_guard.idx = srcu_read_lock(&_guard.gdev->srcu);
		_guard.gc = srcu_dereference(_guard.gdev->chip,
					     &_guard.gdev->srcu);

		_guard;
	     }),
	     struct gpio_desc *desc)

CLASS(gpio_chip_guard, guard)(desc);

scoped_guard

scoped_guard (mutex, &st->lock) {}
scoped_guard (rwsem_write, &pwrseq_sem) {}
scoped_guard (rwsem_read, &namespace_sem) {}
scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {}
scoped_guard (rcu) {}
scoped_guard (srcu, &desc->gdev->desc_srcu) {}
scoped_guard (srcu, &gpio_devices_srcu) {}
scoped_guard (irqsave) {}
scoped_guard (rq_lock_irqsave, rq) {}
scoped_guard (rq_lock, rq) {}
scoped_guard (spinlock, &lr->wait.lock) {}
scoped_guard (write_lock_irq, &tasklist_lock) {}

scoped_cond_guard

scoped_cond_guard(mutex_intr, return -EINTR, &pcu->cmd_mutex) {}
posted @ 2024-09-20 19:45  摩斯电码  阅读(39)  评论(0)  编辑  收藏  举报