深入理解linux网络技术内幕读书笔记(九)--中断与网络驱动程序
Table of Contents
接收到帧时通知驱动程序
轮询
例如,内核可以持续读取设备上的一个内存寄存器,或者当一个定时器到期时就回头检查那个寄存器。
中断
此时,当特定事件发生时,设备驱动程序会代表内核指示设备产生硬件中断。内核将中断其他活动,然后调用一个驱动程序
所注册的处理函数,以满足设备的需要。当事件是接收到一个帧时,处理函数就会把该帧排入队列某处,然后通知内核。
中断处理程序
函数/宏 | 描述 |
---|---|
in_interrupt | 处于软硬件中断中,且抢占功能是关闭的 |
in_softirq | 处于软件中断中 |
in_irq | 处于硬件中断中 |
softirq_pending | 软件中断未决 |
local_softirq_pending | 本地软件中断未决 |
__raise_softirq_irqoff | 设置与输入的软IRQ类型相关联的标识,将该软IRQ标记为未决 |
raise_softirq_irqoff | 先关闭硬件中断,再调用__raise_softirq_irqoff,再恢复其原有状态 |
raise_softirq | |
__local_bh_enable | |
local_bh_enable | |
local_bh_disable | |
local_irq_disable | |
local_irq_enable | |
local_irq_save | |
local_irq_restore | |
spin_lock_bh | |
spin_unlock_bh | |
抢占功能
- preempt_disable()
为当前任务关闭抢占功能。可以重复调用,递增一个引用计数器。
- preempt_enable()
- preempt_enable_no_resched()
开启抢占功能。preempt_enable_no_resched()只是递减一个引用计数器;preempt_enable()在递减计数器后,若其值为0,还会检查是否需要重新调度,从而让抢占再度开启。
下半部函数
内核2.4版本以后的下半部函数: 引入软IRQ
对并发的唯一限制是:在任何时刻,一个CPU上每个软IRQ都只能有一个实例运行。
新式的软IRQ模型只定义了10种类型(include/linux/interrupt.h):
/* Softirq types (excerpt from include/linux/interrupt.h).
 * Only one instance of each softirq may run per CPU at any time.
 * NET_TX_SOFTIRQ / NET_RX_SOFTIRQ are the networking softirqs,
 * registered in net_dev_init() with net_tx_action / net_rx_action. */

/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
   frequency threaded job scheduling. For almost all the purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
*/

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,		/* network transmit path */
	NET_RX_SOFTIRQ,		/* network receive path */
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS	/* number of softirq types (10) */
};
网络代码如何使用软IRQ
网络子系统在net/core/dev.c中注册接收发送软中断:
/* Registration of the two networking softirq handlers (from
 * net_dev_init() in net/core/dev.c): NET_TX_SOFTIRQ drives
 * transmission, NET_RX_SOFTIRQ drives reception. */
open_softirq(NET_TX_SOFTIRQ, net_tx_action);
open_softirq(NET_RX_SOFTIRQ, net_rx_action);
/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	/* /proc/net and sysfs (kobject) setup; any failure aborts init
	 * and the -ENOMEM preset in rc is returned. */
	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	/* Protocol handler lists: ptype_all for taps that see every
	 * packet, ptype_base[] as a hash of per-protocol handlers. */
	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 * One softnet_data instance per possible CPU; each gets empty
	 * input/process skb queues, an empty poll list, and a backlog
	 * napi_struct whose poll callback is process_backlog.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		/* Receive Packet Steering: per-CPU IPI descriptor used to
		 * kick the remote CPU's receive softirq. */
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special if any other network devices
	 * is present in a network namespace the loopback device must
	 * be present. Since we now dynamically allocate and free the
	 * loopback device ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices. Ensuring the loopback devices
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	/* Register the networking softirq handlers. */
	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

/* Run during the subsystem-initcall phase of kernel boot. */
subsys_initcall(net_dev_init);
softnet_data结构
每个CPU都有其队列,用来接收进来的帧。数据结构为:
/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;		/* devices with queued output (TX softirq) */
	struct Qdisc		**output_queue_tailp;	/* tail pointer for O(1) append to output_queue */
	struct list_head	poll_list;		/* napi_structs awaiting polling on this CPU */
	struct sk_buff		*completion_queue;	/* skbs done transmitting, to be freed */
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned		dropped;
	struct sk_buff_head	input_pkt_queue;	/* frames queued for this CPU to process */
	struct napi_struct	backlog;		/* poll = process_backlog (set in net_dev_init) */
};