Tags: Networking
When a packet arrives at a network device, a hardware interrupt is usually raised. On a system without softirq support, the whole input path has to run inside that hardware interrupt. Packet input still works this way, but interrupt handling consumes so much CPU time that the system can no longer respond promptly to other hardware.
In some situations (certain embedded devices), a packet's arrival raises no hardware interrupt at all. The only option is then to poll the device state from a timer and, whenever packets are present, read them from the device and feed them into the protocol stack. The timeliness of packet input then depends entirely on the timer frequency: if it fires too often, CPU time is wasted; if it fires too rarely, throughput collapses.
A third approach greatly speeds up network processing, provided the system supports both hardware interrupts and softirqs. When a packet arrives, the hardware interrupt adds the device to a poll queue, disables the device's interrupts, and raises the receive softirq, which then reads packets from the devices on the poll queue.
When an application performs an ioctl on a socket, interface-layer commands are handled by dev_ioctl() and inet_ioctl().
All SIOCxIFxxx get and set commands pass their values through an ifreq structure, or carry an ifreq structure as part of the transferred data:
/*
 * Interface request structure used for socket
 * ioctl's. All interface ioctl's must have parameter
 * definitions which begin with ifr_name. The
 * remainder may be interface specific.
 */
struct ifreq
{
#define IFHWADDRLEN 6
    union
    {
        char ifrn_name[IFNAMSIZ];    /* if name, e.g. "en0" */
    } ifr_ifrn;

    union {
        struct sockaddr ifru_addr;
        struct sockaddr ifru_dstaddr;
        struct sockaddr ifru_broadaddr;
        struct sockaddr ifru_netmask;
        struct sockaddr ifru_hwaddr;
        short ifru_flags;
        int ifru_ivalue;
        int ifru_mtu;
        struct ifmap ifru_map;
        char ifru_slave[IFNAMSIZ];    /* Just fits the size */
        char ifru_newname[IFNAMSIZ];
        void __user *ifru_data;
        struct if_settings ifru_settings;
    } ifr_ifru;
};
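Userspace reaches these commands with ioctl() on an ordinary socket. Below is a minimal sketch of the get side using SIOCGIFFLAGS; the interface name "eth0" is an assumption for illustration, and ifr_name/ifr_flags are the standard shorthand macros for the union members above:
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);    /* any socket serves as an ioctl handle */

    if (fd < 0)
        return 1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* ifr_name selects the interface (assumed name) */
    if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)         /* kernel fills ifr_flags */
        printf("flags=0x%x up=%d\n",
               ifr.ifr_flags, !!(ifr.ifr_flags & IFF_UP));
    close(fd);
    return 0;
}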
static int __init net_dev_init(void)
{
    int i, rc = -ENOMEM;

    BUG_ON(!dev_boot_phase);

    if (dev_proc_init())
        goto out;

    if (netdev_kobject_init())
        goto out;

    INIT_LIST_HEAD(&ptype_all);
    for (i = 0; i < PTYPE_HASH_SIZE; i++)
        INIT_LIST_HEAD(&ptype_base[i]);

    if (register_pernet_subsys(&netdev_net_ops))
        goto out;

    /*
     * Initialise the packet receive queues.
     */
    for_each_possible_cpu(i) {
        struct softnet_data *queue;

        queue = &per_cpu(softnet_data, i);
        skb_queue_head_init(&queue->input_pkt_queue);
        queue->completion_queue = NULL;
        INIT_LIST_HEAD(&queue->poll_list);

        queue->backlog.poll = process_backlog;
        queue->backlog.weight = weight_p;
        queue->backlog.gro_list = NULL;
        queue->backlog.gro_count = 0;
    }

    dev_boot_phase = 0;

    /* The loopback device is special if any other network devices
     * is present in a network namespace the loopback device must
     * be present. Since we now dynamically allocate and free the
     * loopback device ensure this invariant is maintained by
     * keeping the loopback device as the first device on the
     * list of network devices. Ensuring the loopback devices
     * is the first device that appears and the last network device
     * that disappears.
     */
    if (register_pernet_device(&loopback_net_ops))
        goto out;

    if (register_pernet_device(&default_device_ops))
        goto out;

    open_softirq(NET_TX_SOFTIRQ, net_tx_action);
    open_softirq(NET_RX_SOFTIRQ, net_rx_action);

    hotcpu_notifier(dev_cpu_callback, 0);
    dst_init();
    dev_mcast_init();
    rc = 0;
out:
    return rc;
}
At system startup, net_dev_init() runs at subsys_initcall priority and initializes the interface layer: it registers the proc files that record per-device statistics, initializes each CPU's softnet_data, registers the packet transmit/receive softirqs and their handlers, and registers the callback that reacts to CPU state changes.
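That subsys_initcall priority comes from the registration at the bottom of net/core/dev.c:
subsys_initcall(net_dev_init);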
The softnet_data structure
softnet_data describes the packet input and output queues used by the network softirqs. Each CPU owns a private softnet_data instance, so its members can be manipulated without locking. The structure acts as the bridge between the interface layer and the network layer.
/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
    struct Qdisc *output_queue;
    struct sk_buff_head input_pkt_queue;
    struct list_head poll_list;
    struct sk_buff *completion_queue;

    struct napi_struct backlog;
};
output_queue
The queue of network devices with packets to transmit, serviced by the transmit softirq. Devices in the transmit state are added to this queue; the transmit softirq walks it, dequeuing packets from each device's queuing discipline and sending them out.
input_pkt_queue
The interface-layer backlog queue for non-NAPI drivers. A non-NAPI driver, after reading a packet in its hard interrupt handler (or via timer polling), calls netif_rx() to pass it upward: the packet is first appended to input_pkt_queue and a receive softirq is raised; the softirq routine then delivers it to the upper layers.
poll_list
The device poll queue. Devices in the receive state are linked onto this list; the receive softirq walks it and reads packets from each device by polling.
NAPI is a hybrid of the interrupt and polling mechanisms and can markedly improve network processing speed. Under heavy load it sharply reduces the number of hardware interrupts caused by arriving packets, which is especially effective for high-rate streams of short packets.
NAPI works as follows: when the first packet of a burst arrives at a device, the device notifies the system with a hardware interrupt; the interrupt handler adds the device to the CPU's poll queue, disables the device's interrupts, and raises the receive softirq; the softirq routine then walks the poll queue and reads packets from each device on it. While the kernel is draining a device this way, newly arriving packets need no further interrupt handling; maintaining the poll queue is enough for them to be picked up.
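Before the interrupt and poll handlers shown below can cooperate like this, the driver must have tied a napi_struct to the device during probe. A minimal sketch of that registration (my_adapter, my_poll, and the weight of 64 are illustrative assumptions; the e100 does the equivalent in its own probe path):
#include <linux/netdevice.h>

struct my_adapter {
    struct napi_struct napi;    /* embedded NAPI context */
    /* ... device-specific state ... */
};

/* Poll routine with the same contract as e100_poll() below. */
static int my_poll(struct napi_struct *napi, int budget);

static void my_napi_setup(struct net_device *netdev)
{
    struct my_adapter *adapter = netdev_priv(netdev);

    /* Register the poll routine; the weight caps work per poll call. */
    netif_napi_add(netdev, &adapter->napi, my_poll, 64);
    napi_enable(&adapter->napi);
}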
e100_intr() is the interrupt handler of the e100 network device driver. When packets arrive at the device, the device raises an interrupt, which e100_intr() services:
static irqreturn_t e100_intr(int irq, void *dev_id)
{
    struct net_device *netdev = dev_id;
    struct nic *nic = netdev_priv(netdev);
    u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

    DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

    if (stat_ack == stat_ack_not_ours ||    /* Not our interrupt */
        stat_ack == stat_ack_not_present)   /* Hardware is ejected */
        return IRQ_NONE;

    /* Ack interrupt(s) */
    iowrite8(stat_ack, &nic->csr->scb.stat_ack);

    /* We hit Receive No Resource (RNR); restart RU after cleaning */
    if (stat_ack & stat_ack_rnr)
        nic->ru_running = RU_SUSPENDED;

    if (likely(napi_schedule_prep(&nic->napi))) {
        e100_disable_irq(nic);
        __napi_schedule(&nic->napi);
    }

    return IRQ_HANDLED;
}
net_rx_action() is the handler of the receive softirq. When a device has input pending, both non-NAPI and NAPI drivers normally raise this softirq to do the processing, which improves system performance.
e100_poll() is the e100 driver's poll function; the receive softirq invokes it through a function pointer:
static int e100_poll(struct napi_struct *napi, int budget)
{
    struct nic *nic = container_of(napi, struct nic, napi);
    unsigned int work_done = 0;

    e100_rx_clean(nic, &work_done, budget);
    e100_tx_clean(nic);

    /* If budget not fully consumed, exit the polling mode */
    if (work_done < budget) {
        napi_complete(napi);
        e100_enable_irq(nic);
    }

    return work_done;
}
e100_rx_clean() reads the received frames from the device and feeds them to the upper protocols via netif_receive_skb(); work_done counts the frames read. If the budget was not fully consumed, the device has been drained, so it is removed from the poll queue, polling mode is exited, and the device's interrupt is re-enabled.
netif_rx() appends a frame received from a device to the interface-layer backlog queue for the upper protocols to process. The enqueue normally succeeds, and queuing effectively prevents frames that have already been received from being dropped merely because the upper layers are congested. Drivers implemented with NAPI normally do not call this interface.
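For illustration, a non-NAPI driver's receive interrupt might look roughly like this sketch (my_legacy_intr and the my_hw_* accessors are hypothetical; netif_rx() and the skb helpers are real kernel APIs):
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>

static irqreturn_t my_legacy_intr(int irq, void *dev_id)
{
    struct net_device *netdev = dev_id;
    struct sk_buff *skb;
    int len = my_hw_frame_len(netdev);              /* hypothetical HW accessor */

    skb = dev_alloc_skb(len + NET_IP_ALIGN);
    if (!skb)
        return IRQ_HANDLED;                         /* drop on allocation failure */
    skb_reserve(skb, NET_IP_ALIGN);                 /* align the IP header */
    my_hw_copy_frame(netdev, skb_put(skb, len));    /* hypothetical copy from HW */
    skb->protocol = eth_type_trans(skb, netdev);

    netif_rx(skb);    /* enqueue to this CPU's input_pkt_queue */
    return IRQ_HANDLED;
}
The kernel's own netif_rx() follows.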
DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };

/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process. It always succeeds. The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
{
    struct softnet_data *queue;
    unsigned long flags;

    /* if netpoll wants it, pretend we never saw it */
    if (netpoll_rx(skb))
        return NET_RX_DROP;

    if (!skb->tstamp.tv64)
        net_timestamp(skb);

    /*
     * The code is rearranged so that the path is the most
     * short when CPU is congested, but is still operating.
     */
    local_irq_save(flags);

    queue = &__get_cpu_var(softnet_data);
    __get_cpu_var(netdev_rx_stat).total++;
    if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
        if (queue->input_pkt_queue.qlen) {
enqueue:
            __skb_queue_tail(&queue->input_pkt_queue, skb);
            local_irq_restore(flags);
            return NET_RX_SUCCESS;
        }

        napi_schedule(&queue->backlog);
        goto enqueue;
    }

    __get_cpu_var(netdev_rx_stat).dropped++;
    local_irq_restore(flags);

    kfree_skb(skb);
    return NET_RX_DROP;
}
process_backlog() is the poll function of the virtual backlog device used for non-NAPI input. Once the backlog device has been added to the device poll queue, the receive softirq calls process_backlog() to take packets in:
static int process_backlog(struct napi_struct *napi, int quota)
{
    int work = 0;
    struct softnet_data *queue = &__get_cpu_var(softnet_data);
    unsigned long start_time = jiffies;

    napi->weight = weight_p;
    do {
        struct sk_buff *skb;

        local_irq_disable();
        skb = __skb_dequeue(&queue->input_pkt_queue);
        if (!skb) {
            __napi_complete(napi);
            local_irq_enable();
            break;
        }
        local_irq_enable();

        netif_receive_skb(skb);
    } while (++work < quota && jiffies == start_time);

    return work;
}
The packet_type structure is the network layer's input interface. The system supports multiple protocol families, and each family implements its own receive routine; the structure thus acts as the bridge between the link layer and the network layer. On Ethernet, when a frame reaches the host, the network-layer receive handler matching the frame's protocol type is invoked.
struct packet_type {
    __be16 type;                /* This is really htons(ether_type). */
    struct net_device *dev;     /* NULL is wildcarded here */
    int (*func)(struct sk_buff *,
                struct net_device *,
                struct packet_type *,
                struct net_device *);
    struct sk_buff *(*gso_segment)(struct sk_buff *skb,
                                   int features);
    int (*gso_send_check)(struct sk_buff *skb);
    struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                    struct sk_buff *skb);
    int (*gro_complete)(struct sk_buff *skb);
    void *af_packet_priv;
    struct list_head list;
};
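A handler is hooked into the receive path with dev_add_pack(), which links the packet_type into ptype_all (for ETH_P_ALL) or into the ptype_base hash keyed by type. A minimal module sketch under assumed names (my_rcv and the experimental EtherType 0x88b5 are illustrative, not kernel code):
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type *pt, struct net_device *orig_dev)
{
    pr_debug("got %u bytes from %s\n", skb->len, dev->name);
    kfree_skb(skb);    /* consume the packet */
    return NET_RX_SUCCESS;
}

static struct packet_type my_packet_type __read_mostly = {
    .type = cpu_to_be16(0x88b5),    /* IEEE local experimental EtherType */
    .func = my_rcv,
};

static int __init my_init(void)
{
    dev_add_pack(&my_packet_type);    /* hashed into ptype_base by type */
    return 0;
}

static void __exit my_exit(void)
{
    dev_remove_pack(&my_packet_type);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");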
The IPv4 packet_type instance is ip_packet_type, defined below and registered by inet_init() via dev_add_pack(&ip_packet_type); ip_rcv() is the receive handler for IP datagrams.
static struct packet_type ip_packet_type __read_mostly = {
    .type = cpu_to_be16(ETH_P_IP),
    .func = ip_rcv,
    .gso_send_check = inet_gso_send_check,
    .gso_segment = inet_gso_segment,
    .gro_receive = inet_gro_receive,
    .gro_complete = inet_gro_complete,
};
netif_receive_skb() delivers a frame to the upper protocols. It first walks the ptype_all list, handing a copy of the frame to every handler on it; it then offers the frame to the bridge, and if the bridge forwards it, local delivery stops there; otherwise it walks the ptype_base hash table and, based on the frame's protocol type, calls the matching receive routine.
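In outline, the dispatch looks roughly like this (a greatly simplified sketch, not the kernel's verbatim code; RCU locking, VLAN handling, pt_prev batching, and the bridge hook are omitted, and it borrows dev.c internals such as ptype_all, ptype_base, and deliver_skb()):
static int netif_receive_skb_sketch(struct sk_buff *skb)
{
    struct packet_type *ptype;
    __be16 type = skb->protocol;
    int ret = NET_RX_DROP;

    /* 1. Hand a copy to every ETH_P_ALL tap (e.g. packet sockets). */
    list_for_each_entry_rcu(ptype, &ptype_all, list)
        if (!ptype->dev || ptype->dev == skb->dev)
            deliver_skb(skb, ptype, skb->dev);

    /* 2. The bridge may take the skb here; if it does, we are done. */

    /* 3. Hash on the protocol type and call the matching handler,
     *    e.g. ip_rcv() for ETH_P_IP. */
    list_for_each_entry_rcu(ptype,
            &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list)
        if (ptype->type == type &&
            (!ptype->dev || ptype->dev == skb->dev))
            ret = ptype->func(skb, skb->dev, ptype, skb->dev);

    return ret;
}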
A raw socket created with socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)) receives not only packets arriving from outside but also, when the conditions are met, packets transmitted by the local host.
dev_queue_xmit_nit() is what delivers locally transmitted packets to such sockets: it is called on the link-layer output path and feeds qualifying packets to the raw packet sockets.
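The effect is easy to observe from userspace; a minimal sketch of such a raw socket (requires CAP_NET_RAW, so typically root):
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

int main(void)
{
    unsigned char buf[2048];
    int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    for (int i = 0; i < 4; i++) {    /* grab a few frames and exit */
        ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
        if (n < 0)
            break;
        /* bytes 12-13 of an Ethernet frame hold the EtherType */
        printf("frame of %zd bytes, ethertype 0x%02x%02x\n",
               n, buf[12], buf[13]);
    }
    close(fd);
    return 0;
}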
Each CPU has its own softnet_data, and normally each CPU services the output and input queues of its own instance. One CPU state change needs special handling: CPU_DEAD. At that point the CPU can no longer do any work, so the packets sitting in its softnet_data input and output queues must be handed over to another CPU. To react to such state changes, the interface-layer init function registered the callback dev_cpu_callback() via hotcpu_notifier():
static int dev_cpu_callback(struct notifier_block *nfb,
                            unsigned long action,
                            void *ocpu)
{
    struct sk_buff **list_skb;
    struct Qdisc **list_net;
    struct sk_buff *skb;
    unsigned int cpu, oldcpu = (unsigned long)ocpu;
    struct softnet_data *sd, *oldsd;

    if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
        return NOTIFY_OK;

    local_irq_disable();
    cpu = smp_processor_id();
    sd = &per_cpu(softnet_data, cpu);
    oldsd = &per_cpu(softnet_data, oldcpu);

    /* Find end of our completion_queue. */
    list_skb = &sd->completion_queue;
    while (*list_skb)
        list_skb = &(*list_skb)->next;
    /* Append completion queue from offline CPU. */
    *list_skb = oldsd->completion_queue;
    oldsd->completion_queue = NULL;

    /* Find end of our output_queue. */
    list_net = &sd->output_queue;
    while (*list_net)
        list_net = &(*list_net)->next_sched;
    /* Append output queue from offline CPU. */
    *list_net = oldsd->output_queue;
    oldsd->output_queue = NULL;

    raise_softirq_irqoff(NET_TX_SOFTIRQ);
    local_irq_enable();

    /* Process offline CPU's input_pkt_queue */
    while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
        netif_rx(skb);

    return NOTIFY_OK;
}
Original post: http://blog.csdn.net/wangpeihuixyz/article/details/38172509