[Repost] Message passing between qemu and the vhost-user app, sharing guest memory with the vhost-user app, and the notification mechanism between guest and vhost-user app

Reposted from https://www.cnblogs.com/ck1020/p/8341914.html

 

vhost-user is an implementation that moves the vhost-kernel functionality back into user space. Its basic idea is very similar to vhost-kernel, except that the part that used to live in the kernel is now handled by another user-space process, for example snabbswitch or DPDK. With vhost-user, a UNIX domain socket replaces the device file used in kernel mode for inter-process communication between qemu and the vhost-user app, and the guest RAM is mapped into the vhost-user app's address space with mmap so that the memory is shared. Everything else works essentially the same way as vhost-kernel.

This article analyzes the three key mechanisms involved: message passing between qemu and the vhost-user app, sharing of guest memory with the vhost-user app, and the notification mechanism between the guest and the vhost-user app.

 

1. Message passing between qemu and the vhost-user app

Message passing between qemu and the vhost-user app is implemented over a UNIX domain socket and mirrors the per-ioctl implementation in the kernel (with vhost-kernel, an ioctl is used instead). The vhost-user app therefore has to provide its own handling for each of these requests; in DPDK this is the vhost_user_msg_handler function in vhost_user.c. The core data structure here is VhostUserMsg, the carrier of every message, and the structure itself is not complicated.

typedef struct VhostUserMsg {
    union {
        uint32_t master; /* a VhostUserRequest value, set by qemu */
        uint32_t slave;  /* a VhostUserSlaveRequest value, set by the backend, e.g. DPDK */
    } request;

#define VHOST_USER_VERSION_MASK     0x3
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY       (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   0xff
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog    log;
        struct vhost_iotlb_msg iotlb;
        VhostUserCryptoSessionParam crypto_session;
        VhostUserVringArea area;
        VhostUserInflight inflight;
    } payload;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int fd_num;
} __attribute((packed)) VhostUserMsg;

Since this is message passing, a message must carry its type, its content, and the size of that content, and those are exactly the main parts of this structure. The first union identifies the type of the message. The flags field that follows describes properties of the message itself, such as whether a reply is required. size is the size of the payload, the next union holds the actual message content, and the final fds array carries the file descriptor associated with each guest RAM region. The message types are as follows:

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_CRYPTO_CREATE_SESS = 26,
    VHOST_USER_CRYPTO_CLOSE_SESS = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_MAX = 33
} VhostUserRequest;
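To give a feel for how the backend consumes these messages, here is a minimal, hedged sketch of a dispatch on VhostUserMsg.request. The helper read_vhost_user_msg is hypothetical; the real DPDK handler is vhost_user_msg_handler and does considerably more (validation, replies, device state tracking).

/* Hedged sketch only: simplified dispatch on the request field.
 * read_vhost_user_msg() is a hypothetical helper that reads the fixed
 * header plus `size` bytes of payload (and any ancillary fds) from the
 * connected UNIX domain socket. */
static int
handle_vhost_user_msg(int connfd)
{
    VhostUserMsg msg;

    if (read_vhost_user_msg(connfd, &msg) < 0)
        return -1;

    switch (msg.request.master) {
    case VHOST_USER_GET_FEATURES:
        /* fill msg.payload.u64 with the supported features and reply */
        break;
    case VHOST_USER_SET_MEM_TABLE:
        /* mmap the regions described in msg.payload.memory using msg.fds */
        break;
    case VHOST_USER_SET_VRING_KICK:
        /* remember the kick eventfd carried in msg.fds[0] */
        break;
    default:
        break;
    }
    return 0;
}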

Nothing complicated so far. Next, let's look at how this is initialized. The path of the socket file is passed in as a command-line argument; in the main function of examples/vhost/, us_vhost_parse_socket_path parses the socket-file argument and stores it in the static array socket_files. Later, a for loop in main calls rte_vhost_driver_register for each socket file to register a vhost driver. The core job of that function is to create a local socket for each socket file (via create_unix_socket); on the vhost side the socket is described by a vhost_user_socket structure. After the driver is registered, the features are set according to what is actually supported. Finally, rte_vhost_driver_start starts the vhost driver; that function is worth a look.

The function vhost_register_unix_socket below is from SPDK.

int
vhost_register_unix_socket(const char *path, const char *ctrl_name,
               uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features)
{
    struct stat file_stat;
#ifndef SPDK_CONFIG_VHOST_INTERNAL_LIB
    uint64_t features = 0;
#endif

    /* Register vhost driver to handle vhost messages. */
    if (stat(path, &file_stat) != -1) {
        if (!S_ISSOCK(file_stat.st_mode)) {
            SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": "
                    "The file already exists and is not a socket.\n",
                    path);
            return -EIO;
        } else if (unlink(path) != 0) {
            SPDK_ERRLOG("Cannot create a domain socket at path \"%s\": "
                    "The socket already exists and failed to unlink.\n",
                    path);
            return -EIO;
        }
    }

    if (rte_vhost_driver_register(path, 0) != 0) { /* create the listening socket */
        SPDK_ERRLOG("Could not register controller %s with vhost library\n", ctrl_name);
        SPDK_ERRLOG("Check if domain socket %s already exists\n", path);
        return -EIO;
    }
    if (rte_vhost_driver_set_features(path, virtio_features) ||
        rte_vhost_driver_disable_features(path, disabled_features)) {
        SPDK_ERRLOG("Couldn't set vhost features for controller %s\n", ctrl_name);

        rte_vhost_driver_unregister(path);
        return -EIO;
    }
    /* Register the callbacks (notify ops) invoked once a connection is established.
     * new_device in g_spdk_vhost_ops eventually runs
     * rc = vdev->backend->start_session(vsession); when is it called?
     * It is invoked as ops->new_device in the latter half of vhost_user_msg_handler. */
    if (rte_vhost_driver_callback_register(path, &g_spdk_vhost_ops) != 0) {
        rte_vhost_driver_unregister(path);
        SPDK_ERRLOG("Couldn't register callbacks for controller %s\n", ctrl_name);
        return -EIO;
    }

#ifndef SPDK_CONFIG_VHOST_INTERNAL_LIB
    rte_vhost_driver_get_protocol_features(path, &features);
    features |= protocol_features;
    rte_vhost_driver_set_protocol_features(path, features);
#endif

    /* Spawn a listener thread and start waiting for client connection requests. */
    if (rte_vhost_driver_start(path) != 0) {
        SPDK_ERRLOG("Failed to start vhost driver for controller %s (%d): %s\n",
                ctrl_name, errno, spdk_strerror(errno));
        rte_vhost_driver_unregister(path);
        return -EIO;
    }

    return 0;
}
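The callback table passed to rte_vhost_driver_callback_register is a struct vhost_device_ops from the rte_vhost library. Below is a minimal, hedged example of such an ops table for a toy backend; the bodies are placeholders, and this is not SPDK's actual g_spdk_vhost_ops.

#include <rte_vhost.h>

/* Hedged sketch: a minimal vhost_device_ops table standing in for
 * g_spdk_vhost_ops. SPDK's real callbacks start/stop a vhost session. */
static int
my_new_device(int vid)
{
    /* invoked from the latter half of vhost_user_msg_handler once the
     * device is ready (rings, memory table, kick/call fds all set) */
    return 0;
}

static void
my_destroy_device(int vid)
{
    /* tear down whatever my_new_device set up */
}

static const struct vhost_device_ops my_vhost_ops = {
    .new_device     = my_new_device,
    .destroy_device = my_destroy_device,
};

/* usage: rte_vhost_driver_callback_register(path, &my_vhost_ops); */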

 

 

int
rte_vhost_driver_start(const char *path)
{
    struct vhost_user_socket *vsocket;
    static pthread_t fdset_tid;

    pthread_mutex_lock(&vhost_user.mutex);
    vsocket = find_vhost_user_socket(path); /* look up the vhost_user_socket matching this path */
    pthread_mutex_unlock(&vhost_user.mutex);

    if (!vsocket)
        return -1;

    /* create a thread that monitors the fdset */
    if (fdset_tid == 0) {
        int ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch,
                     &vhost_user.fdset);
        if (ret < 0)
            RTE_LOG(ERR, VHOST_CONFIG,
                "failed to create fdset handling thread");
    }

    if (vsocket->is_server)
        return vhost_user_start_server(vsocket);
    else
        return vhost_user_start_client(vsocket);
}

 

The function's parameter is the path of the corresponding socket file. Inside the function, the first step is to find the matching vhost_user_socket structure via find_vhost_user_socket; all vhost_user_socket structures are kept in an array inside the vhost_user data structure. If the socket does exist, a thread is created to handle the vhost-user fds (its role is discussed later), bound to the function fdset_event_dispatch. Once this is done, the socket is started. In fact, qemu and vhost can each act as server or client, but normally vhost plays the server role, so vhost_user_start_server is called here. What follows is ordinary socket programming: bind, then listen, nothing special. After that, fdset_add is called, which is vhost's own separate mechanism for handling the message fds.

static int
vhost_user_start_server(struct vhost_user_socket *vsocket)
{
    int fd = vsocket->socket_fd;
    const char *path = vsocket->path;

...
    ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
    ret = listen(fd, MAX_VIRTIO_BACKLOG);
...
    ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
              NULL, vsocket);
    /* registers a handler for this fd; it is invoked whenever the fd becomes ready */
...
}

 

int
fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat)
{
    int i;

    if (pfdset == NULL || fd == -1)
        return -1;

    pthread_mutex_lock(&pfdset->fd_mutex);
    i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
    if (i == -1) {
        fdset_shrink_nolock(pfdset);
        i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
        if (i == -1) {
            pthread_mutex_unlock(&pfdset->fd_mutex);
            return -2;
        }
    }

    fdset_add_fd(pfdset, i, fd, rcb, wcb, dat);
    pthread_mutex_unlock(&pfdset->fd_mutex);

    return 0;
}

In short, this function registers a handler for the given fd; when the fd becomes ready, that handler is called, and here it is vhost_user_server_new_connection. How is this implemented? Look at fdset_add_fd:

static void
fdset_add_fd(struct fdset *pfdset, int idx, int fd,
    fd_cb rcb, fd_cb wcb, void *dat)
{
    struct fdentry *pfdentry = &pfdset->fd[idx];
    struct pollfd *pfd = &pfdset->rwfds[idx];

    pfdentry->fd  = fd;
    pfdentry->rcb = rcb;
    pfdentry->wcb = wcb;
    pfdentry->dat = dat;

    pfd->fd = fd;
    pfd->events  = rcb ? POLLIN : 0;
    pfd->events |= wcb ? POLLOUT : 0;
    pfd->revents = 0;
}

 

 

Two things are filled in here: an fdentry and a pollfd. The former stores the detailed information, the latter is what the poll operation uses so the monitoring thread can watch the fd. The callback we registered was passed as the third argument (the read callback rcb), so pfd->events here is POLLIN. Now back to the handler thread's function, fdset_event_dispatch: it monitors the rwfds in vhost_user.fdset, and when an fd becomes ready it enters the handling path:

if (rcb && pfd->revents & (POLLIN | FDPOLLERR))
    rcb(fd, dat, &remove1);
if (wcb && pfd->revents & (POLLOUT | FDPOLLERR))
    wcb(fd, dat, &remove2);
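For context, here is a hedged, heavily simplified sketch of the loop that surrounds those two lines; the real fdset_event_dispatch also takes the fdset mutex, handles fd removal, and compacts its arrays.

#include <poll.h>

/* Hedged sketch: the surrounding poll loop, heavily simplified.
 * struct fdset / struct fdentry / fd_cb are DPDK's fd_man.h types
 * described above. */
static void *
fdset_event_dispatch_sketch(void *arg)
{
    struct fdset *pfdset = arg;

    for (;;) {
        /* wait up to 1s for any registered fd to become ready */
        if (poll(pfdset->rwfds, pfdset->num, 1000) <= 0)
            continue;

        for (int i = 0; i < pfdset->num; i++) {
            struct pollfd  *pfd      = &pfdset->rwfds[i];
            struct fdentry *pfdentry = &pfdset->fd[i];
            fd_cb rcb = pfdentry->rcb;
            fd_cb wcb = pfdentry->wcb;
            int remove1 = 0, remove2 = 0;

            if (rcb && (pfd->revents & (POLLIN | FDPOLLERR)))
                rcb(pfd->fd, pfdentry->dat, &remove1);
            if (wcb && (pfd->revents & (POLLOUT | FDPOLLERR)))
                wcb(pfd->fd, pfdentry->dat, &remove2);
        }
    }
    return NULL;
}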

The rcb here is the callback registered earlier for the fd. Back in vhost_user_server_new_connection: when an fd becomes ready, in this case the fd of the listening socket file, this function is called, the connection is accepted, and vhost_user_add_connection is invoked. Now that the connection exists, it has to be set up for vhost: a virtio_net device is created and attached to the connection, the device name is set, and so on. The key step is registering another callback for the new fd. The previous callback was only for establishing the connection; once it is established, a function is needed to handle the socket messages, and that is vhost_user_read_cb. From here we formally enter the message part: it calls vhost_user_msg_handler, which is the core function for processing socket messages. This completes the message passing part.
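Summarizing that chain, here is a hedged pseudocode-style sketch of the connection path (error handling and connection bookkeeping omitted; the real DPDK functions are vhost_user_server_new_connection and vhost_user_add_connection):

/* Hedged sketch: how a new connection gets wired up (simplified). */
static void
server_new_connection_sketch(int listen_fd, void *dat, int *remove)
{
    struct vhost_user_socket *vsocket = dat;
    int connfd = accept(listen_fd, NULL, NULL);

    if (connfd < 0)
        return;
    /* vhost_user_add_connection() creates the virtio_net device, attaches
     * it to this connection, and registers vhost_user_read_cb so every
     * later message on connfd is fed to vhost_user_msg_handler */
    vhost_user_add_connection(connfd, vsocket);
}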

2. Sharing guest memory with the vhost-user app

Although qemu and vhost are now connected through the socket, the amount of information that channel can carry is limited; what really matters is the data to be transferred. Surely that is not pushed through the socket too? Of course not; the mode switches and data copies would probably bring the system to its knees. Shared memory is the key concept here, and the core mechanism is similar to vhost-kernel: qemu still has to pass the guest's memory layout to vhost-user in a message, so that is where we start, in vhost_user_msg_handler:

    case VHOST_USER_SET_MEM_TABLE:
        ret = vhost_user_set_mem_table(dev, &msg);
        break;

Before analyzing that function, let's look at a few data structures.

/* the region structure on the qemu side */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr; /* GPA of region */
    uint64_t memory_size;     /* size */
    uint64_t userspace_addr;  /* HVA in qemu process */
    uint64_t mmap_offset;     /* offset */
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions; /* region num */
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS]; /* all regions */
} VhostUserMemory;

On the vhost side, the corresponding data structure is:

struct rte_vhost_mem_region {
    uint64_t guest_phys_addr; /* GPA of region */
    uint64_t guest_user_addr; /* HVA in qemu process */
    uint64_t host_user_addr;  /* HVA in vhost-user */
    uint64_t size;            /* size */
    void     *mmap_addr;      /* mmap base address */
    uint64_t mmap_size;
    int fd;                   /* fd associated with the region */
};

The fields are largely self-explanatory. The virtio_net structure holds a pointer to the rte_vhost_memory structure of the current connection:

struct rte_vhost_memory {
    uint32_t nregions;
    struct rte_vhost_mem_region regions[];
};

OK, now the code. There is quite a bit of it, but it is easy to follow, so only the core part is shown:

    dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
        sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
    if (dev->mem == NULL) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%d) failed to allocate memory for dev->mem\n",
            dev->vid);
        return -1;
    }
    /* region num */
    dev->mem->nregions = memory.nregions;

    for (i = 0; i < memory.nregions; i++) {
        /* fd info */
        fd  = msg->fds[i]; /* a file descriptor originating in the qemu process?? */
        reg = &dev->mem->regions[i];
        /* GPA of this region */
        reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
        /* HVA of this region in the qemu process */
        reg->guest_user_addr = memory.regions[i].userspace_addr;
        reg->size            = memory.regions[i].memory_size;
        reg->fd              = fd;
        /* offset into the region */
        mmap_offset = memory.regions[i].mmap_offset;
        mmap_size   = reg->size + mmap_offset;

        /* mmap() without flag of MAP_ANONYMOUS, should be called
         * with length argument aligned with hugepagesz at older
         * longterm version Linux, like 2.6.32 and 3.2.72, or
         * mmap() will fail with EINVAL.
         *
         * to avoid failure, make sure in caller to keep length
         * aligned.
         */
        alignment = get_blk_size(fd);
        if (alignment == (uint64_t)-1) {
            RTE_LOG(ERR, VHOST_CONFIG,
                "couldn't get hugepage size through fstat\n");
            goto err_mmap;
        }
        /* align */
        mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
        /* do the mapping; the result is a virtual address in this process.
         * How can a file descriptor from another process be mapped here? */
        mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_POPULATE, fd, 0);

        if (mmap_addr == MAP_FAILED) {
            RTE_LOG(ERR, VHOST_CONFIG,
                "mmap region %u failed.\n", i);
            goto err_mmap;
        }

        reg->mmap_addr = mmap_addr;
        reg->mmap_size = mmap_size;
        /* virtual address of this region in the vhost process */
        reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                      mmap_offset;

        if (dev->dequeue_zero_copy)
            add_guest_pages(dev, reg, alignment);

    }

The first step is to allocate the mem space for dev, which also shows the layout of that structure.

 

The following for loop copies the relevant information for each region, aligns the region size, and then mmaps the fd associated with the region, which yields the region's virtual address on the vhost side. The virtual address corresponding to the region's GPA, however, still needs the offset, which is also passed in with the message, added to the address returned by mmap. With that, setting up the memory table is essentially done. What does the address translation look like?

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
    struct rte_vhost_mem_region *reg;
    uint32_t i;

    /* Find the region where the address lives. */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];

        if (qva >= reg->guest_user_addr &&
            qva <  reg->guest_user_addr + reg->size) {
            return qva - reg->guest_user_addr +
                   reg->host_user_addr; /* offset of qva inside its region: qva - reg->guest_user_addr */
        }
    }

    return 0;
}

Quite simple: the core idea is to use the QVA to find the region it belongs to, then add the address's offset within that region to the region's effective mapped address in vhost-user, i.e. the reg->host_user_addr field. The other key idea in this part is the use of the fds. vhost_user_set_mem_table takes the fd straight from the message and mmaps it directly, which puzzled me at first: isn't an fd only valid inside its own process? How can it be shared? After asking the open-source community (and realizing how narrow my knowledge is), it turns out this is a standard Unix mechanism for passing descriptors: process A's descriptor can be handed to process B through a specific call, and process B gets a slot in its own descriptor table for it. So B does not actually use A's fd number; it uses an fd from its own table, but both fds refer to the same open file description, much like adding another reference. That mechanism will be covered in detail separately; for this article it is enough to know what it does.
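For reference, here is a minimal hedged sketch of that descriptor-passing mechanism, i.e. SCM_RIGHTS ancillary data over a UNIX domain socket. This is generic POSIX usage, not the exact qemu or DPDK code.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Hedged sketch: pass one fd over a connected UNIX domain socket. */
static int
send_fd(int sock, int fd_to_send)
{
    char dummy = 0;
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        char buf[CMSG_SPACE(sizeof(int))];
        struct cmsghdr align;               /* ensures proper alignment */
    } ctrl;
    struct msghdr msgh;
    struct cmsghdr *cmsg;

    memset(&msgh, 0, sizeof(msgh));
    msgh.msg_iov = &iov;
    msgh.msg_iovlen = 1;
    msgh.msg_control = ctrl.buf;
    msgh.msg_controllen = sizeof(ctrl.buf);

    cmsg = CMSG_FIRSTHDR(&msgh);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type  = SCM_RIGHTS;          /* "I am passing file descriptors" */
    cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

    /* the receiver does the mirror-image recvmsg() and obtains a new fd in
     * its own descriptor table that refers to the same open file description */
    return sendmsg(sock, &msgh, 0) < 0 ? -1 : 0;
}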

3. The notification mechanism between the guest and the vhost-user app

The notification mechanism here is basically the same as with vhost-kernel: both use eventfd, so this part is fairly simple.

Code on the qemu side:

file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);

static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .request = request,
        .flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    return 0;
}

As you can see, this essentially hands the eventfd descriptor over to vhost-user as well. On the vhost-user side, the key line in vhost_user_set_vring_kick is:

vq->kickfd = file.fd;

The notification mechanism here is really no different from the kernel case; the eventfd is simply operated on from user space instead. It is not discussed further here; more details may be added later.
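As a hedged sketch of what that user-space eventfd handling looks like (generic eventfd semantics, not the actual DPDK datapath code): the backend reads the kick fd to learn that the guest has placed buffers on the ring, and writes to the call fd to interrupt the guest once it has consumed them.

#include <stdint.h>
#include <unistd.h>

/* Hedged sketch: generic eventfd usage for kick/call notification.
 * kickfd/callfd are the descriptors received via
 * VHOST_USER_SET_VRING_KICK / VHOST_USER_SET_VRING_CALL. */
static void
wait_for_kick(int kickfd)
{
    uint64_t cnt;

    /* blocks until the guest's write to the ioeventfd arrives */
    if (read(kickfd, &cnt, sizeof(cnt)) < 0)
        return;
}

static void
notify_guest(int callfd)
{
    uint64_t one = 1;

    /* KVM's irqfd turns this write into a virtual interrupt in the guest */
    if (write(callfd, &one, sizeof(one)) < 0)
        return;
}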

 
