ZeroMQ
What is ZeroMQ?
ZeroMQ is a networking library focused on message-based communication. Calling it a message queue is not really accurate, and its competitors are not message queues like Kafka, RocketMQ, or MemcacheQ.
What is ZeroMQ not?
ZeroMQ is not a wrapper around sockets; think of it as an extra layer built between the application layer and the transport layer.
How do you use ZeroMQ?
Usage resembles sockets, except that ZeroMQ is message-oriented while sockets are byte-stream-oriented. With ZeroMQ you do not have to work out how to push a large message through the decades-old socket API; ZeroMQ guarantees message integrity. You do not have to wrestle with I/O, blocking, and asynchrony; ZeroMQ performs asynchronous I/O over lock-free queues. Nor do you have to worry about messages backing up; ZeroMQ keeps asynchronous queues that buffer messages and can spill them to disk when necessary. Production brings all kinds of routing problems, one-to-many, many-to-one, many-to-many, and ZeroMQ provides ready-made patterns that combine flexibly. Whether you want high throughput or the lowest possible latency, ZeroMQ can deliver both: in my tests on a 4-core Linux server, throughput reached about 200,000 messages per second with 1024-byte messages and about 400,000 with 512-byte messages.
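As a taste of the message-oriented API, here is a minimal receive sketch. It is an illustration under assumptions, not part of my test code: it assumes libzmq 3.x or later (the same C API used below), and recv_whole_message and its sock argument are my own placeholders for an already-created, connected ZeroMQ socket. The point is that zmq_msg_recv() always yields exactly one complete message, however large, with no manual framing loop as raw TCP would require.
#include <stdlib.h>
#include <string.h>
#include <zmq.h>
/* Receive one whole message and return a heap copy of its body.
 * The caller frees the returned pointer. */
static void* recv_whole_message(void *sock, size_t *len_out)
{
    zmq_msg_t msg;
    zmq_msg_init(&msg);                         /* sized automatically on receive */
    if (zmq_msg_recv(&msg, sock, 0) == -1)
    {
        zmq_msg_close(&msg);
        return NULL;
    }
    *len_out = zmq_msg_size(&msg);              /* full message length */
    void *copy = malloc(*len_out);
    memcpy(copy, zmq_msg_data(&msg), *len_out); /* contiguous message body */
    zmq_msg_close(&msg);
    return copy;
}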
ZeroMQ fixes a whole series of socket shortcomings. It is a good way to avoid reinventing the wheel, and worth considering whenever you need high-performance network communication.
Some of ZeroMQ's strengths:
1. It focuses on network communication; it is very fast, with very little overhead.
2. In the traditional setup the server must start before the client; ZeroMQ has no such requirement, and the endpoints can start in any order.
3. It supports the three common patterns: request-reply, publish-subscribe, and pipeline. Best of all, the patterns combine easily, which lets them fit extremely complex topologies (see the request-reply sketch after this list).
4. It supports several transports: inproc, tcp, multicast (pgm), and ipc. I have only used tcp; please benchmark the others before deciding to use them.
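A minimal request-reply sketch, again an illustration rather than my test code: the endpoint and payload are made up. It also shows point 2 above in action: the REQ side can start first, because zmq_connect() returns immediately and ZeroMQ queues the request until a REP peer binds the endpoint.
#include <stdio.h>
#include <zmq.h>

int main(void)
{
    void *ctx = zmq_init(1);
    void *req = zmq_socket(ctx, ZMQ_REQ);
    /* Connecting before any server binds is fine; the request is queued. */
    zmq_connect(req, "tcp://127.0.0.1:6666");       /* illustrative endpoint */
    zmq_send(req, "ping", 4, 0);                    /* one whole message out */
    char reply[16];
    int n = zmq_recv(req, reply, sizeof(reply), 0); /* blocks until a REP peer answers */
    if (n != -1)
        printf("got %d-byte reply\n", n);
    zmq_close(req);
    zmq_term(ctx);
    return 0;
}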
How hard is ZeroMQ to pick up?
Very easy: it feels like using a simplified socket. Code is attached below, and it is easy to understand from the code.
What did I do with ZeroMQ?
We have a product in production that wants to replace its message queue; the requirements are stable latency jitter and very low latency. ZeroMQ was one of the candidates, so after studying how it works I wrote the following test code to examine its actual behavior.
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <stdlib.h>
#include <string>
#include <zmq.h>
static int send_time(void* socket);
static int send_buffer_with_time(void* socket, void* buffer, int len);
static long long compare_time(void* begin);
static void * s_recv (void *socket);
static void * s_recv (void *socket, void *msg, int msg_len);
static int s_send (void *socket, char *string);
static int bin_send(void* socket, void* buffer, int len);
static int deal_arg(int argc, char** argv);
static int pub_action();
static int sub_action();
// Send the current wall-clock time as the whole message body.
static int send_time(void* socket)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    int size = sizeof(struct timeval);
    return bin_send(socket, &tv, size);
}
// Stamp the current time at the head of buffer, then send len bytes.
static int send_buffer_with_time(void* socket, void* buffer, int len)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    memcpy(buffer, &tv, sizeof(struct timeval));
    return bin_send(socket, buffer, len);   // send the stamped buffer, not &tv
}
// Return the microseconds elapsed between the timestamp in begin and now.
static long long compare_time(void* begin)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    struct timeval* tv_begin = (struct timeval*)begin;
    long long usec = (tv.tv_sec - tv_begin->tv_sec) * 1000000 + (tv.tv_usec - tv_begin->tv_usec);
    return usec;
}
// Receive one timestamp message; the caller must free() the result.
static void * s_recv (void *socket) {
    void *buffer = malloc(sizeof(struct timeval));
    //void *buffer = malloc(2 * sizeof(struct timeval) + 1);
    int size = zmq_recv (socket, buffer, sizeof(struct timeval), 0);
    //int size = zmq_recv (socket, buffer, 2 * sizeof(struct timeval) + 1, 0);
    if (size == -1)
    {
        free(buffer);   // don't leak the buffer on a failed receive
        return NULL;
    }
    return buffer;
}
// Receive up to msg_len bytes into a caller-provided buffer.
static void* s_recv(void* socket, void* msg, int msg_len)
{
    int size = zmq_recv(socket, msg, msg_len, 0);
    if (size == -1)
        return NULL;
    return msg;
}
// Send a C string (without its terminator) as one message.
static int s_send (void *socket, char *string) {
    int size = zmq_send (socket, string, strlen (string), 0);
    return size;
}
// Send len raw bytes; returns 1 on a complete send, 0 otherwise.
static int bin_send(void* socket, void* buffer, int len)
{
    int size = zmq_send(socket, buffer, len, 0);
    return size == len;
}
struct config
{
    std::string role;    // -r: pub | sub | coll | server | client
    std::string filter;  // -f: subscription filter for the SUB socket
    std::string host;    // -h: peer host to connect to
    int port;            // -p: port (parsed but unused below)
    bool silent;         // -s: print per-second averages instead of every message
    long long pub_count; // -c: number of messages to publish (-1 = unlimited)
    long long tps;       // -t: messages per burst (used by commented-out pacing code)
    int msg_size;        // -m: message size in bytes
};
struct config cf;
static int deal_arg(int argc, char** argv)
{
    cf.silent = false;
    cf.pub_count = -1;
    int opt;
    while ((opt = getopt(argc, argv, "r:f:h:p:sc:t:m:")) != -1)
    {
        switch (opt)
        {
        case 'r':
            cf.role = optarg;
            break;
        case 'f':
            cf.filter = optarg;
            break;
        case 'h':
            cf.host = optarg;
            break;
        case 'p':
            cf.port = atoi(optarg);
            break;
        case 's':
            cf.silent = true;
            break;
        case 'c':
            cf.pub_count = atoi(optarg);
            break;
        case 'm':
            cf.msg_size = atoi(optarg);
            break;
        case 't':
            cf.tps = atoi(optarg);
            break;
        }
    }
    return 0;
}
// Accumulate per-message latency and print the average once per second.
long long average_time(long long usec)
{
    static long long delay = 0;
    static long long count = 0;
    static timeval tv_begin = {0, 0};
    static timeval tv_end = {0, 0};
    delay += usec;
    count++;
    gettimeofday(&tv_end, NULL);
    if (((tv_end.tv_sec - tv_begin.tv_sec) * 1000000 + (tv_end.tv_usec - tv_begin.tv_usec)) > 1000000)
    {
        long long per_delay = delay / count;
        printf("handled %lld msgs within 1s. average delay sec:%lld msec:%lld\n",
               count, per_delay / 1000000, (per_delay % 1000000) / 1000);
        tv_begin.tv_sec = tv_end.tv_sec;
        tv_begin.tv_usec = tv_end.tv_usec;
        count = 0;
        delay = 0;
    }
    return usec;
}
// Check whether sec seconds have elapsed since this last returned true.
bool calc_sec(int sec)
{
    static timeval tv_begin = {0, 0};
    static timeval tv_end = {0, 0};
    gettimeofday(&tv_end, NULL);
    if (((tv_end.tv_sec - tv_begin.tv_sec) * 1000000 + (tv_end.tv_usec - tv_begin.tv_usec)) > 1000000LL * sec)
    {
        tv_begin.tv_sec = tv_end.tv_sec;
        tv_begin.tv_usec = tv_end.tv_usec;
        return true;
    }
    return false;
}
// Publisher: bind PUB on *:5555 and stream timestamp messages.
static int pub_action()
{
    void *context = zmq_init(1);
    void *pub = zmq_socket(context, ZMQ_PUB);
    //std::string address = "tcp://" + cf.host + ":5555";
    //zmq_bind(pub, address.c_str());
    zmq_bind(pub, "tcp://*:5555");
    sleep(1);   // give subscribers a moment to connect before publishing
    long long count = 0;
    while (1)
    {
        if (cf.pub_count == -1)
        {
            send_time(pub);
        }
        else
        {
            count++;
            if (count > cf.pub_count)
                break;
            send_time(pub);
        }
    }
    zmq_close(pub);
    zmq_term(context);
    return 0;
}
// Subscriber: connect to the publisher and measure per-message latency.
static int sub_action()
{
    void *context = zmq_init(1);
    void *sub = zmq_socket(context, ZMQ_SUB);
    std::string address = "tcp://" + cf.host + ":5555";
    zmq_connect(sub, address.c_str());
    //zmq_connect(sub, "tcp://localhost.localdomain:5555");
    zmq_setsockopt(sub, ZMQ_SUBSCRIBE, cf.filter.c_str(), cf.filter.length());
    while (1)
    {
        void* recv = s_recv(sub);
        if (!recv)
            continue;   // don't dereference a failed receive
        long long usec = compare_time(recv);
        free(recv);
        if (cf.silent)
        {
            average_time(usec);
            continue;
        }
        printf("sec:%lld msec:%lld usec:%lld\n", usec / 1000000, (usec % 1000000) / 1000, usec % 1000);
    }
    zmq_close(sub);
    zmq_term(context);
    return 0;
}
// The server pushes messages out of local port 5588.
static int server_push_action()
{
    void *context = zmq_init(1);
    void *push = zmq_socket(context, ZMQ_PUSH);
    //std::string address = "tcp://" + cf.host + ":5588";
    //zmq_bind(push, address.c_str());
    zmq_bind(push, "tcp://*:5588");
    // Reusable buffer for outgoing messages of msg_size bytes.
    void* buffer = malloc(cf.msg_size);
    while (1)
    {
        for (int i = 0; i < 1; i++)
        {
            send_buffer_with_time(push, buffer, cf.msg_size);
        }
        usleep(1);
        /*
        // Send tps messages back to back, then sleep.
        for (int i = 0; i < cf.tps; i++)
        {
            send_buffer_with_time(push, buffer, cf.msg_size);
            //send_time(push);
            usleep(1);
        }
        */
        //usleep(1000);
        /*
        while (true)
        {
            if (calc_sec(1) == false)
            {
                usleep(10);
            }
            else
            {
                printf("1s passed, begin work.\n");
                break;
            }
        }
        */
    }
    zmq_close(push);
    zmq_term(context);
    return 0;
}
// The client pulls messages from the server's port 5588 and pushes them to
// the collector's port 5599; the collector and server must run on the same host.
static int client_forward_action()
{
    void *context = zmq_init(1);
    void *pull = zmq_socket(context, ZMQ_PULL);
    void *push = zmq_socket(context, ZMQ_PUSH);
    std::string serv_addr = "tcp://" + cf.host + ":5588";
    zmq_connect(pull, serv_addr.c_str());
    std::string coll_addr = "tcp://" + cf.host + ":5599";
    zmq_connect(push, coll_addr.c_str());
    //zmq_proxy(pull, push, NULL);
    void* msg = malloc(cf.msg_size);
    while (1)
    {
        void* recv = s_recv(pull, msg, cf.msg_size);
        if (!recv)
            continue;   // skip failed receives instead of forwarding garbage
        bin_send(push, recv, cf.msg_size);
    }
    zmq_close(pull);
    zmq_close(push);
    zmq_term(context);
    return 0;
}
// The collector computes how long each message spent in flight; the start
// order should be client, collector, server.
static int collector_action()
{
    void *context = zmq_init(1);
    void *coll = zmq_socket(context, ZMQ_PULL);
    //std::string address = "tcp://" + cf.host + ":5599";
    //zmq_bind(coll, address.c_str());
    zmq_bind(coll, "tcp://*:5599");
    void* msg = malloc(cf.msg_size);
    long long tps = 0;
    while (1)
    {
        void* recv = s_recv(coll, msg, cf.msg_size);
        if (!recv)
        {
            printf("recv msg error.\n");
            break;
        }
        if (calc_sec(1) == true)
        {
            fprintf(stderr, "tps:%lld\n", tps);
            tps = 0;
        }
        else
        {
            tps++;
        }
        long long usec = compare_time(recv);
        printf("usec %lld\n", usec);
        //printf("sec %lld msec %lld usec %lld\n", usec/1000000, (usec%1000000)/1000, usec%1000);
    }
    zmq_close(coll);
    zmq_term(context);
    return 0;
}
int main(int argc, char** argv)
{
    deal_arg(argc, argv);
    if (cf.role.compare("pub") == 0)
    {
        pub_action();
    }
    else if (cf.role.compare("sub") == 0)
    {
        sub_action();
    }
    else if (cf.role.compare("coll") == 0)
    {
        collector_action();
    }
    else if (cf.role.compare("server") == 0)
    {
        server_push_action();
    }
    else if (cf.role.compare("client") == 0)
    {
        client_forward_action();
    }
    return 0;
}
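To build and run the test, a sketch assuming libzmq and its headers are installed (the file name zmq_test.cpp is my own placeholder):

g++ zmq_test.cpp -o zmq_test -lzmq

# PUB/SUB latency test: the publisher binds *:5555, the subscriber connects.
./zmq_test -r sub -h 127.0.0.1 -s &
./zmq_test -r pub -c 1000000

# PUSH/PULL pipeline: start order client, collector, server (see the comments above).
./zmq_test -r client -h 127.0.0.1 -m 1024 &
./zmq_test -r coll -m 1024 &
./zmq_test -r server -m 1024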
ZeroMQ's test results were excellent: very low latency and very high throughput. Since I did not compare it head-to-head with other message queues, the quantified numbers would not mean much on their own, so I will not post them.
How broad is ZeroMQ's language and platform support?
Officially it claims to support every language and every platform. Sounds like hype, right? I believe it.