Getting started with boost.asio
Network connections come in two flavours: short-lived and long-lived. As the names suggest, a long connection stays open after connect, like the data Route module used in our company. A short connection is established with a three-way handshake, exchanges one round of data, and is then torn down with a four-way handshake; the next exchange requires another three-way handshake. Each model has its trade-offs. Short connections have the advantage that, when the number of clients is large, every client gets a turn at the server; the drawback is that every setup (three-way handshake) and teardown (four-way handshake) costs resources. Long connections save that overhead, but with a large number of clients many of them simply cannot get a connection. Typical uses: long connections suit I/O-heavy scenarios with lots of data to transfer and a modest number of connections, such as the data Route module; short connections suit web sites and HTTP services, such as Apache. Some of our bank interface modules currently use short connections and are built on the boost.asio library, so here is a quick introduction to getting started with it.
First, a client example:
#define BOOST_REGEX_NO_LIB
#include <boost/asio.hpp>

void asio_cli1()
{
    // Constructing io_service on the stack invokes its constructor automatically.
    boost::asio::io_service service;

    // from_string converts a dotted-decimal IP string into an address object,
    // much like sockaddr_in.sin_addr.S_un.S_addr = inet_addr("10.243.141.16");
    boost::asio::ip::tcp::endpoint ep(
        boost::asio::ip::address::from_string("10.243.141.16"), 9000);

    // Calls the constructor basic_stream_socket(boost::asio::io_service& io_service).
    boost::asio::ip::tcp::socket sock(service);
    sock.connect(ep);

    size_t len = sock.send(boost::asio::buffer("hello asio\n"));
    sock.close();
}
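If the peer sends something back, the same synchronous calls can read the reply. Below is a minimal sketch, not part of the original example: it reuses the address and port from above, switches to the error_code overloads instead of exceptions, and assumes the server actually replies (see the echo variant after the server example).

#define BOOST_REGEX_NO_LIB
#include <iostream>
#include <boost/asio.hpp>

void asio_cli2()
{
    boost::asio::io_service service;
    boost::asio::ip::tcp::endpoint ep(
        boost::asio::ip::address::from_string("10.243.141.16"), 9000);

    boost::asio::ip::tcp::socket sock(service);
    boost::system::error_code ec;

    // The error_code overloads report failures through ec instead of throwing.
    sock.connect(ep, ec);
    if (ec) { std::cerr << ec.message() << std::endl; return; }

    sock.send(boost::asio::buffer("hello asio\n"));

    // Read whatever the server sends back (blocks until some data arrives).
    char reply[1024] = {0};
    size_t n = sock.read_some(boost::asio::buffer(reply), ec);
    if (!ec)
        std::cout.write(reply, n) << std::endl;

    sock.close();
}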
This is much more convenient than calling the system API directly, and it is cross-platform. By replacing the data in the buffer with each bank's message format, we can simulate a request initiated from the bank side.
A server-side example:
#define BOOST_REGEX_NO_LIB
#include <iostream>
#include <boost/asio.hpp>
using namespace std;

void asio_svr()
{
    try
    {
        boost::asio::io_service io;
        //boost::asio::ip::tcp::endpoint ep(boost::asio::ip::address::from_string("10.253.41.49"), 9000);
        //boost::asio::ip::tcp::acceptor acceptor(io, ep);
        boost::asio::ip::tcp::acceptor acceptor(io,
            boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), 9000));
        cout << acceptor.local_endpoint().address() << endl;

        char buf[1024] = {0};
        for ( ; ; )
        {
            boost::asio::ip::tcp::socket sock(io);
            acceptor.accept(sock);
            cout << "cli:" << sock.remote_endpoint().address() << endl;
            sock.receive(boost::asio::buffer(buf));
            cout << buf << endl;
        }
    }
    catch (std::exception& e)
    {
        std::cerr << e.what() << std::endl;
    }
}
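To give the client sketch above something to read back, here is a hedged variation of the synchronous server that echoes the received bytes to the client. The echo, and making use of the byte count that receive() returns, are the only additions; everything else mirrors the example above.

#define BOOST_REGEX_NO_LIB
#include <iostream>
#include <boost/asio.hpp>

void asio_echo_svr()
{
    try
    {
        boost::asio::io_service io;
        boost::asio::ip::tcp::acceptor acceptor(io,
            boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), 9000));

        for ( ; ; )
        {
            boost::asio::ip::tcp::socket sock(io);
            acceptor.accept(sock);
            std::cout << "cli:" << sock.remote_endpoint().address() << std::endl;

            char buf[1024] = {0};
            size_t n = sock.receive(boost::asio::buffer(buf)); // bytes received
            std::cout.write(buf, n) << std::endl;

            // Send back exactly the bytes that were received.
            sock.send(boost::asio::buffer(buf, n));
        }
    }
    catch (std::exception& e)
    {
        std::cerr << e.what() << std::endl;
    }
}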
Next, something more involved: asynchronous mode, which on Windows is backed by the IOCP model (I/O Completion Port). The example source comes from the official documentation: http://www.boost.org/doc/libs/1_50_0/doc/html/boost_asio/example/allocation/server.cpp
Here 1_50_0 is the version number; change it to the version you are using, since each release ships its own copy of the examples.
#include <cstdlib>
#include <iostream>
#include <boost/aligned_storage.hpp>
#include <boost/array.hpp>
#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/asio.hpp>

using boost::asio::ip::tcp;

// Class to manage the memory to be used for handler-based custom allocation.
// It contains a single block of memory which may be returned for allocation
// requests. If the memory is in use when an allocation request is made, the
// allocator delegates allocation to the global heap.
class handler_allocator
  : private boost::noncopyable
{
public:
  handler_allocator()
    : in_use_(false)
  {
  }

  void* allocate(std::size_t size)
  {
    if (!in_use_ && size < storage_.size)
    {
      in_use_ = true;
      return storage_.address();
    }
    else
    {
      return ::operator new(size);
    }
  }

  void deallocate(void* pointer)
  {
    if (pointer == storage_.address())
    {
      in_use_ = false;
    }
    else
    {
      ::operator delete(pointer);
    }
  }

private:
  // Storage space used for handler-based custom memory allocation.
  boost::aligned_storage<1024> storage_;

  // Whether the handler-based custom allocation storage has been used.
  bool in_use_;
};

// Wrapper class template for handler objects to allow handler memory
// allocation to be customised. Calls to operator() are forwarded to the
// encapsulated handler.
template <typename Handler>
class custom_alloc_handler
{
public:
  custom_alloc_handler(handler_allocator& a, Handler h)
    : allocator_(a),
      handler_(h)
  {
  }

  template <typename Arg1>
  void operator()(Arg1 arg1)
  {
    handler_(arg1);
  }

  template <typename Arg1, typename Arg2>
  void operator()(Arg1 arg1, Arg2 arg2)
  {
    handler_(arg1, arg2);
  }

  friend void* asio_handler_allocate(std::size_t size,
      custom_alloc_handler<Handler>* this_handler)
  {
    return this_handler->allocator_.allocate(size);
  }

  friend void asio_handler_deallocate(void* pointer, std::size_t /*size*/,
      custom_alloc_handler<Handler>* this_handler)
  {
    this_handler->allocator_.deallocate(pointer);
  }

private:
  handler_allocator& allocator_;
  Handler handler_;
};

// Helper function to wrap a handler object to add custom allocation.
template <typename Handler>
inline custom_alloc_handler<Handler> make_custom_alloc_handler(
    handler_allocator& a, Handler h)
{
  return custom_alloc_handler<Handler>(a, h);
}

class session
  : public boost::enable_shared_from_this<session>
{
public:
  session(boost::asio::io_service& io_service)
    : socket_(io_service)
  {
  }

  tcp::socket& socket()
  {
    return socket_;
  }

  void start()
  {
    socket_.async_read_some(boost::asio::buffer(data_),
        make_custom_alloc_handler(allocator_,
          boost::bind(&session::handle_read,
            shared_from_this(),
            boost::asio::placeholders::error,
            boost::asio::placeholders::bytes_transferred)));
  }

  void handle_read(const boost::system::error_code& error,
      size_t bytes_transferred)
  {
    if (!error)
    {
      boost::asio::async_write(socket_,
          boost::asio::buffer(data_, bytes_transferred),
          make_custom_alloc_handler(allocator_,
            boost::bind(&session::handle_write,
              shared_from_this(),
              boost::asio::placeholders::error)));
    }
  }

  void handle_write(const boost::system::error_code& error)
  {
    if (!error)
    {
      socket_.async_read_some(boost::asio::buffer(data_),
          make_custom_alloc_handler(allocator_,
            boost::bind(&session::handle_read,
              shared_from_this(),
              boost::asio::placeholders::error,
              boost::asio::placeholders::bytes_transferred)));
    }
  }

private:
  // The socket used to communicate with the client.
  tcp::socket socket_;

  // Buffer used to store data received from the client.
  boost::array<char, 1024> data_;

  // The allocator to use for handler-based custom memory allocation.
  handler_allocator allocator_;
};

typedef boost::shared_ptr<session> session_ptr;

class server
{
public:
  server(boost::asio::io_service& io_service, short port)
    : io_service_(io_service),
      acceptor_(io_service, tcp::endpoint(tcp::v4(), port))
  {
    session_ptr new_session(new session(io_service_));
    acceptor_.async_accept(new_session->socket(),
        boost::bind(&server::handle_accept, this, new_session,
          boost::asio::placeholders::error));
  }

  void handle_accept(session_ptr new_session,
      const boost::system::error_code& error)
  {
    if (!error)
    {
      new_session->start();
    }

    new_session.reset(new session(io_service_));
    acceptor_.async_accept(new_session->socket(),
        boost::bind(&server::handle_accept, this, new_session,
          boost::asio::placeholders::error));
  }

private:
  boost::asio::io_service& io_service_;
  tcp::acceptor acceptor_;
};

int main(int argc, char* argv[])
{
  try
  {
    if (argc != 2)
    {
      std::cerr << "Usage: server <port>\n";
      return 1;
    }

    boost::asio::io_service io_service;

    using namespace std; // For atoi.
    server s(io_service, atoi(argv[1]));

    io_service.run();
  }
  catch (std::exception& e)
  {
    std::cerr << "Exception: " << e.what() << "\n";
  }

  return 0;
}
Stepping through the server in a debugger shows execution blocking at io_service.run(). Internally run() loops waiting for completion events, so it keeps listening for client connections; on Windows the implementation lives in win_iocp_io_service.ipp. The official example is fairly involved, so here is a simpler one, adapted from http://blog.csdn.net/zhuky/article/details/5364685
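A small aside on run(): it only blocks while there is outstanding asynchronous work. The sketch below is a standalone illustration, not taken from the article's sources; port 9001 and the on_accept handler are made up for the example. It shows run() returning immediately when nothing is pending, and blocking once an async_accept is outstanding.

#include <iostream>
#include <boost/asio.hpp>

// Handler used purely for illustration.
void on_accept(const boost::system::error_code& /*ec*/)
{
    std::cout << "a client connected" << std::endl;
}

int main()
{
    boost::asio::io_service io;

    // No asynchronous operation is pending, so run() has no work to do
    // and returns straight away.
    io.run();
    std::cout << "run() returned immediately" << std::endl;

    // Now keep an async_accept outstanding: that counts as work, so the
    // next run() blocks until the handler has been executed.
    boost::asio::ip::tcp::acceptor acceptor(io,
        boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), 9001));
    boost::asio::ip::tcp::socket sock(io);
    acceptor.async_accept(sock, on_accept);

    io.reset(); // needed because the previous run() already ran out of work
    io.run();   // blocks here until a client connects and on_accept runs
    return 0;
}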
Write a server class, as follows:
#include <iostream>
#include <string>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/smart_ptr.hpp>

using namespace boost::asio;
using boost::system::error_code;
using ip::tcp;

// Accepts multiple client connections and sends each client one message.
// If the send succeeds, the sent data is printed.
class CAsyncService
{
public:
    CAsyncService(io_service &iosev, size_t port)
        : m_iosrv(iosev),
          m_acceptor(iosev, tcp::endpoint(tcp::v4(), port))
    {
    }

    void Start()
    {
        // Start waiting for a connection (non-blocking).
        boost::shared_ptr<tcp::socket> psocket(new tcp::socket(m_iosrv));
        // The completion handler only receives an error_code, so boost::bind
        // is used to carry the socket along as well.
        m_acceptor.async_accept(*psocket,
            boost::bind(&CAsyncService::AcceptHandler, this, psocket, _1));
    }

    // AcceptHandler fires when a client connects.
    void AcceptHandler(boost::shared_ptr<tcp::socket> psocket, error_code ec)
    {
        if (ec) return;

        // Keep accepting further connections.
        Start();

        // Show the remote IP.
        std::cout << psocket->remote_endpoint().address() << std::endl;

        // Send a message (non-blocking).
        boost::shared_ptr<std::string> pstr(new std::string("Hello async world!"));
        psocket->async_write_some(buffer(*pstr),
            boost::bind(&CAsyncService::WriteHandler, this, pstr, _1, _2));
    }

    // WriteHandler fires when the asynchronous write completes.
    void WriteHandler(boost::shared_ptr<std::string> pstr, error_code ec,
                      size_t bytes_transferred)
    {
        if (ec)
            std::cout << "send failed!" << std::endl;
        else
            std::cout << *pstr << " sent" << std::endl;
    }

private:
    io_service &m_iosrv;
    ip::tcp::acceptor m_acceptor;
};
And a simple main function to drive it:
int main(int argc, char* argv[])
{
    io_service io;
    CAsyncService ayncserv(io, 9000);
    ayncserv.Start();
    io.run(); // start waiting for connections
    return 0;
}
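To watch CAsyncService do its job, a plain synchronous client like the very first example can connect and read the greeting. A minimal sketch, assuming the server is running locally on port 9000 (adjust the address as needed):

#include <iostream>
#include <boost/asio.hpp>

// Connects to the CAsyncService above and prints the greeting it sends.
// 127.0.0.1 is an assumption; replace it with the server's real address.
int main()
{
    boost::asio::io_service io;
    boost::asio::ip::tcp::endpoint ep(
        boost::asio::ip::address::from_string("127.0.0.1"), 9000);

    boost::asio::ip::tcp::socket sock(io);
    sock.connect(ep);

    char buf[64] = {0};
    size_t n = sock.read_some(boost::asio::buffer(buf));
    std::cout.write(buf, n) << std::endl; // expect "Hello async world!"
    return 0;
}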