[gRPC] C++ Async Server, Optimized Version with Multiple Service Interfaces

The official C++ async server API example is not very readable: it takes real effort to understand, the state machines are far from obvious, and the overall control flow is easy to get lost in. This post is a rewrite, based on a blog post found online, that aims to be straightforward to follow.

C++ Async Server Example, with Detailed Comments

The basic logic of implementing an async gRPC server in C++:

  • Build a data structure to hold each pending request and its context. Here that is HandlerContext, which wraps an incoming request.
  • At startup, register one HandlerContext per RPC method with the completion queue (CompletionQueue). When a request arrives, it is delivered into the matching HandlerContext. Because the server is asynchronous, later requests also need a HandlerContext waiting for them, so every time one is consumed a fresh empty one must be created and registered in its place.
  • When an RPC has finished, its HandlerContext is destroyed.
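The original post does not include the .proto file. The generated names used below (TestService::AsyncService, Requesthttp, set_httpresult, and so on) imply a service definition roughly like this hypothetical sketch; the message field names are assumptions:

syntax = "proto3";

// Hypothetical service definition inferred from the generated code below;
// the lowercase rpc names match the Requesthttp/Requestdownload/Requestupload
// methods the async service exposes.
service TestService {
  rpc http (HttpRequest) returns (HttpResponse) {}
  rpc download (DownloadRequest) returns (DownloadResponse) {}
  rpc upload (UploadRequest) returns (UploadResponse) {}
}

message HttpRequest { string httpvalue = 1; }           // field name assumed
message HttpResponse { string httpresult = 1; }         // implied by set_httpresult()
message DownloadRequest { string downloadvalue = 1; }   // field name assumed
message DownloadResponse { string downloadresult = 1; } // implied by set_downloadresult()
message UploadRequest { string uploadvalue = 1; }       // field name assumed
message UploadResponse { string uploadresult = 1; }     // implied by set_uploadresult()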

The key parts of the code below are the logic in the Run() method and the setup of HandlerContext; every step is commented, so it can be read in detail.

// The official async example's readability is poor -- the state machine winds around and is hard to follow -- so this is a rewrite based on a blog post found online; the class name is arbitrary
class AsyncTestServiceImplNew final
{
private:

  // Address this server listens on
  std::string server_address_;
  // This server's completion queue
  std::unique_ptr<ServerCompletionQueue> cq_;
  // This server's async service
  TestService::AsyncService service_;
  // Server instance
  std::unique_ptr<Server> server_;
  
  struct HandlerContextBase
  {
    int type_;          // which RPC this request is for: 1 = http, 2 = download, 3 = upload; add more as needed
    int status_;        // processing state: 1 = handle the request and build the response, 2 = the response has been sent
    ServerContext ctx_; // rpc context for this call
  };
  // Per-request context structure
  template <typename RequestType, typename ResponseType>
  struct HandlerContext : public HandlerContextBase
  {
    RequestType req_;                                   // request message
    ResponseType resp_;                                 // response message
    ServerAsyncResponseWriter<ResponseType> responder_; // responder used to send the reply
    HandlerContext() : responder_(&ctx_) {}             // constructor wires the responder to this call's context
  };
  // Concrete context types for each RPC method
  typedef HandlerContext<HttpRequest, HttpResponse> HandlerHttpContext;
  typedef HandlerContext<DownloadRequest, DownloadResponse> HandlerDownloadContext;
  typedef HandlerContext<UploadRequest, UploadResponse> HandlerUploadContext;
  
public:
  ~AsyncTestServiceImplNew()
  {
    server_->Shutdown();
    // Always shut down the completion queue after the server
    cq_->Shutdown();
    // Drain the queue: after Shutdown(), Next() keeps returning leftover tags
    // (with ok == false) until it is empty, and the queue must be fully
    // drained before it is destroyed
    void *tag = nullptr;
    bool ok = false;
    while (cq_->Next(&tag, &ok)) {}
  }

  // Construct with "IP:Port"
  AsyncTestServiceImplNew(std::string server_address) : server_address_(server_address) {}

  // Shutting down the server and the queue is handled in the destructor
  void Run()
  {
    // std::string server_address = "localhost:50052";
    // Server builder
    ServerBuilder builder;
    // Bind the server's IP and port; the second argument means the channel is unauthenticated
    builder.AddListeningPort(server_address_, grpc::InsecureServerCredentials());
    // Register the service
    builder.RegisterService(&service_);
    // Create this server's completion queue
    cq_ = builder.AddCompletionQueue();
    // Build and start the server
    server_ = builder.BuildAndStart();
    std::cout << "AsyncTestServer_New is listening on " << server_address_ << std::endl;

    // Create a request context for each RPC method, then register the request with the service
    HandlerHttpContext *http_context = new HandlerHttpContext;
    http_context->type_ = 1;
    http_context->status_ = 1;
    HandlerDownloadContext *download_context = new HandlerDownloadContext;
    download_context->type_ = 2;
    download_context->status_ = 1;
    HandlerUploadContext *upload_context = new HandlerUploadContext;
    upload_context->type_ = 3;
    upload_context->status_ = 1;

    // Register the handlers. Parameters, in order: rpc context, request object, async responder,
    // completion queue for new rpc calls, completion queue for completion notifications,
    // and a unique tag identifying this request's context
    service_.Requesthttp(&http_context->ctx_, &http_context->req_, &http_context->responder_, cq_.get(), cq_.get(), http_context);
    service_.Requestdownload(&download_context->ctx_, &download_context->req_, &download_context->responder_, cq_.get(), cq_.get(), download_context);
    service_.Requestupload(&upload_context->ctx_, &upload_context->req_, &upload_context->responder_, cq_.get(), cq_.get(), upload_context);

    // Create a thread pool to run the RPC handlers
    ThreadPool pool(THREAD_POOL_SIZE); // define THREAD_POOL_SIZE yourself
    // Keep pulling events off the completion queue; every tag seen here was registered above
    while (true)
    {
      HandlerContextBase *handler_context = nullptr;
      bool ok = false;
      GPR_ASSERT(cq_->Next((void **)&handler_context, &ok));
      GPR_ASSERT(ok);
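      // Note: ok == false means the event did not complete successfully (for
      // example, the queue is shutting down); this simple example just asserts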

      // Which RPC this is: 1 = http, 2 = download, 3 = upload
      int type = handler_context->type_;
      // Dispatch on status: 1 means the RPC still needs handling, 2 means the response
      // has been sent and the request context can be destroyed
      if (handler_context->status_ == 2)
      {
        switch (type)
        {
        case 1:
          delete (HandlerHttpContext *)handler_context;
          break;
        case 2:
          delete (HandlerDownloadContext *)handler_context;
          break;
        case 3:
          delete (HandlerUploadContext *)handler_context;
          break;
        }
        continue;
      }
      // A context was just consumed off the queue for this request, so a fresh one
      // must be registered for the next request of the same type
      switch (type)
      {
      case 1:
      {
        HandlerHttpContext *http_context = new HandlerHttpContext;
        http_context->type_ = 1;
        http_context->status_ = 1;
        // Register the handler again; parameters as above: rpc context, request object,
        // async responder, the two completion queues, and the new context's unique tag
        service_.Requesthttp(&http_context->ctx_, &http_context->req_, &http_context->responder_, cq_.get(), cq_.get(), http_context);
      }
      break;
      case 2:
      {
        HandlerDownloadContext *download_context = new HandlerDownloadContext;
        download_context->type_ = 2;
        download_context->status_ = 1;
        service_.Requestdownload(&download_context->ctx_, &download_context->req_, &download_context->responder_, cq_.get(), cq_.get(), download_context);
      }
      break;
      case 3:
      {
        HandlerUploadContext *upload_context = new HandlerUploadContext;
        upload_context->type_ = 3;
        upload_context->status_ = 1;
        service_.Requestupload(&upload_context->ctx_, &upload_context->req_, &upload_context->responder_, cq_.get(), cq_.get(), upload_context);
      }
      break;
      }

      // Hand off the work for the current request context to the thread pool
      pool.enqueue([type, handler_context, this]()
                   {
                     switch (type)
                     {
                     case 1:
                     {
                       HandlerHttpContext *h = (HandlerHttpContext *)handler_context;
                       Status status = http(&h->ctx_, &h->req_, &h->resp_);
                       h->status_ = 2; // mark the call complete; ready to send the response
                       // Send the response asynchronously via responder_; the three arguments are
                       // the response to send, the status code, and this request's unique tag
                       h->responder_.Finish(h->resp_, status, handler_context);
                     }
                     break;
                     case 2:
                     {
                       HandlerDownloadContext *h = (HandlerDownloadContext *)handler_context;
                       Status status = download(&h->ctx_, &h->req_, &h->resp_);
                       h->status_ = 2;
                       h->responder_.Finish(h->resp_, status, handler_context);
                     }
                     break;
                     case 3:
                     {
                       HandlerUploadContext *h = (HandlerUploadContext *)handler_context;
                       Status status = upload(&h->ctx_, &h->req_, &h->resp_);
                       h->status_ = 2;
                       h->responder_.Finish(h->resp_, status, handler_context);
                     }
                     break;
                     }
                   });
    }
  }

private:
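  // Business logic for each RPC; these run on thread-pool threads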
  Status http(ServerContext *context, const HttpRequest *request,
              HttpResponse *response)
  {
    response->set_httpresult("http is ok");
    return Status::OK;
  }

  Status download(ServerContext *context, const DownloadRequest *request,
                  DownloadResponse *response)
  {
    response->set_downloadresult("download is ok");
    return Status::OK;
  }
  Status upload(ServerContext *context, const UploadRequest *request,
                UploadResponse *response)
  {
    response->set_uploadresult("upload is ok");
    return Status::OK;
  }
};
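For completeness, here is a minimal, hypothetical entry point (not part of the original post); the address reuses the "localhost:50052" value from the commented-out line above:

// Hypothetical usage sketch: construct the server with an address and run it.
// Run() loops forever, so the process serves until it is killed.
int main()
{
    AsyncTestServiceImplNew server("localhost:50052");
    server.Run();
    return 0;
}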

Reference: https://www.cnblogs.com/oloroso/p/11345266.html

Thread Pool Source

A thread pool is used so that multiple RPC handlers can run at the same time. Its code, taken from GitHub, is included below as well.
GitHub repo: https://github.com/progschj/ThreadPool.git

#ifndef THREAD_POOL_H
#define THREAD_POOL_H

#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>

class ThreadPool
{
public:
    ThreadPool(size_t);
    template <class F, class... Args>
    auto enqueue(F &&f, Args &&...args)
        -> std::future<typename std::result_of<F(Args...)>::type>;
    ~ThreadPool();

private:
    // need to keep track of threads so we can join them
    std::vector<std::thread> workers;
    // the task queue
    std::queue<std::function<void()>> tasks;

    // synchronization
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop;
};

// the constructor just launches some amount of workers
inline ThreadPool::ThreadPool(size_t threads)
    : stop(false)
{
    for (size_t i = 0; i < threads; ++i)
        workers.emplace_back(
            [this]
            {
                for (;;)
                {
                    std::function<void()> task;

                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        this->condition.wait(lock,
                                             [this]
                                             { return this->stop || !this->tasks.empty(); });
                        if (this->stop && this->tasks.empty())
                            return;
                        task = std::move(this->tasks.front());
                        this->tasks.pop();
                    }

                    task();
                }
            });
}

// add new work item to the pool
template <class F, class... Args>
auto ThreadPool::enqueue(F &&f, Args &&...args)
    -> std::future<typename std::result_of<F(Args...)>::type>
{
    using return_type = typename std::result_of<F(Args...)>::type;

    auto task = std::make_shared<std::packaged_task<return_type()>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...));

    std::future<return_type> res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(queue_mutex);

        // don't allow enqueueing after stopping the pool
        if (stop)
            throw std::runtime_error("enqueue on stopped ThreadPool");

        tasks.emplace([task]()
                      { (*task)(); });
    }
    condition.notify_one();
    return res;
}

// the destructor joins all threads
inline ThreadPool::~ThreadPool()
{
    {
        std::unique_lock<std::mutex> lock(queue_mutex);
        stop = true;
    }
    condition.notify_all();
    for (std::thread &worker : workers)
        worker.join();
}

#endif
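As a quick sanity check of the pool on its own, here is a small hypothetical example (the pool size and task are arbitrary choices); enqueue returns a std::future for the task's result:

#include <iostream>
// assuming the header above is saved as ThreadPool.h
#include "ThreadPool.h"

int main()
{
    ThreadPool pool(4); // 4 worker threads, an arbitrary choice
    // enqueue hands the task to a worker and returns a std::future;
    // get() blocks until the task has run
    auto result = pool.enqueue([](int a, int b) { return a + b; }, 1, 2);
    std::cout << result.get() << std::endl; // prints 3
    return 0;
}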