TensorFlow Serving source code walkthrough
main.cc
The entry point constructs a Server and starts it:
tensorflow::serving::main::Server server;
const auto& status = server.BuildAndStart(options);
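For orientation, a minimal sketch of the main() around these two calls; the flag parsing that fills Options is elided, and the error-handling details are assumptions rather than a verbatim copy of the source:

#include <iostream>

#include "tensorflow_serving/model_servers/server.h"

int main(int argc, char** argv) {
  // The real main.cc fills Options from command-line flags
  // (--port, --model_name, --model_base_path, ...); elided here.
  tensorflow::serving::main::Server::Options options;

  tensorflow::serving::main::Server server;
  const auto& status = server.BuildAndStart(options);
  if (!status.ok()) {
    std::cerr << "Failed to start server: " << status.ToString() << std::endl;
    return -1;
  }
  server.WaitForTermination();  // block until the server shuts down
  return 0;
}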
server.cc
BuildAndStart registers three gRPC services on the same server:
builder.RegisterService(model_service_.get());
builder.RegisterService(prediction_service_.get());
builder.RegisterService(profiler_service_.get());
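These RegisterService calls are plain grpc::ServerBuilder usage; a standalone sketch of the pattern (the three service objects are members of Server in the real code, passed as parameters here so the snippet stands alone, and the address is illustrative):

#include <memory>
#include <string>

#include "grpcpp/grpcpp.h"

// Sketch: wire several gRPC services onto one listening port.
std::unique_ptr<grpc::Server> StartGrpcServer(const std::string& address,
                                              grpc::Service* model_service,
                                              grpc::Service* prediction_service,
                                              grpc::Service* profiler_service) {
  grpc::ServerBuilder builder;
  builder.AddListeningPort(address, grpc::InsecureServerCredentials());
  builder.RegisterService(model_service);       // model reload/status RPCs
  builder.RegisterService(prediction_service);  // inference RPCs
  builder.RegisterService(profiler_service);    // profiling/tracing RPCs
  return builder.BuildAndStart();  // non-blocking; caller keeps the handle alive
}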
prediction_service_impl.cc
PredictionServiceImpl implements the PredictionService RPCs; each handler takes the usual generated-gRPC triple (ServerContext*, const Request*, Response*) and delegates to TensorflowPredictor (see the sketch after the list):
::grpc::Status Predict()
::grpc::Status GetModelMetadata()
::grpc::Status Classify()
::grpc::Status Regress()
::grpc::Status MultiInference()
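Each of these handlers follows the same shape: build RunOptions, call the predictor, and map tensorflow::Status onto grpc::Status. A sketch of the Predict path, written as a free function so it stands alone; HandlePredict is a hypothetical name, and the real source additionally derives a session-run timeout from the gRPC deadline and converts the status with a ToGRPCStatus helper:

#include "grpcpp/grpcpp.h"
#include "tensorflow_serving/apis/predict.pb.h"
#include "tensorflow_serving/model_servers/server_core.h"
#include "tensorflow_serving/servables/tensorflow/predict_impl.h"

// HandlePredict is a hypothetical stand-in for PredictionServiceImpl::Predict.
::grpc::Status HandlePredict(tensorflow::serving::TensorflowPredictor* predictor,
                             tensorflow::serving::ServerCore* core,
                             const tensorflow::serving::PredictRequest* request,
                             tensorflow::serving::PredictResponse* response) {
  tensorflow::RunOptions run_options;  // real code sets a timeout from the RPC deadline
  const tensorflow::Status status =
      predictor->Predict(run_options, core, *request, response);
  if (status.ok()) return ::grpc::Status::OK;
  return ::grpc::Status(::grpc::StatusCode::INTERNAL, status.ToString());
}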
predict_impl.cc
TensorflowPredictor resolves the requested servable from the ServerCore and runs the session:
TensorflowPredictor::Predict(run_options, core_, *request, response);
TensorflowPredictor::PredictWithModelSpec(run_options, core, request.model_spec(), request, response);  // core is a ServerCore*
core->GetServableHandle(model_spec, &bundle)  // bundle is a ServableHandle<SavedModelBundle>
internal::RunPredict(run_options, bundle->meta_graph_def, bundle.id().version,
                     core->predict_response_tensor_serialization_option(),
                     bundle->session.get(), request, response,
                     thread_pool_options)
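To exercise this whole chain from outside, a minimal hedged gRPC client for Predict; the port, the model name, and the input key "x" are placeholders, not values taken from the walkthrough:

#include <iostream>

#include "grpcpp/grpcpp.h"
#include "tensorflow_serving/apis/prediction_service.grpc.pb.h"

int main() {
  auto channel = grpc::CreateChannel("localhost:8500",
                                     grpc::InsecureChannelCredentials());
  auto stub = tensorflow::serving::PredictionService::NewStub(channel);

  tensorflow::serving::PredictRequest request;
  request.mutable_model_spec()->set_name("my_model");  // placeholder model name

  tensorflow::TensorProto input;  // a single float as a 1-element tensor
  input.set_dtype(tensorflow::DT_FLOAT);
  input.mutable_tensor_shape()->add_dim()->set_size(1);
  input.add_float_val(1.0f);
  (*request.mutable_inputs())["x"] = input;  // "x" is a placeholder input key

  grpc::ClientContext context;
  tensorflow::serving::PredictResponse response;
  grpc::Status status = stub->Predict(&context, request, &response);
  std::cout << (status.ok() ? "ok" : status.error_message()) << std::endl;
  return status.ok() ? 0 : 1;
}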
server.cc
Back in BuildAndStart, the ServerCore that loads and manages the models is created before the services above are wired up:
ServerCore::Create(std::move(options), &server_core_)
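A hedged sketch of that creation step; only model_server_config is shown, and a real setup also fills in platform configuration and the other ServerCore::Options fields:

#include <memory>
#include <utility>

#include "tensorflow_serving/model_servers/server_core.h"

// Sketch: create the ServerCore that owns model loading and versioning.
tensorflow::Status CreateCore(
    const tensorflow::serving::ModelServerConfig& config,
    std::unique_ptr<tensorflow::serving::ServerCore>* server_core) {
  tensorflow::serving::ServerCore::Options options;
  options.model_server_config = config;  // which models to load, and from where
  // Other fields (platform_config_map, aspired_version_policy, ...) omitted.
  return tensorflow::serving::ServerCore::Create(std::move(options), server_core);
}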