.NET Core source code analysis - how a web app starts and handles requests (Part 2): starting Kestrel
The previous post covered how a .NET Core web app starts and accepts requests; next we explore how the Kestrel server actually does this work.
1. The entry point of the Kestrel server: KestrelServer.Start(Microsoft.AspNetCore.Hosting.Server.IHttpApplication)
The Frame instances created by FrameFactory are eventually handed to libuv's loop callbacks to receive requests, but a fair amount of initialization has to happen before that. Below we walk through it piece by piece.
public void Start<TContext>(IHttpApplication<TContext> application)
{
var engine = new KestrelEngine(new ServiceContext
{
FrameFactory = context =>
{
return new Frame<TContext>(application, context);
},
AppLifetime = _applicationLifetime,
Log = trace,
ThreadPool = new LoggingThreadPool(trace),
DateHeaderValueManager = dateHeaderValueManager,
ServerOptions = Options
});
//Start the engine: configure and start libuv
engine.Start(threadCount);
//Create a server for each bound address to accept requests, i.e. start a TCP listener on each ip:port
foreach (var address in _serverAddresses.Addresses.ToArray())
{
//(abbreviated: the URL string is parsed into a ServerAddress such as ipv4Address before this call)
engine.CreateServer(ipv4Address);
}
}
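Before moving on, note where the addresses iterated above come from: the hosting layer collects them from UseUrls, the ASPNETCORE_URLS environment variable, or the default http://localhost:5000. A minimal host-setup sketch (typical usage, not taken from the Kestrel source; Startup is assumed to be your own class):
//Each URL registered here ends up in _serverAddresses.Addresses,
//and KestrelServer.Start calls engine.CreateServer(...) once per parsed address.
var host = new WebHostBuilder()
    .UseKestrel()
    .UseUrls("http://localhost:5000", "http://localhost:5001")
    .UseStartup<Startup>()
    .Build();
host.Run();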
2. Starting the Kestrel engine: engine.Start(threadCount)
engine.Start(threadCount) spins up threadCount KestrelThreads and initializes the libuv components; each bound address will later get a listener on every one of these threads.
Each thread initializes libuv, registers the loop callbacks, and then runs the libuv loop (a sketch of thread.StartAsync() follows the Start method below).
public void Start(int count)
{
for (var index = 0; index < count; index++)
{
Threads.Add(new KestrelThread(this));
}
foreach (var thread in Threads)
{
thread.StartAsync().Wait();
}
}
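thread.StartAsync() itself is not listed in this post. A minimal sketch of what it plausibly does (the _thread field is illustrative, assumed to be created as new Thread(ThreadStart)): start the managed thread whose entry point is the ThreadStart method shown next, and return the task that ThreadStart completes once libuv is initialized.
//Sketch only - the real KestrelThread does more bookkeeping.
public Task StartAsync()
{
    var tcs = new TaskCompletionSource<int>();
    _thread.Start(tcs);   // _thread's entry point is ThreadStart(object) below
    return tcs.Task;      // set (or faulted) inside ThreadStart after _loop.Init/_post.Init
}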
private void ThreadStart(object parameter)
{
lock (_startSync)
{
var tcs = (TaskCompletionSource<int>) parameter;
try
{
//Initialize the loop
_loop.Init(_engine.Libuv);
//Register the loop's post callback
//EnqueueCloseHandle: callback invoked when a handle's resources are released; it adds an item to a queue, and the event loop drains that queue to finish releasing the resource
_post.Init(_loop, OnPost, EnqueueCloseHandle);
//Register the heartbeat timer
_heartbeatTimer.Init(_loop, EnqueueCloseHandle);
//Start the heartbeat timer
_heartbeatTimer.Start(OnHeartbeat, timeout: HeartbeatMilliseconds, repeat: HeartbeatMilliseconds);
_initCompleted = true;
tcs.SetResult(0);
}
catch (Exception ex)
{
tcs.SetException(ex);
return;
}
}
try
{
//The current thread blocks inside Run() while the loop is running
_loop.Run();
//When the application stops or shuts down, libuv returns from Run() and the thread finishes cleaning up resources
if (_stopImmediate)
{
// thread-abort form of exit, resources will be leaked
return;
}
// Run the loop one more time to delete the open handles
_post.Reference();
_post.Dispose();
_heartbeatTimer.Dispose();
// Ensure the Dispose operations complete in the event loop.
_loop.Run();
_loop.Dispose();
}
catch (Exception ex)
{
_closeError = ExceptionDispatchInfo.Capture(ex);
// Request shutdown so we can rethrow this exception
// in Stop, which should be observable.
_appLifetime.StopApplication();
}
finally
{
_threadTcs.SetResult(null);
}
}
3. Once libuv is up, the next step is registering the TCP listeners.
Back in KestrelServer.Start from section 1, the next call is engine.CreateServer(...). This works differently from TcpListener in .NET, where you simply bind, start, and accept. libuv is built around multiplexed, event-driven I/O on a loop, which is the key to its high concurrency.
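For contrast, the traditional blocking accept pattern mentioned above looks roughly like this (a comparison sketch only, not Kestrel code; HandleClient is a hypothetical handler for the accepted connection):
//Classic System.Net.Sockets.TcpListener: bind/listen, then accept one connection at a time.
static async Task AcceptLoopAsync()
{
    var listener = new TcpListener(IPAddress.Loopback, 5000);
    listener.Start();                                        // bind + listen
    while (true)
    {
        var client = await listener.AcceptTcpClientAsync();  // await one accepted connection at a time
        var handlerTask = Task.Run(() => HandleClient(client)); // push the connection to the thread pool
    }
}
Kestrel instead registers an accept callback with the libuv event loop, so a loop thread is woken only when there is actual I/O to do and can multiplex many sockets; the CreateServer code below sets this up per thread.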
public IDisposable CreateServer(ServerAddress address)
{
var usingPipes = address.IsUnixPipe;
var pipeName = (Libuv.IsWindows ? @"\\.\pipe\kestrel_" : "/tmp/kestrel_") + Guid.NewGuid().ToString("n");
var single = Threads.Count == 1;
var first = true;
foreach (var thread in Threads)
{
if(single){}//the single-thread case (and the IDisposable bookkeeping) is omitted in this excerpt; real deployments don't run it this way
else if (first)
{
//The first thread creates the primary listener (a pipe or TCP listener depending on the address)
var listener = usingPipes
? (ListenerPrimary)new PipeListenerPrimary(ServiceContext)
: new TcpListenerPrimary(ServiceContext);
listener.StartAsync(pipeName, address, thread).Wait();
}
else
{
//The remaining threads listen on the same ip:port through secondary listeners, which receive connections dispatched from the primary over the named pipe
var listener = usingPipes
? (ListenerSecondary)new PipeListenerSecondary(ServiceContext)
: new TcpListenerSecondary(ServiceContext);
listener.StartAsync(pipeName, address, thread).Wait();
}
first = false;
}
}
Now for the details of starting the TCP listener; here we only look at TcpListenerPrimary.
First, the inheritance chain: TcpListenerPrimary --> ListenerPrimary --> Listener. Keeping this in mind makes the code below easier to follow.
You will see thread.Post/PostAsync everywhere in the following code. It marshals the given action onto the libuv loop thread and completes asynchronously once the action has run there; callbacks like this are another central libuv concept.
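Post/PostAsync themselves are not listed in this post. Conceptually (a simplified sketch; apart from _post, the member names are illustrative) they enqueue a work item and wake the loop through the UvAsyncHandle _post registered in ThreadStart, whose OnPost callback then runs the queued actions on the loop thread:
//Simplified sketch of the post mechanism, not the exact source.
private readonly object _workSync = new object();
private readonly Queue<Tuple<Action<object>, object>> _workAdding = new Queue<Tuple<Action<object>, object>>();
public void Post(Action<object> callback, object state)
{
    lock (_workSync)
    {
        _workAdding.Enqueue(Tuple.Create(callback, state));
    }
    _post.Send();   // uv_async_send: wakes the loop, which invokes OnPost on the loop thread
}
public Task PostAsync(Action<object> callback, object state)
{
    var tcs = new TaskCompletionSource<object>();
    Post(s =>
    {
        callback(s);
        tcs.SetResult(null);   // completes once the action has actually run on the loop thread
    }, state);
    return tcs.Task;
}
//OnPost (registered via _post.Init in ThreadStart) drains the queue on the loop thread.
private void OnPost()
{
    Queue<Tuple<Action<object>, object>> work;
    lock (_workSync)
    {
        work = new Queue<Tuple<Action<object>, object>>(_workAdding);
        _workAdding.Clear();
    }
    while (work.Count > 0)
    {
        var item = work.Dequeue();
        item.Item1(item.Item2);
    }
}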
1. Continuing from the code above, we enter TcpListenerPrimary.StartAsync(); the method lives in ListenerPrimary.
public async Task StartAsync(string pipeName, ServerAddress address, KestrelThread thread)
{
_pipeName = pipeName;
await StartAsync(address, thread).ConfigureAwait(false);
await Thread.PostAsync(state => ((ListenerPrimary)state).PostCallback(), this).ConfigureAwait(false);
}
2. From there we go into StartAsync(address, thread), a method on the base class Listener.
public Task StartAsync(ServerAddress address, KestrelThread thread)
{
ServerAddress = address; Thread = thread;
var tcs = new TaskCompletionSource<int>(this);
Thread.Post(state =>
{
var tcs2 = (TaskCompletionSource<int>)state;
var listener = ((Listener)tcs2.Task.AsyncState);
//Create the listen socket
listener.ListenSocket = listener.CreateListenSocket();
//Start listening: libuv registers the listener with its connection callback and the backlog size
ListenSocket.Listen(Constants.ListenBacklog, ConnectionCallback, this);
tcs2.SetResult(0);
}, tcs);
return tcs.Task;
}
protected override UvStreamHandle CreateListenSocket()
{
//Initialize the socket and bind it to the address
var socket = new UvTcpHandle(Log);
socket.Init(Thread.Loop, Thread.QueueCloseHandle);
//NoDelay controls Nagle's algorithm (true disables it); see the options sketch after this method
socket.NoDelay(ServerOptions.NoDelay);
socket.Bind(ServerAddress);
// If requested port was "0", replace with assigned dynamic port.
ServerAddress.Port = socket.GetSockIPEndPoint().Port;
return socket;
}
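ServerOptions here is the KestrelServerOptions instance handed down from KestrelServer. Under the 1.x options surface (an assumption worth checking against your exact version), NoDelay and the libuv thread count are typically set when the host is built, in the same style as the host-setup sketch earlier:
//Usage sketch (assumed 1.x KestrelServerOptions properties):
var host = new WebHostBuilder()
    .UseKestrel(options =>
    {
        options.NoDelay = true;   // flows into socket.NoDelay(ServerOptions.NoDelay) above
        options.ThreadCount = 2;  // number of libuv loop threads started by engine.Start
    })
    .UseStartup<Startup>()
    .Build();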
Continuing on: after ListenSocket.Listen succeeds, libuv invokes the ConnectionCallback function whenever a new connection arrives.
ConnectionCallback is where the important accept work happens:
step 1: a connection arrives on the listening socket and libuv calls ConnectionCallback.
step 2: initialize the accept socket and accept the connection from the listening socket.
step 3: dispatch the accepted socket; when it is handled on this (primary) listener, create a Connection.
step 4: create the Connection and start it.
step 5: the new Connection wires up a Frame.
step 6: start the Frame.
step 7: called once by the Connection class to begin the RequestProcessingAsync loop.
step 8: loop processing requests, handing each one up to the application.
private static void ConnectionCallback(UvStreamHandle stream, int status, Exception error, object state)
{
var listener = (Listener)state;
listener.OnConnection(stream, status);//step 1
}
protected override void OnConnection(UvStreamHandle listenSocket, int status)//step 2
{
var acceptSocket = new UvTcpHandle(Log);
acceptSocket.Init(Thread.Loop, Thread.QueueCloseHandle);
acceptSocket.NoDelay(ServerOptions.NoDelay);
listenSocket.Accept(acceptSocket);
DispatchConnection(acceptSocket);
}
protected override void DispatchConnection(UvStreamHandle socket)// step 3
{
//Round-robin across the secondary listeners' pipes plus this primary listener itself
var index = _dispatchIndex++ % (_dispatchPipes.Count + 1);
if (index == _dispatchPipes.Count)
{
//Handle the connection on the primary listener's own loop (step 4 below)
base.DispatchConnection(socket);
}
else
{
//Hand the accepted socket over to a secondary listener's thread through its named pipe
DetachFromIOCP(socket);
var dispatchPipe = _dispatchPipes[index];
var write = new UvWriteReq(Log);
write.Init(Thread.Loop);
write.Write2(dispatchPipe, _dummyMessage, socket,
(write2, status, error, state) =>
{
write2.Dispose();
((UvStreamHandle)state).Dispose();
},
socket);
}
}
protected virtual void DispatchConnection(UvStreamHandle socket)//step 4
{
var connection = new Connection(this, socket);
connection.Start();
}
private Func<ConnectionContext, Frame> FrameFactory => ListenerContext.ServiceContext.FrameFactory;
public Connection(ListenerContext context, UvStreamHandle socket) : base(context)//step 5
{
SocketInput = new SocketInput(Thread.Memory, ThreadPool, _bufferSizeControl);
SocketOutput = new SocketOutput(Thread, _socket, this, ConnectionId, Log, ThreadPool);
//The key line: FrameFactory is the delegate registered in KestrelServer.Start (it creates a Frame<TContext> bound to the application)
_frame = FrameFactory(this);
}
public void Start()//step 6
{
Log.ConnectionStart(ConnectionId);
// Start socket prior to applying the ConnectionFilter
_socket.ReadStart(_allocCallback, _readCallback, this);
_frame.Start();
}
/// <summary>
/// Called once by Connection class to begin the RequestProcessingAsync loop.
/// </summary>
public void Start()//step 7
{
Reset();
_requestProcessingTask =
Task.Factory.StartNew(
(o) => ((Frame)o).RequestProcessingAsync(),
this,
default(CancellationToken),
TaskCreationOptions.DenyChildAttach,
TaskScheduler.Default).Unwrap();
}
/// <summary>
/// Primary loop which consumes socket input, parses it for protocol framing, and invokes the
/// application delegate for as long as the socket is intended to remain open.
/// The resulting Task from this loop is preserved in a field which the server uses to drain
/// and close all currently active connections when it shuts down.
/// </summary>
public override async Task RequestProcessingAsync()
{
while (!_requestProcessingStopping)
{
//(abbreviated: request-line/header parsing, keep-alive handling, error handling and DisposeContext are omitted here)
InitializeHeaders();
var context = _application.CreateContext(this);
await _application.ProcessRequestAsync(context).ConfigureAwait(false);
}
}
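For reference, the IHttpApplication<TContext> contract this loop drives is small (it is defined in Microsoft.AspNetCore.Hosting.Server; the Frame passes itself to CreateContext because Frame also implements IFeatureCollection, and for a normal ASP.NET Core app ProcessRequestAsync runs the middleware pipeline):
//The hosting-layer contract driven by RequestProcessingAsync:
public interface IHttpApplication<TContext>
{
    //Build a per-request context from the connection's feature collection
    TContext CreateContext(IFeatureCollection contextFeatures);
    //Run the application (for ASP.NET Core apps, the middleware pipeline) for one request
    Task ProcessRequestAsync(TContext context);
    //Clean up the per-request context once the request has finished
    void DisposeContext(TContext context, Exception exception);
}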