A Brief Analysis of the Flask Source Code
Let's start with the simplest possible demo. The import, the app object, and the two routes that appear in the attribute dumps below are filled in so the snippet runs as-is; note that after_request hooks must accept and return the response, as we will see later.

from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    return 'index'

@app.route('/hello')
def h_view():
    return 'hello'

@app.before_request
def br1():
    print('br1')

@app.before_request
def br2():
    print('br2')

@app.after_request
def ar1(response):
    print('ar1')
    return response

@app.after_request
def ar2(response):
    print('ar2')
    return response

app.run()
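With these hooks registered, a single request prints roughly br1, br2, ar2, ar1: before_request hooks run in registration order, while after_request hooks run in reverse, as the analysis below will show.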
Before startup
Collecting the routes
Routes are collected through app.route. The implementation is very simple:
class Flask:
# ...
def route(self, rule, **options):
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
    # Simplified, but roughly what happens: the endpoint defaults to the
    # view function's name, a Rule is built and added to the url_map,
    # and the view function is stored under its endpoint.
    endpoint = endpoint or view_func.__name__
    methods = options.pop('methods', None)
    rule = self.url_rule_class(rule, methods=methods, **options)
    self.url_map.add(rule)
    if view_func is not None:
        self.view_functions[endpoint] = view_func
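In other words, the route decorator is just sugar around add_url_rule. Had the demo not used the decorator, the /hello view could have been registered by hand like this (a sketch, equivalent to @app.route('/hello')):

def h_view():
    return 'hello'

app.add_url_rule('/hello', 'h_view', h_view)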
After the routes have been collected, the app holds the following attributes:
- app.url_map:
# each Rule stores the mapping from a URL rule to an endpoint
- _rules: [<Rule '/hello' (HEAD, OPTIONS, GET) -> h_view>, <Rule '/' (HEAD, OPTIONS, GET) -> index>, <Rule '/static/<filename>' (HEAD, OPTIONS, GET) -> static>]
# stores the endpoint-to-Rule mapping (the reverse of the item above)
- _rules_by_endpoint: {'static': [<Rule '/static/<filename>' (HEAD, OPTIONS, GET) -> static>], 'index': [<Rule '/' (HEAD, OPTIONS, GET) -> index>], 'h_view': [<Rule '/hello' (HEAD, OPTIONS, GET) -> h_view>]}
- app.view_functions
- stores the mapping from endpoint to view function
With url_map and view_functions in place, we can resolve a URL to its endpoint, and then the endpoint to its view function.
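A quick sketch of that lookup using Werkzeug's MapAdapter (bind and match are public Werkzeug APIs; the host name below is arbitrary):

adapter = app.url_map.bind('localhost')        # returns a MapAdapter
endpoint, view_args = adapter.match('/hello')  # ('h_view', {})
view = app.view_functions[endpoint]            # the h_view function
print(view(**view_args))                       # prints 'hello'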
Collecting the request hooks
Take before_request as an example:
class Flask:
# ...
def before_request(self, f):
# it effectively does this:
#   a = {None: []}
#   a[None].append(f)
self.before_request_funcs.setdefault(None, []).append(f)
return f
After those decorators have run, the attributes look like this:
- app.before_request_funcs: {None: [<function br1 at 0x11fded730>, <function br2 at 0x11fe30bf8>]}
- app.after_request_funcs: {None: [<function ar1 at 0x11fe30c80>, <function ar2 at 0x11fe30d08>]}
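The None key is the application-wide bucket; hooks registered on a blueprint are stored under the blueprint's name instead, which is why the dispatch code later looks up both. A minimal sketch (the blueprint name 'api' is made up for illustration):

from flask import Blueprint

api = Blueprint('api', __name__)

@api.before_request
def api_only():
    # runs only for requests routed to this blueprint
    print('api before')

app.register_blueprint(api)
# app.before_request_funcs now also has an 'api' key, roughly:
# {None: [br1, br2], 'api': [api_only]}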
Startup: app.run()
This happens in two steps:
- make_server
- serve_forever (the listening loop)
Creating the server
The call stack:
make_server, serving.py:655
inner, serving.py:774
run_simple, serving.py:814
run, app.py:841
make_server is a factory function that builds a different server type depending on its arguments; by default app.run() ends up with a BaseWSGIServer:
def make_server(host=None, port=None, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None, fd=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context, fd=fd)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context, fd=fd)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context, fd=fd)
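So passing threaded=True or processes=N to app.run() (the keyword arguments are forwarded through run_simple to make_server) selects a different server class. The same factory can be used directly; a small sketch:

from werkzeug.serving import make_server

# a ThreadedWSGIServer instead of the default BaseWSGIServer
srv = make_server('127.0.0.1', 5000, app, threaded=True)
srv.serve_forever()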
Starting the server
The call stack:
serve_forever, socketserver.py:222
serve_forever, serving.py:612
inner, serving.py:777
run_simple, serving.py:814
run, app.py:841
What does serve_forever do?
- start a loop;
- select on network events;
- when a request arrives, call self._handle_request_noblock()
def serve_forever(self, poll_interval=0.5):
"""Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.
"""
self.__is_shut_down.clear()
try:
with _ServerSelector() as selector:
selector.register(self, selectors.EVENT_READ)
while not self.__shutdown_request:
ready = selector.select(poll_interval)
if self.__shutdown_request:
break
if ready:
self._handle_request_noblock()
self.service_actions()
finally:
self.__shutdown_request = False
self.__is_shut_down.set()
What _handle_request_noblock does is covered in the next section, Handling a request.
Handling a request
Accept a socket, read the data, build environ, and call the WSGI app (our Flask instance).
When a request arrives, the _handle_request_noblock from the previous step is invoked.
This is plain socket programming; the rough flow is:
- the server accepts the connection, which yields a per-client socket;
- the data on that socket is then processed (see the bare-socket sketch after the snippet below);
def _handle_request_noblock(self):
# simplified
request, client_address = self.get_request()
self.process_request(request, client_address)
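For reference, the accept-then-handle pattern described above looks like this with bare Python sockets (a minimal sketch, not Werkzeug's actual implementation):

import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', 8000))
server.listen()

while True:
    conn, client_address = server.accept()  # a per-client socket
    data = conn.recv(4096)                  # the raw HTTP request bytes
    conn.sendall(b'HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok')
    conn.close()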
The call stack here is fairly deep; it eventually reaches run_wsgi, which invokes the app itself, i.e. app.__call__:
run_wsgi, serving.py:208
handle_one_request, serving.py:328
handle, server.py:426
handle, serving.py:293
__init__, socketserver.py:720
finish_request, socketserver.py:360
process_request, socketserver.py:347
_handle_request_noblock, socketserver.py:316
serve_forever, socketserver.py:237
serve_forever, serving.py:612
inner, serving.py:777
run_simple, serving.py:814
run, app.py:841
run_wsgi roughly does the following:
- read the socket data;
- parse the HTTP request;
- build the environ dict;
- define the start_response callable;
- call app(environ, start_response);
This is the standard processing flow of any WSGI server.
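Before reading run_wsgi itself, it helps to recall the other side of the contract. A minimal WSGI application (pure PEP 3333, no Flask involved) is just:

def simple_app(environ, start_response):
    # environ describes the request; start_response sends status and headers
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

Flask's application object implements exactly this callable interface, which is why run_wsgi can call it as app(environ, start_response). With that in mind, here is run_wsgi: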
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
self.environ = environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
code = int(code)
self.send_response(code, msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if not ('content-length' in header_keys or
environ['REQUEST_METHOD'] == 'HEAD' or
code < 200 or code in (204, 304)):
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert isinstance(data, bytes), 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
Flask handles the request
Now we enter Flask's own logic (strictly speaking, everything above was executed by werkzeug).
The app(environ, start_response) call above actually invokes the app.__call__ method:
full_dispatch_request, app.py:1607
wsgi_app, app.py:1982
__call__, app.py:1997
Running the request hooks
In full_dispatch_request, preprocess_request runs first. If it returns a value, a response is built from that value and returned immediately, so be careful about returning anything from a before_request hook (an example follows after the preprocess_request source below).
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
return self.finalize_request(rv)
What does preprocess_request do?
- run the url_value_preprocessors;
- run the before_request hooks;
def preprocess_request(self):
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
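Here is that short-circuit behaviour in action: a before_request hook that returns something ends the request right there and the view never runs (a sketch; the X-Token header and the token value are made up for illustration):

from flask import request

@app.before_request
def require_token():
    if request.headers.get('X-Token') != 'secret':
        # this return value becomes the response; dispatch_request is skipped
        return 'forbidden', 403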
Dispatching to the view
- find the view function through the rule's endpoint;
- call the view function;
def dispatch_request(self):
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
Building the response
Running the response hooks
process_response, app.py:1848
finalize_request, app.py:1632
full_dispatch_request, app.py:1615
wsgi_app, app.py:1982
__call__, app.py:1997
process_response:
- runs the after_request hooks;
- unlike before_request, each after_request hook is called with the response and must return a response, so a function decorated with after_request has to accept a response argument and return one;
- note the reversed() applied to after_request_funcs: the response hooks run last-in, first-out.
def process_response(self, response):
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
funcs = ctx._after_request_functions
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.save_session(ctx.session, response)
return response
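A small sketch of a typical after_request hook (the header name is arbitrary); registered after ar1 and ar2 from the demo, it would run first, followed by ar2 and then ar1:

@app.after_request
def add_request_id(response):
    response.headers['X-Request-Id'] = 'demo'
    # must return the (possibly modified) response object
    return response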
Building the WSGI response
__call__, wrappers.py:1325
wsgi_app, app.py:1989
__call__, app.py:1997
All that remains is handing the response back to the WSGI server:
def __call__(self, environ, start_response):
app_iter, status, headers = self.get_wsgi_response(environ)
start_response(status, headers)
return app_iter
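Putting it all together, the whole chain can be exercised without a real socket via Flask's test client (a quick sketch against the demo app; skip the app.run() call when trying this):

with app.test_client() as client:
    resp = client.get('/hello')
    # console: br1, br2, ar2, ar1 -- the hooks fire around the view
    print(resp.status_code, resp.data)  # 200 b'hello'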