Source-code notes: connection pool design

 

Question: is it necessary to build a connection pool in a gRPC application?

In practice:

import (
    "LabGO/utils"
    "fmt"
    "runtime"

    "sync"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/encoding/gzip"
    // The project-internal packages referenced below (conf, log, pb) must also
    // be imported here; their import paths are project-specific and omitted.
)
 
 
 
var ConnPoolSize *int // maximum number of usable connections in the pool

var ConnPool []*grpc.ClientConn // the connection pool
var ConnPoolThisInUse []bool    // in-use flag for each pooled connection
var ConnPoolLock sync.Mutex     // guards the pool state above
 
// InitConnPool fills the pool with usable connections.
func InitConnPool() {

    ConnPoolSize = &conf.Config.GrpcConnPoolSize // pool size comes from the environment config
    ConnPool = make([]*grpc.ClientConn, *ConnPoolSize)
    ConnPoolThisInUse = make([]bool, *ConnPoolSize)
    for i := 0; i < *ConnPoolSize; i++ {
        c, err := NewConn()
        if err != nil {
            // TODO
            log.SRELog.Error(err) // log immediately; deferring inside a loop only delays the message
            continue
        }
        ConnPool[i] = c
    }
}
 
// checkConnState reports whether the given connection is still usable.
func checkConnState(c *grpc.ClientConn) bool {
    defer func() {

        if e := recover(); e != nil {
            var buf = make([]byte, 8192)
            runtime.Stack(buf, true)

            err := fmt.Errorf("recover: %v Stack:%v", e, string(buf))
            log.SRELog.Error(err)

        }
        log.SRELog.Info("Leave checkConnState")
    }()

    if c == nil {
        return false
    }
    switch c.GetState().String() {
    case "IDLE", "CONNECTING", "READY":
        return true
    case "TRANSIENT_FAILURE", "SHUTDOWN":
        err := c.Close()
        if err != nil {
            log.SRELog.Error(err)
        }
        return false
    default:
        err := c.Close()
        if err != nil {
            log.SRELog.Error(err)
        }
        return false
    }
}
 
// CheckConnPool keeps the pooled connections usable, replacing dead ones.
func CheckConnPool() {
    log.SRELog.Info("IN CheckConnPool", " ", len(ConnPool))
    for i, c := range ConnPool {
        if !checkConnState(c) {
            ConnPoolLock.Lock()
            c1, err := NewConn()
            if err != nil {
                // TODO
                log.SRELog.Error(err)
                ConnPoolLock.Unlock()
                continue
            }
            ConnPool[i] = c1
            ConnPoolLock.Unlock()
        }
    }
}
 
// GetAConnWithBlock blocks until a usable, unused connection is available,
// marks it as in use, and returns it together with its pool index.
func GetAConnWithBlock() (*grpc.ClientConn, int) {
    defer func() {

        if e := recover(); e != nil {
            var buf = make([]byte, 8192)
            runtime.Stack(buf, true)

            err := fmt.Errorf("recover: %v Stack:%v", e, string(buf))
            log.SRELog.Error(err)

        }
        log.SRELog.Info("Leave GetAConnWithBlock")
    }()

    log.SRELog.Info("IN GetAConnWithBlock ", len(ConnPool))
    for {
        for i, c := range ConnPool {
            if checkConnState(c) {
                ConnPoolLock.Lock()
                // Re-check the in-use flag under the lock so two goroutines
                // cannot claim the same slot.
                if !ConnPoolThisInUse[i] {
                    ConnPoolThisInUse[i] = true
                    ConnPoolLock.Unlock()
                    return c, i
                }
                ConnPoolLock.Unlock()
            }
        }
        time.Sleep(time.Second)
    }
}
 
// NewCli obtains an available connection and returns a gRPC client plus the pool index.
func NewCli() (pb.CISClient, int) {
    c, i := GetAConnWithBlock()
    log.SRELog.Info("NewCli", c)
    return NewClient(c), i
}

// NewConn dials a new connection.
func NewConn() (*grpc.ClientConn, error) {
    // TODO: the dial options are elided in the original; a minimal sketch is
    // shown here. Production code should use real transport credentials, and
    // gzip must be attached as a call option (calling grpc.UseCompressor on
    // its own line has no effect).
    var opts = []grpc.DialOption{
        grpc.WithInsecure(), // placeholder; replace with appropriate credentials
        grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)),
    }

    var target = utils.ConcatStr(conf.Config.SrvAddr, ":50053")

    return grpc.Dial(target, opts...)
}

// NewClient wraps a connection in the generated client.
func NewClient(conn *grpc.ClientConn) pb.CISClient {
    return pb.NewCISClient(conn)
}
 
// Call site: obtain an available client, and release the pool slot when done.
gCli, idC := server.NewCli()

defer func() {
    server.ConnPoolLock.Lock()
    server.ConnPoolThisInUse[idC] = false
    server.ConnPoolLock.Unlock()
}()

req, err := gCli.TestMethod(ctx, &pb.TestMethodReq{...})
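
Nothing in the snippet above shows what drives CheckConnPool; presumably it runs on a timer somewhere during service startup. A minimal sketch of such a maintenance loop, reusing the names above (StartPoolMaintenance is a hypothetical helper, and the interval is illustrative):

// StartPoolMaintenance repairs the pool in the background until the returned
// stop function is called. Assumes InitConnPool has already run.
func StartPoolMaintenance(interval time.Duration) (stop func()) {
    done := make(chan struct{})
    go func() {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                CheckConnPool() // replace any connection that is no longer usable
            case <-done:
                return
            }
        }
    }()
    return func() { close(done) }
}

Usage would be something like `stop := StartPoolMaintenance(30 * time.Second)` at startup and `defer stop()` on shutdown.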

  

 

The cached path pops the last entry of the free list, i.e. the most recently returned connection (lowest idle time): it is removed from the list, marked as in use, and closed with ErrBadConn if it has outlived the configured max lifetime. The free list itself is kept ordered by return time:

    freeConn     []*driverConn // free connections ordered by returnedAt oldest to newest

The excerpt in question:

    // Prefer a free connection, if possible.
    last := len(db.freeConn) - 1
    if strategy == cachedOrNewConn && last >= 0 {
        // Reuse the lowest idle time connection so we can close
        // connections which remain idle as soon as possible.
        conn := db.freeConn[last]
        db.freeConn = db.freeConn[:last]
        conn.inUse = true
        if conn.expired(lifetime) {
            db.maxLifetimeClosed++
            db.mu.Unlock()
            conn.Close()
            return nil, driver.ErrBadConn
        }
        db.mu.Unlock()

 

 

When there is no free connection, or the caller asked not to reuse one, conn() either waits on a connRequest channel for a connection to be returned or opens a new one. The full function, from Go\src\database\sql\sql.go:

// conn returns a newly-opened or cached *driverConn.
func (db *DB) conn(ctx context.Context, strategy connReuseStrategy) (*driverConn, error) {
    db.mu.Lock()
    if db.closed {
        db.mu.Unlock()
        return nil, errDBClosed
    }
    // Check if the context is expired.
    select {
    default:
    case <-ctx.Done():
        db.mu.Unlock()
        return nil, ctx.Err()
    }
    lifetime := db.maxLifetime
 
    // Prefer a free connection, if possible.
    last := len(db.freeConn) - 1
    if strategy == cachedOrNewConn && last >= 0 {
        // Reuse the lowest idle time connection so we can close
        // connections which remain idle as soon as possible.
        conn := db.freeConn[last]
        db.freeConn = db.freeConn[:last]
        conn.inUse = true
        if conn.expired(lifetime) {
            db.maxLifetimeClosed++
            db.mu.Unlock()
            conn.Close()
            return nil, driver.ErrBadConn
        }
        db.mu.Unlock()
 
        // Reset the session if required.
        if err := conn.resetSession(ctx); errors.Is(err, driver.ErrBadConn) {
            conn.Close()
            return nil, err
        }
 
        return conn, nil
    }
 
    // Out of free connections or we were asked not to use one. If we're not
    // allowed to open any more connections, make a request and wait.
    if db.maxOpen > 0 && db.numOpen >= db.maxOpen {
        // Make the connRequest channel. It's buffered so that the
        // connectionOpener doesn't block while waiting for the req to be read.
        req := make(chan connRequest, 1)
        reqKey := db.nextRequestKeyLocked()
        db.connRequests[reqKey] = req
        db.waitCount++
        db.mu.Unlock()
 
        waitStart := nowFunc()
 
        // Timeout the connection request with the context.
        select {
        case <-ctx.Done():
            // Remove the connection request and ensure no value has been sent
            // on it after removing.
            db.mu.Lock()
            delete(db.connRequests, reqKey)
            db.mu.Unlock()
 
            atomic.AddInt64(&db.waitDuration, int64(time.Since(waitStart)))
 
            select {
            default:
            case ret, ok := <-req:
                if ok && ret.conn != nil {
                    db.putConn(ret.conn, ret.err, false)
                }
            }
            return nil, ctx.Err()
        case ret, ok := <-req:
            atomic.AddInt64(&db.waitDuration, int64(time.Since(waitStart)))
 
            if !ok {
                return nil, errDBClosed
            }
            // Only check if the connection is expired if the strategy is cachedOrNewConns.
            // If we require a new connection, just re-use the connection without looking
            // at the expiry time. If it is expired, it will be checked when it is placed
            // back into the connection pool.
            // This prioritizes giving a valid connection to a client over the exact connection
            // lifetime, which could expire exactly after this point anyway.
            if strategy == cachedOrNewConn && ret.err == nil && ret.conn.expired(lifetime) {
                db.mu.Lock()
                db.maxLifetimeClosed++
                db.mu.Unlock()
                ret.conn.Close()
                return nil, driver.ErrBadConn
            }
            if ret.conn == nil {
                return nil, ret.err
            }
 
            // Reset the session if required.
            if err := ret.conn.resetSession(ctx); errors.Is(err, driver.ErrBadConn) {
                ret.conn.Close()
                return nil, err
            }
            return ret.conn, ret.err
        }
    }
 
    db.numOpen++ // optimistically
    db.mu.Unlock()
    ci, err := db.connector.Connect(ctx)
    if err != nil {
        db.mu.Lock()
        db.numOpen-- // correct for earlier optimism
        db.maybeOpenNewConnections()
        db.mu.Unlock()
        return nil, err
    }
    db.mu.Lock()
    dc := &driverConn{
        db:         db,
        createdAt:  nowFunc(),
        returnedAt: nowFunc(),
        ci:         ci,
        inUse:      true,
    }
    db.addDepLocked(dc, dc)
    db.mu.Unlock()
    return dc, nil
}
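
The pool that conn() manages is configured entirely through the exported knobs on *sql.DB. A brief sketch of wiring them up; the driver and DSN are placeholders (any registered driver works):

package main

import (
    "database/sql"
    "log"
    "time"

    _ "github.com/go-sql-driver/mysql" // example driver; registration happens in its init()
)

func main() {
    db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/dbname") // placeholder DSN
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    db.SetMaxOpenConns(50)                  // cap on numOpen; beyond this, conn() queues a connRequest
    db.SetMaxIdleConns(10)                  // cap on len(freeConn); extra returns count toward maxIdleClosed
    db.SetConnMaxLifetime(30 * time.Minute) // feeds the expired(lifetime) checks seen above
}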

  

// driverConn wraps a driver.Conn with a mutex, to
// be held during all calls into the Conn. (including any calls onto
// interfaces returned via that Conn, such as calls on Tx, Stmt,
// Result, Rows)
type driverConn struct {
    db        *DB
    createdAt time.Time
 
    sync.Mutex  // guards following
    ci          driver.Conn
    needReset   bool // The connection session should be reset before use if true.
    closed      bool
    finalClosed bool // ci.Close has been called
    openStmt    map[*driverStmt]bool
 
    // guarded by db.mu
    inUse      bool
    returnedAt time.Time // Time the connection was created or returned.
    onPut      []func()  // code (with db.mu held) run when conn is next returned
    dbmuClosed bool      // same as closed, but guarded by db.mu, for removeClosedStmtLocked
}
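
For reference, the lifetime check used in conn() and putConn is a small method on driverConn. Reproduced from memory, so treat it as approximate; it lives in the same file and relies on nowFunc:

func (dc *driverConn) expired(timeout time.Duration) bool {
    if timeout <= 0 {
        return false // a non-positive max lifetime means connections never expire
    }
    return dc.createdAt.Add(timeout).Before(nowFunc())
}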

  

// putConn adds a connection to the db's free pool.
// err is optionally the last error that occurred on this connection.
func (db *DB) putConn(dc *driverConn, err error, resetSession bool) {
    if !errors.Is(err, driver.ErrBadConn) {
        if !dc.validateConnection(resetSession) {
            err = driver.ErrBadConn
        }
    }
    db.mu.Lock()
    if !dc.inUse {
        db.mu.Unlock()
        if debugGetPut {
            fmt.Printf("putConn(%v) DUPLICATE was: %s\n\nPREVIOUS was: %s", dc, stack(), db.lastPut[dc])
        }
        panic("sql: connection returned that was never out")
    }
 
    if !errors.Is(err, driver.ErrBadConn) && dc.expired(db.maxLifetime) {
        db.maxLifetimeClosed++
        err = driver.ErrBadConn
    }
    if debugGetPut {
        db.lastPut[dc] = stack()
    }
    dc.inUse = false
    dc.returnedAt = nowFunc()
 
    for _, fn := range dc.onPut {
        fn()
    }
    dc.onPut = nil
 
    if errors.Is(err, driver.ErrBadConn) {
        // Don't reuse bad connections.
        // Since the conn is considered bad and is being discarded, treat it
        // as closed. Don't decrement the open count here, finalClose will
        // take care of that.
        db.maybeOpenNewConnections()
        db.mu.Unlock()
        dc.Close()
        return
    }
    if putConnHook != nil {
        putConnHook(db, dc)
    }
    added := db.putConnDBLocked(dc, nil)
    db.mu.Unlock()
 
    if !added {
        dc.Close()
        return
    }
}
 
// Satisfy a connRequest or put the driverConn in the idle pool and return true
// or return false.
// putConnDBLocked will satisfy a connRequest if there is one, or it will
// return the *driverConn to the freeConn list if err == nil and the idle
// connection limit will not be exceeded.
// If err != nil, the value of dc is ignored.
// If err == nil, then dc must not equal nil.
// If a connRequest was fulfilled or the *driverConn was placed in the
// freeConn list, then true is returned, otherwise false is returned.
func (db *DB) putConnDBLocked(dc *driverConn, err error) bool {
    if db.closed {
        return false
    }
    if db.maxOpen > 0 && db.numOpen > db.maxOpen {
        return false
    }
    if c := len(db.connRequests); c > 0 {
        var req chan connRequest
        var reqKey uint64
        for reqKey, req = range db.connRequests {
            break
        }
        delete(db.connRequests, reqKey) // Remove from pending requests.
        if err == nil {
            dc.inUse = true
        }
        req <- connRequest{
            conn: dc,
            err:  err,
        }
        return true
    } else if err == nil && !db.closed {
        if db.maxIdleConnsLocked() > len(db.freeConn) {
            db.freeConn = append(db.freeConn, dc)
            db.startCleanerLocked()
            return true
        }
        db.maxIdleClosed++
    }
    return false
}
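
The counters that conn() and putConn maintain (waitCount, waitDuration, maxIdleClosed, maxLifetimeClosed) are exposed through db.Stats(), which is a convenient way to observe the pool behaviour described above. A small sketch; the logging interval and format are illustrative:

import (
    "database/sql"
    "log"
    "time"
)

// logPoolStats periodically dumps the counters maintained by conn() and putConn.
func logPoolStats(db *sql.DB, every time.Duration) {
    ticker := time.NewTicker(every)
    defer ticker.Stop()
    for range ticker.C {
        s := db.Stats()
        log.Printf("open=%d idle=%d inUse=%d waitCount=%d waitDuration=%s maxIdleClosed=%d maxLifetimeClosed=%d",
            s.OpenConnections, s.Idle, s.InUse,
            s.WaitCount, s.WaitDuration, s.MaxIdleClosed, s.MaxLifetimeClosed)
    }
}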

  

 
