wukong engine source code analysis, indexing, part 3: document scoring
As analyzed in the previous articles, the code that receives and handles index requests lives in segmenter_worker.go:
func (engine *Engine) segmenterWorker() {
    for {
        request := <-engine.segmenterChannel // key step: receive an index request

        tokensMap := make(map[string][]int)
        numTokens := 0
        if !engine.initOptions.NotUsingSegmenter && request.data.Content != "" {
            // When the document body is non-empty, derive keywords by segmenting the content
            segments := engine.segmenter.Segment([]byte(request.data.Content))
            for _, segment := range segments {
                token := segment.Token().Text()
                if !engine.stopTokens.IsStopToken(token) {
                    tokensMap[token] = append(tokensMap[token], segment.Start())
                }
            }
            numTokens = len(segments)
        } else {
            // Otherwise load the keywords supplied by the caller
            for _, t := range request.data.Tokens {
                if !engine.stopTokens.IsStopToken(t.Text) {
                    tokensMap[t.Text] = t.Locations
                }
            }
            numTokens = len(request.data.Tokens)
        }

        // Add the document labels, which are not segmented
        for _, label := range request.data.Labels {
            if !engine.initOptions.NotUsingSegmenter {
                if !engine.stopTokens.IsStopToken(label) {
                    tokensMap[label] = []int{}
                }
            } else {
                tokensMap[label] = []int{}
            }
        }

        indexerRequest := indexerAddDocumentRequest{
            document: &types.DocumentIndex{
                DocId:       request.docId,
                TokenLength: float32(numTokens),
                Keywords:    make([]types.KeywordIndex, len(tokensMap)),
            },
        }
        iTokens := 0
        for k, v := range tokensMap {
            indexerRequest.document.Keywords[iTokens] = types.KeywordIndex{
                Text: k,
                // Labels have no positions, so their frequency is 0 and they do not take part in tf-idf
                Frequency: float32(len(v)),
                Starts:    v}
            iTokens++
        }

        var dealDocInfoChan = make(chan bool, 1)
        indexerRequest.dealDocInfoChan = dealDocInfoChan
        engine.indexerAddDocumentChannels[request.shard] <- indexerRequest

        rankerRequest := rankerAddDocRequest{
            docId:           request.docId,
            fields:          request.data.Fields,
            dealDocInfoChan: dealDocInfoChan,
        }
        engine.rankerAddDocChannels[request.shard] <- rankerRequest
    }
}
The code above counts term frequencies and term positions (note that labels are also treated as searchable tokens, but their frequency is 0, so they cannot take part in tf-idf scoring), wraps the result into an indexerRequest, and sends it to engine.indexerAddDocumentChannels[request.shard].
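To make the tokensMap bookkeeping concrete, here is a minimal, self-contained sketch. It is not wukong code; the tokens, positions, and values are invented for illustration. It only shows how per-token position lists turn into frequencies, and why a label with an empty position list ends up with frequency 0:

package main

import "fmt"

func main() {
    // Positions of each token in the document body, as a segmenter would report them.
    tokensMap := map[string][]int{
        "search": {0, 18}, // appears twice
        "engine": {7},     // appears once
    }
    // A label is added with an empty position list, so its frequency becomes 0.
    tokensMap["golang"] = []int{}

    for token, positions := range tokensMap {
        frequency := float32(len(positions)) // this is what goes into types.KeywordIndex.Frequency
        fmt.Printf("token=%q frequency=%.0f starts=%v\n", token, frequency, positions)
    }
}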
In addition, the part of the code that builds rankerRequest prepares for document scoring; the corresponding worker is in engine/ranker_worker.go:
func (engine *Engine) rankerAddDocWorker(shard int) {
    for {
        request := <-engine.rankerAddDocChannels[shard] // key step: receive a ranker add-doc request
        docInfo := engine.rankers[shard].AddDoc(request.docId, request.fields, request.dealDocInfoChan)
        // save
        if engine.initOptions.UsePersistentStorage {
            engine.persistentStorageIndexDocumentChannels[shard] <- persistentStorageIndexDocumentRequest{
                typ:     "info",
                docId:   request.docId,
                docInfo: docInfo,
            }
        }
    }
}
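Note the dealDocInfoChan passed to AddDoc: it is the buffered channel of capacity 1 created in segmenterWorker, and, as the AddDoc code below shows, the ranker blocks on it until the indexer side signals that it has finished processing the document (the signalling side is not shown in this post). A minimal, stand-alone sketch of this handshake pattern, with the goroutine roles and payload invented for illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Buffered with capacity 1, like dealDocInfoChan in segmenterWorker, so the
    // signalling side never blocks even if the waiting side has not arrived yet.
    done := make(chan bool, 1)

    // Stand-in for the indexer worker: it finishes its part and signals completion.
    go func() {
        time.Sleep(10 * time.Millisecond) // pretend to build the inverted-index entry
        done <- true
    }()

    // Stand-in for Ranker.AddDoc: wait for the indexer's signal before
    // touching shared document info.
    <-done
    fmt.Println("indexer finished, safe to store doc fields for ranking")
}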
AddDoc simply stores the fields associated with a docId so they can be used later to score (rank) search results:
// Add scoring fields for a document
func (ranker *Ranker) AddDoc(docId uint64, fields interface{}, dealDocInfoChan <-chan bool) *types.DocInfo {
    if ranker.initialized == false {
        log.Fatal("排序器尚未初始化")
    }

    <-dealDocInfoChan // wait for the indexer to finish processing this document

    ranker.DocInfosShard.Lock()
    defer ranker.DocInfosShard.Unlock()
    if _, found := ranker.DocInfosShard.DocInfos[docId]; !found {
        ranker.DocInfosShard.DocInfos[docId] = new(types.DocInfo)
        ranker.DocInfosShard.NumDocuments++
    }
    ranker.DocInfosShard.DocInfos[docId].Fields = fields
    return ranker.DocInfosShard.DocInfos[docId]
}
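The Fields saved here are handed back to the application's scoring logic at search time. As a rough illustration only, assuming wukong's types.ScoringCriteria interface (a Score method taking the indexed document plus the stored fields and returning a score vector), with MyScoringFields and the weighting rule invented for this example:

package main

import "github.com/huichen/wukong/types"

// MyScoringFields is an invented example of what an application might store
// as a document's Fields when indexing it.
type MyScoringFields struct {
    Likes int
}

// MyScoringCriteria is a sketch of a custom scoring rule; it assumes the
// types.ScoringCriteria interface shape described above.
type MyScoringCriteria struct{}

func (criteria MyScoringCriteria) Score(doc types.IndexedDocument, fields interface{}) []float32 {
    f, ok := fields.(MyScoringFields) // the Fields stored by Ranker.AddDoc come back here
    if !ok {
        return []float32{}
    }
    // Mix text relevance (BM25) with a per-document signal carried in Fields.
    return []float32{doc.BM25 + 0.01*float32(f.Likes)}
}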
Tags: search engine