Lucene.net 2.0 中文分词后语法高亮问题
Lucene.net 2.0 的 src 包里自带了 Highlighter.Net-2.0.0,可以用来实现语法高亮。
// Build the combined (multi-clause) search query.
BooleanQuery bquery = new BooleanQuery();
// Analyzer used both to parse the query and to re-tokenize stored text
// for highlighting.
Analyzer analyzer = new StandardAnalyzer();
// Parser that searches the "title" and "content" fields at once.
MultiFieldQueryParser parser = new MultiFieldQueryParser(new string[] { "title", "content" }, analyzer);
query = parser.Parse(key);
bquery.Add(query, BooleanClause.Occur.MUST);
DateTime now = DateTime.Now;
// Open one IndexReader per on-disk index shard so a single search covers
// them all (simple "distributed" search).
List<IndexReader> indexreaders = new List<IndexReader>();
string[] dirs = Directory.GetDirectories(dir);
if (searchtype == SearchType.None)
{
    foreach (string item in dirs)
    {
        // NOTE(review): Directory.GetDirectories already returns full paths,
        // so the original Path.Combine(dir, item) was a no-op; combining
        // item with "Save" directly is equivalent and clearer.
        indexreaders.Add(IndexReader.Open(Path.Combine(item, "Save")));
    }
}
else
{
    indexreaders.Add(IndexReader.Open(Path.Combine(Path.Combine(dir, searchtype.ToString()), "Save")));
}
MultiReader reader = new MultiReader(indexreaders.ToArray());
indexSearcher = new IndexSearcher(reader);
Hits hits = indexSearcher.Search(bquery);
timer = (DateTime.Now - now).TotalMilliseconds;
int count = hits.Length();
/* Work out which hits fall on the requested page (10 hits per page). */
int start = (pageNo - 1) * 10;
int end = pageNo * 10 > count ? count : pageNo * 10;
/* Highlighting setup: score fragments against the parsed query and emit
   fragments of at most 100 characters. */
Highlighter highlighter = new Highlighter(new QueryScorer(query));
highlighter.SetTextFragmenter(new SimpleFragmenter(100));
for (int i = start; i < end; i++)
{
    Lucene.Net.Documents.Document doc = hits.Doc(i);
    System.String text = doc.Get("content");
    // Append a 10-character sentinel so trailing special characters are not
    // dropped by the fragmenter; the sentinel is stripped again below.
    string title = doc.Get("title") + "+aaaaaaaaa";
    Lucene.Net.Analysis.TokenStream tokenStream = analyzer.TokenStream("content", new System.IO.StringReader(text));
    Lucene.Net.Analysis.TokenStream titleStream = analyzer.TokenStream("title", new System.IO.StringReader(title));
    System.String result = highlighter.GetBestFragments(tokenStream, text, 2, "...");
    // BUG FIX: the original passed 0 as maxNumFragments, so GetBestFragments
    // always returned an empty string and the title was never highlighted;
    // request one fragment (titles are shorter than the 100-char fragment
    // size, so one fragment spans the whole title plus sentinel).
    string tresult = highlighter.GetBestFragments(titleStream, title, 1, "..");
    // Strip the 10-character sentinel from the highlighted title.
    if (tresult.Length > 10)
        tresult = tresult.Remove(tresult.Length - 10, 10);
    // Highlighter found no match in the title: fall back to the raw title
    // (minus the sentinel).
    if (string.IsNullOrEmpty(tresult))
        tresult = title.Remove(title.Length - 10, 10);
    // No query terms matched the content: fall back to a plain excerpt.
    if (string.IsNullOrEmpty(result))
    {
        result = text.Length > 100 ? text.Substring(0, 100) : text;
    }
    if (result.Length < text.Length)
        result = result + "...";
}
这是使用lucene.net的自带分词器StandardAnalyzer,有个弊端是一个字就认为是一个词。现在我们要用自己的中文词义分词器——MyAnalyzer的话,问题来了。Highlighter一直报错。为什么会出现这种情况呢?那是因为,中文分词器,分词完成一般有分割符号。比如,对于词“沪江英语”,分割完成后变成"沪江 英语"。也就是说,返回的Token是(沪江,0,2)(英语,3,5)。而如果用Highlighter,它想要取得词是(沪江,0,2)(英语,2,4),这个就是因为空格而产生的偏差,引起了Highlighter的报错。修改Highlighter比较麻烦,要对分词器做特殊处理也比较难。可以使用字典解释器,分割结果后再高亮。比如,取得"沪江英语"的分词结果"沪江 英语",然后,把分词结果传递给Highlighter。弊端是,搜索结果会凭空出现很多空格。俺们懒人自然有懒人的解决办法。那就是用MyAnalyzer实现索引和搜索,用StandardAnalyzer实现高亮:
// Build the combined (multi-clause) search query.
BooleanQuery bquery = new BooleanQuery();
// Search/index analyzer: the project's Chinese word-segmentation analyzer.
Analyzer analyzer = new MyAnalyzer();
// Highlighting analyzer: StandardAnalyzer's token offsets line up with the
// original text, so the Highlighter never sees the separator-induced offset
// drift produced by the Chinese segmenter.
Analyzer highanalyzer = new StandardAnalyzer();
// BUG FIX: the original declared two locals both named "parser" (CS0128,
// does not compile); the highlight-side parser gets a distinct name.
MultiFieldQueryParser highParser = new MultiFieldQueryParser(new string[] { "title", "content" }, highanalyzer);
Query highquery = highParser.Parse(key);
// Search-side parser, over the same two fields.
MultiFieldQueryParser parser = new MultiFieldQueryParser(new string[] { "title", "content" }, analyzer);
query = parser.Parse(key);
bquery.Add(query, BooleanClause.Occur.MUST);
DateTime now = DateTime.Now;
// Open one IndexReader per on-disk index shard so a single search covers
// them all (simple "distributed" search).
List<IndexReader> indexreaders = new List<IndexReader>();
string[] dirs = Directory.GetDirectories(dir);
if (searchtype == SearchType.None)
{
    foreach (string item in dirs)
    {
        // NOTE(review): Directory.GetDirectories already returns full paths,
        // so the original Path.Combine(dir, item) was a no-op; combining
        // item with "Save" directly is equivalent and clearer.
        indexreaders.Add(IndexReader.Open(Path.Combine(item, "Save")));
    }
}
else
{
    indexreaders.Add(IndexReader.Open(Path.Combine(Path.Combine(dir, searchtype.ToString()), "Save")));
}
MultiReader reader = new MultiReader(indexreaders.ToArray());
indexSearcher = new IndexSearcher(reader);
Hits hits = indexSearcher.Search(bquery);
timer = (DateTime.Now - now).TotalMilliseconds;
int count = hits.Length();
/* Work out which hits fall on the requested page (10 hits per page). */
int start = (pageNo - 1) * 10;
int end = pageNo * 10 > count ? count : pageNo * 10;
/* Highlighting setup: score fragments against the StandardAnalyzer-parsed
   query so offsets match the raw text; fragments cap at 100 characters. */
Highlighter highlighter = new Highlighter(new QueryScorer(highquery));
highlighter.SetTextFragmenter(new SimpleFragmenter(100));
for (int i = start; i < end; i++)
{
    Lucene.Net.Documents.Document doc = hits.Doc(i);
    System.String text = doc.Get("content");
    // Append a 10-character sentinel so trailing special characters are not
    // dropped by the fragmenter; the sentinel is stripped again below.
    string title = doc.Get("title") + "+aaaaaaaaa";
    Lucene.Net.Analysis.TokenStream tokenStream = highanalyzer.TokenStream("content", new System.IO.StringReader(text));
    Lucene.Net.Analysis.TokenStream titleStream = highanalyzer.TokenStream("title", new System.IO.StringReader(title));
    System.String result = highlighter.GetBestFragments(tokenStream, text, 2, "...");
    // BUG FIX: the original passed 0 as maxNumFragments, so GetBestFragments
    // always returned an empty string and the title was never highlighted;
    // request one fragment (titles are shorter than the 100-char fragment
    // size, so one fragment spans the whole title plus sentinel).
    string tresult = highlighter.GetBestFragments(titleStream, title, 1, "..");
    // Strip the 10-character sentinel from the highlighted title.
    if (tresult.Length > 10)
        tresult = tresult.Remove(tresult.Length - 10, 10);
    // Highlighter found no match in the title: fall back to the raw title
    // (minus the sentinel).
    if (string.IsNullOrEmpty(tresult))
        tresult = title.Remove(title.Length - 10, 10);
    // No query terms matched the content: fall back to a plain excerpt.
    if (string.IsNullOrEmpty(result))
    {
        result = text.Length > 100 ? text.Substring(0, 100) : text;
    }
    if (result.Length < text.Length)
        result = result + "...";
}
这样的结果还是很友好的。（全文完）
文章作者:
蔚洋(Super)
文章出处:
http://www.cnblogs.com/SUPERAI
欢迎转载,转载时请注明出处。谢谢合作。