Lucene.Net + PanGu word segmentation
Step 1:
Add references to the PanGu and Lucene.Net DLLs.
Step 2:
Copy the dictionary folder into the project. In the demo it is named Dictionaries, but the official recommendation is to rename it to Dict.
Then right-click every item in that folder, open Properties, and set "Copy to Output Directory" to "Copy if newer".
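If the dictionaries do not get copied to the output directory, PanGu fails later at segmentation time with an error that is easy to misread. A small defensive check at startup makes the problem obvious; this is only a sketch, assuming PanGu loads its dictionaries from a Dict folder under the application's base directory (adjust the path to wherever the folder actually ends up in your deployment):

    // Fail fast if the Dict folder was not copied to the output directory.
    string dictPath = System.IO.Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "Dict");
    if (!System.IO.Directory.Exists(dictPath))
    {
        throw new System.IO.DirectoryNotFoundException("PanGu dictionary folder not found: " + dictPath);
    }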
Step 3
Create the index. Here every row in the database is written in one loop; in a real project you can drop the loop and index each record as it is saved (a sketch of that follows the code below).
/// <summary>
/// Create the index
/// </summary>
public void CreateContent()
{
    string indexPath = @"C:\lucenedir"; // Must match the folder name on disk exactly (case-sensitive), otherwise an error is thrown. The index is written to this directory.
    FSDirectory directory = FSDirectory.Open(new DirectoryInfo(indexPath), new NativeFSLockFactory()); // Open the index directory. FS stands for FileSystem.
    bool isUpdate = IndexReader.IndexExists(directory); // IndexReader reads the index. This checks whether the index folder and its marker files already exist.
    if (isUpdate)
    {
        // Only one piece of code may write to the index at a time. Opening the directory with an IndexWriter locks the index automatically.
        // If the directory is still locked (for example the program crashed while indexing), unlock it first.
        // (Note: if another request arrives while we are still writing, it would also unlock the index; that problem is dealt with later.)
        if (IndexWriter.IsLocked(directory))
        {
            IndexWriter.Unlock(directory);
        }
    }
    IndexWriter writer = new IndexWriter(directory, new PanGuAnalyzer(), !isUpdate, Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED); // Write to the index; the lock is taken here.
    List<Books> list = bookService.loadEntities(c => true).ToList(); // Load every row from the table.
    foreach (var book in list)
    {
        Document document = new Document(); // Represents one document.
        // Field.Store.YES: store the original value. Only then can doc.Get("Id") return it later.
        // Field.Index.NOT_ANALYZED: store without segmentation.
        document.Add(new Field("Id", book.id.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        // Field.Index.ANALYZED: segment before storing. Full-text fields must be segmented because they will be queried fuzzily.
        // Field.TermVector.WITH_POSITIONS_OFFSETS: store not only the terms but also their positions and offsets.
        document.Add(new Field("title", book.title, Field.Store.YES, Field.Index.ANALYZED, Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS_OFFSETS));
        document.Add(new Field("content", book.content, Field.Store.YES, Field.Index.ANALYZED, Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS_OFFSETS));
        writer.AddDocument(document);
    }
    writer.Close();    // Closing the writer releases the lock automatically.
    directory.Close(); // Do not forget to close the directory, otherwise the new entries will not be searchable.
}
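The loop above rebuilds the whole index in one pass. As noted in step 3, a real project would rather index one record at a time when it is saved. Here is a minimal sketch of that, reusing the same index path, field names and PanGuAnalyzer as above; the method name SaveBookToIndex and the delete-then-add approach are mine, not from the original code:

    public void SaveBookToIndex(Books book)
    {
        string indexPath = @"C:\lucenedir";
        FSDirectory directory = FSDirectory.Open(new DirectoryInfo(indexPath), new NativeFSLockFactory());
        bool isUpdate = IndexReader.IndexExists(directory);
        if (isUpdate && IndexWriter.IsLocked(directory))
        {
            IndexWriter.Unlock(directory);
        }
        IndexWriter writer = new IndexWriter(directory, new PanGuAnalyzer(), !isUpdate,
            Lucene.Net.Index.IndexWriter.MaxFieldLength.UNLIMITED);

        // Remove any existing document with the same Id, then add the fresh version.
        writer.DeleteDocuments(new Term("Id", book.id.ToString()));

        Document document = new Document();
        document.Add(new Field("Id", book.id.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
        document.Add(new Field("title", book.title, Field.Store.YES, Field.Index.ANALYZED,
            Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS_OFFSETS));
        document.Add(new Field("content", book.content, Field.Store.YES, Field.Index.ANALYZED,
            Lucene.Net.Documents.Field.TermVector.WITH_POSITIONS_OFFSETS));
        writer.AddDocument(document);

        writer.Close();    // Releases the lock.
        directory.Close();
    }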
Step 4
Segment the search text, then search.
/// <summary>
/// Search
/// </summary>
public List<SearchResultViewModel> SearchBookContent()
{
    string indexPath = @"C:\lucenedir";
    string kw = Request["txtSearchContent"];
    string[] keywords = Common.WebCommon.PanGuSplitWord(kw); // Segment the keyword with PanGu.
    FSDirectory directory = FSDirectory.Open(new DirectoryInfo(indexPath), new NoLockFactory());
    IndexReader reader = IndexReader.Open(directory, true);
    IndexSearcher searcher = new IndexSearcher(reader);

    // Search condition. To search several fields at once (e.g. title + content), do not use this query class; use BooleanQuery instead.
    PhraseQuery query = new PhraseQuery();
    foreach (string word in keywords) // Alternatively, let users separate the words with spaces themselves, e.g. "计算机 专业".
    {
        query.Add(new Term("content", word));
    }
    // query.Add(new Term("body", "语言")); -- more conditions can be added; they are AND-ed and their order does not matter.
    // query.Add(new Term("body", "大学生"));
    // query.Add(new Term("content", kw)); // Articles whose body contains kw.
    query.SetSlop(100); // Maximum distance between the query terms. Terms that are too far apart in the text are meaningless as a match
                        // (e.g. if "大学生" and "简历" are separated by too many words, the hit is not useful).

    // TopScoreDocCollector is the container that holds the results.
    TopScoreDocCollector collector = TopScoreDocCollector.create(1000, true);
    searcher.Search(query, null, collector); // Run the query and put the results into the collector.
    // Get every matching document; GetTotalHits() is the total number of hits.
    // collector.TopDocs(300, 20) would return documents 300 to 319 and can be used for paging.
    ScoreDoc[] docs = collector.TopDocs(0, collector.GetTotalHits()).scoreDocs;

    List<SearchResultViewModel> searchResultList = new List<SearchResultViewModel>();
    for (int i = 0; i < docs.Length; i++)
    {
        // ScoreDoc[] only carries document ids, so the matching Documents are not all loaded into memory at once, which keeps memory pressure low.
        // Use searcher.Doc(id) to load the full Document only when its details are needed.
        int docId = docs[i].doc;            // Lucene's internal document id.
        Document doc = searcher.Doc(docId); // Load the full document for that id.
        SearchResultViewModel viewModel = new SearchResultViewModel();
        viewModel.Id = int.Parse(doc.Get("Id")); // Field names are case-sensitive; use the same "Id" that was written when indexing.
        viewModel.Title = doc.Get("title");
        viewModel.Url = "/Book/ShowDetail/?id=" + viewModel.Id;
        viewModel.Content = Common.WebCommon.CreateHightLight(kw, doc.Get("content")); // Highlight the keyword in the content.
        searchResultList.Add(viewModel);
    }

    // Record the search.
    SearchDetails searchDetail = new SearchDetails();
    searchDetail.KeyWords = kw;
    searchDetail.Id = Guid.NewGuid();
    searchDetail.SearchDateTime = DateTime.Now;
    searchDetailService.AddEntity(searchDetail);

    return searchResultList;
}
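Since the comment above mentions that TopDocs(start, count) can be used for paging, here is a minimal paging sketch built on the same collector API; the pageIndex and pageSize variables are illustrative and assumed to be 1-based:

    // Page N covers hits (N-1)*pageSize .. N*pageSize-1.
    int pageIndex = 2;
    int pageSize = 10;
    int start = (pageIndex - 1) * pageSize;
    int totalHits = collector.GetTotalHits();                            // Total number of matches, useful for rendering a pager.
    ScoreDoc[] pageDocs = collector.TopDocs(start, pageSize).scoreDocs;  // Only the hits for this page.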
Helper code
using Lucene.Net.Analysis;
using Lucene.Net.Analysis.PanGu;
using PanGu;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;

namespace CZBK.HeiMaOA.Common
{
    public class WebCommon
    {
        /// <summary>
        /// Compute the MD5 hash of a string
        /// </summary>
        /// <param name="str"></param>
        /// <returns></returns>
        public static string Md5String(string str)
        {
            MD5 md5 = MD5.Create();
            byte[] buffer = System.Text.Encoding.UTF8.GetBytes(str);
            byte[] md5Buffer = md5.ComputeHash(buffer);
            StringBuilder sb = new StringBuilder();
            foreach (byte b in md5Buffer)
            {
                sb.Append(b.ToString("x2"));
            }
            return sb.ToString();
        }

        // Segment the user's search text with PanGu.
        public static string[] PanGuSplitWord(string str)
        {
            List<string> list = new List<string>();
            Analyzer analyzer = new PanGuAnalyzer();
            TokenStream tokenStream = analyzer.TokenStream("", new StringReader(str));
            Lucene.Net.Analysis.Token token = null;
            while ((token = tokenStream.Next()) != null)
            {
                list.Add(token.TermText());
            }
            return list.ToArray();
        }

        // Create an HTMLFormatter; its parameters are the prefix and suffix wrapped around highlighted words.
        public static string CreateHightLight(string keywords, string Content)
        {
            PanGu.HighLight.SimpleHTMLFormatter simpleHTMLFormatter =
                new PanGu.HighLight.SimpleHTMLFormatter("<font color=\"red\">", "</font>");
            // Create the Highlighter from the HTMLFormatter and a PanGu Segment object.
            PanGu.HighLight.Highlighter highlighter =
                new PanGu.HighLight.Highlighter(simpleHTMLFormatter, new Segment());
            // Number of characters per summary fragment.
            highlighter.FragmentSize = 150;
            // Return the best-matching fragment.
            return highlighter.GetBestFragment(keywords, Content);
        }
    }
}
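A quick usage sketch of the two helpers above; the keyword and text are made-up examples, and the exact tokens produced depend on the PanGu dictionaries:

    string[] words = WebCommon.PanGuSplitWord("计算机专业");                        // e.g. ["计算机", "专业"]
    string html = WebCommon.CreateHightLight("计算机", "本书面向计算机专业的学生。"); // matched words come back wrapped in <font color="red">...</font>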
BooleanQuery demo
/// <summary>
/// Query information
/// </summary>
/// <param name="dic"></param>
/// <returns></returns>
//public string SearchInfo(Dictionary<string, string> dic, string lucenePath, List<Query> listQuery, List<BooleanClause.Occur> listQueryType, List<SortField> listSortField)
public string SearchHouse(Dictionary<string, string> dic)
{
    JsonEntityHelper jsonEntityHelper = new JsonEntityHelper();
    string result = "";
    int pgIndex = 1;  // Current page number.
    int pgSize = 10;  // Default page size: 10 rows.
    LuceneResult lr = new LuceneResult();

    #region Build the query conditions
    List<Query> listQuery = new List<Query>();                                 // Query contents.
    List<BooleanClause.Occur> listQueryType = new List<BooleanClause.Occur>(); // Relation between conditions, usually AND.
    List<SortField> listSortField = new List<SortField>();                     // Sort order.

    // Page size (rows per page).
    if (dic.ContainsKey("PS") && dic["PS"] != null)
    {
        pgSize = Convert.ToInt32(dic["PS"]);
        if (pgSize <= 0)
        {
            pgSize = 5;
        }
    }

    // A search keyword was supplied.
    if (dic.ContainsKey("RS") && dic["RS"] != "")
    {
        BooleanQuery bq = new BooleanQuery();
        // String fields.
        bq.Add(LucenceHelper.GetQueryWildcard("KEYNAME", "*" + dic["RS"] + "*"), BooleanClause.Occur.SHOULD);
        bq.Add(LucenceHelper.GetQueryWildcard("KEYPINYIN", "*" + dic["RS"].ToUpper() + "*"), BooleanClause.Occur.SHOULD);
        bq.Add(LucenceHelper.GetQueryWildcard("KEYABBR", "*" + dic["RS"].ToUpper() + "*"), BooleanClause.Occur.SHOULD);
        listQuery.Add(bq);
        listQueryType.Add(BooleanClause.Occur.MUST);

        // Sort by KEYTYPE descending by default.
        listSortField.Add(LucenceHelper.GetSortField("KEYTYPE", SortField.DOUBLE, true)); // Descending.

        string[] FieldName = new string[] { "KEYTYPE", "KEYABBR", "KEYNAME", "KEYURL", "DISNAME", "ONLINESALES" };

        // Modified by diyaguang 2016.3.4: changed the Lucene path.
        string lucenePath = string.Format(LucenceConfig.AutoCompletePath_HouseSecond, areaInfo.AreaKey.Replace("_", "").ToString());
        lr = LucenceHelper.PanguSearchInfo(lucenePath, FieldName, pgIndex, pgSize, listQuery, listQueryType, listSortField);

        SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<span><b>", "</b></span>");
        Highlighter highlighter = new Highlighter(simpleHTMLFormatter, new PanGu.Segment());
        highlighter.FragmentSize = 50;

        if (lr != null && lr.DataList != null && lr.DataList.Count > 0)
        {
            foreach (string str in lr.DataList)
            {
                string temp = str;
                temp = System.Text.RegularExpressions.Regex.Replace(temp, @"RE=\d{1,8},", "");
                temp = System.Text.RegularExpressions.Regex.Replace(temp, @"PG=\d{1,8},", "PG=1");
                var obj = DynamicJson.Parse(temp);

                string GetUrl = dic["curUrl"].ToString();
                string[] Parames = (GetUrl + "," + obj.KEYURL).Split(',');
                GetUrl = CommonUrl.GetPageUrl(Parames);

                string DisName = highlighter.GetBestFragment(dic["RS"], obj.DISNAME);
                if (DisName == "")
                {
                    DisName = obj.DISNAME;
                }

                if (obj.ONLINESALES != "0")
                {
                    result += "<li><a href=\"" + GetUrl + "\" onclick=\"ga('send', 'pageview', '" + dic["RS"] + "');\">" + DisName + "待售(" + obj.ONLINESALES + ")</a></li>";
                }
                else
                {
                    result += "<li><a href=\"" + GetUrl + "\">" + DisName + "</a></li>";
                }
            }
        }
    }
    #endregion

    return result;
}
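The demo above goes through custom wrappers (LucenceHelper, LuceneResult). For reference, here is a minimal sketch of roughly what those calls correspond to in the plain Lucene.Net API: several SHOULD wildcard clauses grouped inside one MUST clause, plus a descending sort on KEYTYPE. The field names follow the demo; the index path, searcher setup and the sample keyword "万科" are invented for illustration:

    FSDirectory directory = FSDirectory.Open(new DirectoryInfo(@"C:\lucenedir"), new NoLockFactory());
    IndexSearcher searcher = new IndexSearcher(IndexReader.Open(directory, true));

    BooleanQuery keywordQuery = new BooleanQuery();
    keywordQuery.Add(new WildcardQuery(new Term("KEYNAME", "*万科*")), BooleanClause.Occur.SHOULD);
    keywordQuery.Add(new WildcardQuery(new Term("KEYPINYIN", "*WANKE*")), BooleanClause.Occur.SHOULD);
    keywordQuery.Add(new WildcardQuery(new Term("KEYABBR", "*WK*")), BooleanClause.Occur.SHOULD);

    BooleanQuery rootQuery = new BooleanQuery();
    rootQuery.Add(keywordQuery, BooleanClause.Occur.MUST); // Each condition group is AND-ed onto the root query.

    Sort sort = new Sort(new SortField("KEYTYPE", SortField.DOUBLE, true)); // true = descending.
    TopDocs topDocs = searcher.Search(rootQuery, null, 10, sort);           // Top 10 hits in sort order.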
References
http://www.cnblogs.com/birdshover/category/152283.html
http://www.cnblogs.com/piziyimao/archive/2013/01/31/2887072.html