Spring Boot 2.x + Elasticsearch 7.x: common operations (create, delete, update, query, group by)
Integrating ES#
Add the dependency#
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
</dependency>
Configuration#
Add the following to application.properties:
elasticsearch.host=127.0.0.1
elasticsearch.port=9200
@Configuration
@ConfigurationProperties(prefix = "elasticsearch")
public class EsConfig {

    private String host;
    private Integer port;

    @Bean(destroyMethod = "close")
    public RestHighLevelClient client() {
        return new RestHighLevelClient(RestClient.builder(
                new HttpHost(host, port, "http")
        ));
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public Integer getPort() {
        return port;
    }

    public void setPort(Integer port) {
        this.port = port;
    }
}
That is all the setup required.
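To verify that the client can actually reach the cluster, you can inject it and call ping. This check is not part of the original post, just a minimal sketch (the esIsUp method name is only for illustration):

@Resource
private RestHighLevelClient client;

public boolean esIsUp() throws IOException {
    // ping returns true when the cluster responds
    return client.ping(RequestOptions.DEFAULT);
}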
Prerequisites for the basic operations#
Helper classes used below:
public class ObjectToMapUtils {

    /**
     * Convert a bean into a Map.
     * @param bean the source object
     * @param <T>  the bean type
     * @return a map of the bean's non-null property names to values
     */
    public static <T> Map<String, Object> beanToMap(T bean) {
        Map<String, Object> map = new HashMap<>();
        if (bean != null) {
            BeanMap beanMap = BeanMap.create(bean);
            for (Object key : beanMap.keySet()) {
                if (beanMap.get(key) != null) {
                    map.put(key + "", beanMap.get(key));
                }
            }
        }
        return map;
    }
}
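BeanMap here is the cglib class (Spring Boot also repackages it as org.springframework.cglib.beans.BeanMap). If you would rather not touch cglib directly, Jackson, which is already on the classpath of a Spring Boot project, can do the same conversion; a sketch, not from the original post:

private static final ObjectMapper MAPPER = new ObjectMapper();

public static <T> Map<String, Object> beanToMap(T bean) {
    // convertValue turns every getter into a map entry; note that Date fields
    // become epoch-millisecond longs with a plain ObjectMapper's default settings
    return MAPPER.convertValue(bean, new TypeReference<Map<String, Object>>() {});
}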
@Data
public class DocBean {

    private String id;
    private String firstCode;
    private String secordCode;
    private String content;
    private Integer type;
    private Date createdAt;
    private Long startAt;

    public DocBean(String id, String firstCode, String secordCode, String content, Integer type) {
        this.id = id;
        this.firstCode = firstCode;
        this.secordCode = secordCode;
        this.content = content;
        this.type = type;
        this.createdAt = new Date();
        this.startAt = System.currentTimeMillis();
    }

    public DocBean() {
    }
}
Create#
public interface IElasticService {
    void save(DocBean docBean) throws IOException;
}
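The implementation below overrides several more methods; for those @Override annotations to compile, the same interface also has to declare them. The signatures match the code that follows:

void batchSave(List<DocBean> list) throws IOException;
void update(DocBean docBean) throws IOException;
void deleteById(String id) throws IOException;
void deleteByIds(String[] ids) throws IOException;
Map<String, Object> getEs(String id) throws IOException;
Map<String, Long> getTypeGroupBy() throws IOException;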
@Slf4j
@Service
public class IElasticServiceImpl implements IElasticService {

    @Resource
    private RestHighLevelClient client;

    private static final String NBA_INDEX = "record_traffic-2020-12-10";

    /**
     * Index (create) a document.
     * @param docBean the document to index
     * @throws IOException on transport errors
     */
    @Override
    public void save(DocBean docBean) throws IOException {
        IndexRequest request = new IndexRequest(NBA_INDEX)
                .id(String.valueOf(docBean.getId()))
                .source(ObjectToMapUtils.beanToMap(docBean));
        IndexResponse response = client.index(request, RequestOptions.DEFAULT);
        System.out.println(JSONObject.toJSON(response));
    }
}
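A quick way to exercise the method, for example from a test class (the test wiring here is an assumption, not part of the original post):

@SpringBootTest
class IElasticServiceImplTest {

    @Resource
    private IElasticService elasticService;

    @Test
    void saveOne() throws IOException {
        // id, firstCode, secordCode, content, type
        elasticService.save(new DocBean("1", "F001", "S001", "hello elasticsearch", 1));
    }
}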
Bulk create#
@Override
public void batchSave(List<DocBean> list) throws IOException {
    if (CollectionUtils.isEmpty(list)) {
        return;
    }
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.timeout(new TimeValue(60, TimeUnit.SECONDS));
    list.forEach(item -> bulkRequest.add(new IndexRequest("batch_record_traffic")
            .id(String.valueOf(item.getId()))
            .source(ObjectToMapUtils.beanToMap(item))));
    // execute the bulk request
    BulkResponse bulkResponse = client.bulk(bulkRequest, RequestOptions.DEFAULT);
    // check the overall response status
    RestStatus status = bulkResponse.status();
    log.info("bulk insert returned status: {}", status);
}
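Note that a bulk request can partially fail while the overall status still looks fine; a check that could be added right after client.bulk(...), as a sketch:

if (bulkResponse.hasFailures()) {
    // buildFailureMessage() concatenates the failure reason of every failed item
    log.error("some bulk items failed: {}", bulkResponse.buildFailureMessage());
}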
Update#
/**
 * Update an existing document.
 * @param docBean the document carrying the fields to update
 * @throws IOException on transport errors
 */
@Override
public void update(DocBean docBean) throws IOException {
    UpdateRequest request = new UpdateRequest(NBA_INDEX, docBean.getId())
            .doc(ObjectToMapUtils.beanToMap(docBean));
    client.update(request, RequestOptions.DEFAULT);
}
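If the document might not exist yet, the same request can be turned into an upsert; a sketch, not in the original post:

UpdateRequest request = new UpdateRequest(NBA_INDEX, docBean.getId())
        .doc(ObjectToMapUtils.beanToMap(docBean))
        .docAsUpsert(true); // index the doc when it is missing, otherwise apply a partial update
client.update(request, RequestOptions.DEFAULT);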
Delete#
Delete by a single id#
@Override
public void deleteById(String id) throws IOException {
    DeleteRequest deleteRequest = new DeleteRequest(NBA_INDEX, id);
    client.delete(deleteRequest, RequestOptions.DEFAULT);
}
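If you want to know whether anything was actually removed, the response can be inspected; a sketch:

DeleteResponse response = client.delete(deleteRequest, RequestOptions.DEFAULT);
if (response.getResult() == DocWriteResponse.Result.NOT_FOUND) {
    log.warn("document {} did not exist", id);
}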
Batch delete by ids#
/**
 * Delete multiple documents by their ids via delete-by-query.
 * @param ids the ids to delete
 * @throws IOException on transport errors
 */
@Override
public void deleteByIds(String[] ids) throws IOException {
    DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(NBA_INDEX);
    BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
    boolQueryBuilder.must(QueryBuilders.idsQuery().addIds(ids));
    deleteByQueryRequest.setQuery(boolQueryBuilder);
    client.deleteByQuery(deleteByQueryRequest, RequestOptions.DEFAULT);
}
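deleteByQuery returns a BulkByScrollResponse, which reports how many documents were removed; a sketch of capturing it:

BulkByScrollResponse response = client.deleteByQuery(deleteByQueryRequest, RequestOptions.DEFAULT);
log.info("deleted {} documents", response.getDeleted());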
Query#
Query by id#
@Override
public Map<String, Object> getEs(String id) throws IOException {
    GetRequest getRequest = new GetRequest(NBA_INDEX, id);
    GetResponse response = client.get(getRequest, RequestOptions.DEFAULT);
    return response.getSource();
}
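getSource() returns null when the id does not exist, so an explicit check saves callers from handling nulls; a sketch:

if (!response.isExists()) {
    // no document with that id
    return Collections.emptyMap();
}
return response.getSource();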
... (more query examples will follow in a later update)
Group by and count#
@Override
public Map<String, Long> getTypeGroupBy() throws IOException {
    Map<String, Long> map = new LinkedHashMap<>();
    SearchRequest searchRequest = new SearchRequest();
    searchRequest.indices("record_traffic*");
    // terms() sets the aggregation name, field() sets the field to group on
    TermsAggregationBuilder aggregation = AggregationBuilders.terms("content")
            // group on the keyword sub-field (text fields cannot be aggregated directly)
            .field("content.keyword")
            .size(100)
            // order buckets by document count, descending
            .order(BucketOrder.count(false));
    SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
    searchSourceBuilder.aggregation(aggregation);
    // execute the search
    searchRequest.source(searchSourceBuilder);
    SearchResponse response = client.search(searchRequest, RequestOptions.DEFAULT);
    Terms contentTerms = response.getAggregations().get("content");
    List<? extends Terms.Bucket> buckets = contentTerms.getBuckets();
    for (Terms.Bucket bucket : buckets) {
        map.put(bucket.getKeyAsString(), bucket.getDocCount());
    }
    return map;
}
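Since only the aggregation buckets are needed here, the hits themselves can be skipped to keep the response small; a one-line addition to the source builder (an optimization, not in the original post):

searchSourceBuilder.size(0); // return no documents, only the aggregation results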
Categories: springBoot2.3, Elasticsearch
Tags: group by, Elasticsearch