Spring Boot integration with MyBatis

--Main configuration file (.yml)

mybatis:
  mapper-locations: classpath:mapper/*.xml
  type-aliases-package: com.dhht.model
  config-location: classpath:mybatis-config.xml
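
Note that with config-location set as above, the MyBatis settings should live in mybatis-config.xml: recent versions of the MyBatis Spring Boot starter treat mybatis.config-location and mybatis.configuration.* as mutually exclusive, so don't mix the two.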

--Add the DAO-layer scan annotation to the startup class

@MapperScan({"com.dhht.dao","com.dhht.client.dao"})
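
For context, a minimal sketch of where this annotation sits, assuming a standard Spring Boot entry class (the class and package names here are illustrative):

package com.dhht;

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Scans the listed packages for MyBatis mapper interfaces at startup
@SpringBootApplication
@MapperScan({"com.dhht.dao", "com.dhht.client.dao"})
public class Application {
    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}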

--Custom mybatis-config.xml configuration file (adjust to your own needs)

<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE configuration
        PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
        "http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
    <!--<settings>
        <setting name="logImpl" value="STDOUT_LOGGING" />
        <setting name="mapUnderscoreToCamelCase" value="true" />
    </settings>-->
    <settings>
        <!-- Print executed SQL to the console -->
        <setting name="logImpl" value="STDOUT_LOGGING" />
        <!-- Call setters (keep map keys) even when the column value is null -->
        <setting name="callSettersOnNulls" value="true"/>
    </settings>

    <!-- Type aliases: register classes here when you want to refer to them by a short name in mapper files -->
    <typeAliases>
        <typeAlias type="com.dhht.util.page.Param" alias="pm"/>
        <typeAlias type="com.dhht.util.page.Page" alias="page"/>
        <typeAlias type="java.util.Map" alias="map"/>
        <package name="com.dhht.model"/>
    </typeAliases>

    <!-- Custom pagination plugin -->
    <!--<plugins>
        <plugin interceptor="com.dhht.util.page.PagePlugin">
            <property name="dialect" value="mysql"/>
            <property name="pageSqlId" value=".*QueryPage.*"/>
        </plugin>
    </plugins>-->
    <!-- PageHelper pagination plugin -->
    <!--<plugins>
        <plugin interceptor="com.github.pagehelper.PageInterceptor">
            <property name="dialect" value="mysql"/>
        </plugin>
    </plugins>-->

</configuration>

--Auto-generated id in the mapper file (this is the MySQL approach; Oracle differs)

<insert id="addCropAtta" useGeneratedKeys="true" keyProperty="id" parameterType="com.dhht.model.Attachment">
    insert into lv_attachment (flag, in_date, is_deleted, image_type, attachment_no, name, path, url, crop_no)
    values (#{flag,jdbcType=INTEGER}, #{inDate,jdbcType=TIMESTAMP}, #{isDeleted,jdbcType=CHAR}, #{imageType,jdbcType=VARCHAR},
        #{attachmentNo,jdbcType=VARCHAR}, #{name,jdbcType=VARCHAR}, #{path,jdbcType=VARCHAR}, #{url,jdbcType=VARCHAR}, #{cropNo,jdbcType=VARCHAR})
</insert>
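
After the insert returns, MyBatis writes the generated primary key back into the id property of the parameter object, so the caller can read it without a second query. A rough sketch of the matching DAO method and the calling side (the interface name, field names and the id type are assumptions for illustration):

package com.dhht.dao;

import com.dhht.model.Attachment;

// DAO interface matching the <insert id="addCropAtta"> mapping above
public interface AttachmentDao {
    int addCropAtta(Attachment attachment);
}

// In a service method: the generated key is available on the entity after the insert
Attachment attachment = new Attachment();
attachment.setName("license.jpg");
attachmentDao.addCropAtta(attachment);
Integer newId = attachment.getId();   // populated via useGeneratedKeys/keyProperty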

--Batch insert/update

int insertBatch(List<Map<String,Object>> list); -- DAO interface layer

<insert id="insertBatch" parameterType="java.util.List">
    insert into se_seal_order_seal (ID, SEAL_ORDER_ID, SEAL_ID)
    values
    <foreach collection="list" item="item" index="index" separator=",">
        (#{item.id}, #{item.orderId}, #{item.sealId})
    </foreach>
</insert>
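
On the Java side, each element of the list is a map whose keys match the #{item.*} references in the mapper. A rough sketch of the calling code (the UUID key generation and the sealOrderSealDao field are assumptions for illustration):

// Inside a service: build one map per row to insert
public int saveOrderSeals(String orderId, List<String> sealIds) {
    List<Map<String, Object>> rows = new ArrayList<>();
    for (String sealId : sealIds) {
        Map<String, Object> row = new HashMap<>();
        row.put("id", UUID.randomUUID().toString().replace("-", ""));  // assumed UUID primary key
        row.put("orderId", orderId);
        row.put("sealId", sealId);
        rows.add(row);
    }
    return sealOrderSealDao.insertBatch(rows);
}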

int updateBatchByMap(List<Map<String,Object>> list); -- DAO interface layer
<update id="updateBatchByMap" parameterType="java.util.List">
    <foreach collection="list" item="item" separator=";">
        update lv_attachment
        set is_deleted = #{item.isDeleted},
            flag = #{item.flag},
            image_type = #{item.imageType},
            crop_no = #{item.cropNo},
            seal_no = #{item.sealNo},
            crop_uuid = #{item.cropUuid},
            seal_uuid = #{item.sealUuid}
        where id = #{item.id}
    </foreach>
</update>
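
Note that this foreach joins complete update statements with a semicolon, so the whole batch is sent to MySQL as one multi-statement query; it only works when the JDBC URL contains allowMultiQueries=true, as in the data source configuration below.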

--Common dynamic SQL tag usage (pass sort parameters with ${xx})

<select id="getMarkersiteListNew" parameterType="java.util.Map" resultType="java.util.Map">
    SELECT
        ms.id,ms.name,ms.address,ms.mobile AS phone,ce.score,mse.longitude,mse.latitude,spa.min_price,lsad.avg_time,s.seal_count as `count`,sp.`METERIAL_PRICE` AS gmPrice,
        <choose>
            <when test="isEmpLaLo != null and isEmpLaLo != ''">
                CALCU_LAT_LON_DISTANCE(${latitude},${longitude},mse.`LATITUDE`,mse.`LONGITUDE`) AS distance_t
            </when>
            <otherwise>
                NULL AS distance_t
            </otherwise>
        </choose>
    FROM
        lv_marker_site ms
        LEFT JOIN se_marker_site_expansion mse ON mse.`MARKER_SITE_ID`=ms.`id`
        LEFT JOIN (SELECT MIN(pa.`METERIAL_PRICE`) AS min_price,MAX(pa.`METERIAL_PRICE`) AS max_price,pa.`MARKER_SITE_ID` FROM se_price_allocation pa GROUP BY pa.`MARKER_SITE_ID`) spa
            ON spa.MARKER_SITE_ID=ms.`id`
        LEFT JOIN (SELECT COUNT(ls.`id`) AS seal_count,ls.MARKER_SITE_ID FROM lv_seal ls WHERE ls.`status` in ('1','4','5','6','7') and (ls.FETCH_DATE &gt;= DATE_SUB(CURDATE(), INTERVAL 3 MONTH)) GROUP BY ls.MARKER_SITE_ID) s
            ON (s.MARKER_SITE_ID=ms.`id`)
        LEFT JOIN (SELECT sce.`MARKER_SITE_ID`,AVG(IF(ISNULL(sce.`SEAL_QUALITY`),0,sce.`SEAL_QUALITY`)+IF(ISNULL(sce.`SERVICE_QUALITY`),0,sce.`SERVICE_QUALITY`))/2 AS score FROM se_customer_evaluation sce GROUP BY sce.`MARKER_SITE_ID`) ce
            ON ce.MARKER_SITE_ID=ms.`id`
        LEFT JOIN (SELECT lsd.`marker_site_id`,AVG(DATEDIFF(lsd.`fetch_date`,lsd.`fill_date`)) AS avg_time FROM lv_seal lsd WHERE lsd.`fetch_date` IS NOT NULL AND lsd.`fill_date` IS NOT NULL GROUP BY lsd.`marker_site_id`) lsad
            ON lsad.marker_site_id=ms.`id`
        LEFT JOIN se_price_allocation sp ON (sp.MARKER_SITE_ID=ms.`id` AND sp.METERIAL_TYPE='05')
    <where>
        mse.BUSINESS_STATUS=1
        <if test="adcode != null and adcode != ''">
            AND ms.`city_no`= ${adcode}
        </if>
        <if test="search != null and search != ''">
            AND ms.`name` LIKE CONCAT('%',#{search},'%')
        </if>
    </where>
    <choose>
        <when test="sort eq 'count'">
            ORDER BY `count` desc,gmPrice IS NULL,gmPrice,score DESC,avg_time IS NULL,avg_time
        </when>
        <when test="sort eq 'price'">
            ORDER BY gmPrice IS NULL,gmPrice ${orb},score DESC,avg_time IS NULL,avg_time
        </when>
        <when test="sort eq 'distance'">
            ORDER BY distance_t IS NULL,distance_t,ms.id,gmPrice IS NULL,gmPrice,score DESC,avg_time IS NULL,avg_time
        </when>
        <when test="sort eq 'time'">
            ORDER BY avg_time IS NULL,avg_time
        </when>
        <otherwise>
            ORDER BY score desc,gmPrice IS NULL,gmPrice,avg_time IS NULL,avg_time
        </otherwise>
    </choose>
</select>
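
Since the statement takes a java.util.Map, the caller only needs to supply the keys that the dynamic tags test for. A rough sketch of a caller fragment (the DAO name, return type and sample values are illustrative; latitude, longitude, adcode and orb are spliced in with ${}, so they must come from trusted code, not raw user input):

// Build the parameter map expected by getMarkersiteListNew
Map<String, Object> params = new HashMap<>();
params.put("isEmpLaLo", "1");        // non-empty -> compute distance_t
params.put("latitude", "28.19");     // ${} substitution
params.put("longitude", "112.97");   // ${} substitution
params.put("adcode", "430100");      // optional city filter, ${} substitution
params.put("search", "xx");          // optional name keyword, bound safely with #{}
params.put("sort", "price");         // count / price / distance / time
params.put("orb", "asc");            // ORDER BY direction for the price sort
List<Map<String, Object>> sites = markerSiteDao.getMarkersiteListNew(params);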

--Common settings and troubleshooting

Keep the keys of a returned map even when the column value is null:
<setting name="callSettersOnNulls" value="true"/>
Print the executed SQL:
<setting name="logImpl" value="STDOUT_LOGGING" />

Column names in a <select> must not be duplicated, otherwise the SQL error Duplicate column name 'NAME' is thrown.

--Configure the MySQL data source (Druid starter dependency + .yml)

<!-- https://mvnrepository.com/artifact/com.alibaba/druid-spring-boot-starter -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid-spring-boot-starter</artifactId>
    <version>1.1.10</version>
</dependency>

spring:
  datasource:
    name: qggz_ww
    type: com.alibaba.druid.pool.DruidDataSource
    # Druid-specific settings
    druid:
      # filters for monitoring/statistics interception
      filters: stat
      driver-class-name: com.mysql.jdbc.Driver
      # Basic connection properties
      #url: jdbc:mysql://localhost:3306/qggz_ww?useSSL=true&useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true
      # Local database
      url: jdbc:mysql://localhost:3306/local_hnyz?useSSL=true&useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true
      username: root
      password: root
      # Demo database
      #url: jdbc:mysql://127.0.0.21:3307/pro_seal_hn_ww_v1.0?useSSL=true&useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true
      #username: root
      #password: 123456

      # Initial / minimum / maximum pool size
      initial-size: 1
      min-idle: 1
      max-active: 20
      # Maximum wait time (ms) when acquiring a connection
      max-wait: 60000
      # Interval (ms) between eviction runs that close idle connections
      time-between-eviction-runs-millis: 60000
      # Minimum time (ms) a connection stays idle in the pool before it can be evicted
      min-evictable-idle-time-millis: 300000
      validation-query: SELECT 'x'
      test-while-idle: true
      test-on-borrow: false
      test-on-return: false
      # Enable PSCache and set its per-connection size: true for Oracle, false for MySQL;
      # false is also recommended when there are many sharded databases/tables
      pool-prepared-statements: false
      max-pool-prepared-statement-per-connection-size: 20

 
