开发工具类
一、前言
在工作中,难免遇到各种各样的问题,每个人似乎都有一套自己的解决方案。而我,又不想每次解决完问题就把东西扔了,捡了芝麻,丢了西瓜,什么时候才能进步勒?学习要靠积累,毕竟量变才能引起质变嘛。所以写了这篇博文,不定时更新自己项目中遇到的问题、踩过的那些坑......
二、项目
1、文件流
java 将两张图片合成一张图片
/**
 * Downloads two images by URL and merges them side by side into one JPG
 * under /home/picFiles.
 *
 * @param url1 URL of the left image
 * @param url2 URL of the right image
 * @return the generated file path, or null when download/merge fails
 */
public String joinImage(String url1, String url2) {
    try (InputStream is1 = getImgConn(url1);
         InputStream is2 = getImgConn(url2)) {
        if (is1 == null || is2 == null) {
            // download failed; avoid the NPE ImageIO.read(null) would throw
            return null;
        }
        BufferedImage image1 = ImageIO.read(is1);
        BufferedImage image2 = ImageIO.read(is2);
        // Sum the widths and take the taller height so differently sized
        // inputs are no longer cropped (the old code assumed equal sizes).
        BufferedImage combined = new BufferedImage(
                image1.getWidth() + image2.getWidth(),
                Math.max(image1.getHeight(), image2.getHeight()),
                BufferedImage.TYPE_INT_RGB);
        Graphics g = combined.getGraphics();
        g.drawImage(image1, 0, 0, null);
        g.drawImage(image2, image1.getWidth(), 0, null);
        g.dispose(); // release the native resources held by the Graphics context
        String imgURL = System.currentTimeMillis() + ".jpg";
        ImageIO.write(combined, "JPG", new File("/home/picFiles", imgURL));
        return "/home/picFiles/" + imgURL;
    } catch (IOException e) {
        e.printStackTrace();
    }
    return null;
}

/**
 * Downloads the remote resource fully into memory and returns it as a
 * re-readable in-memory stream. The network stream is now closed via
 * try-with-resources (the old version leaked it).
 *
 * @return an in-memory stream over the downloaded bytes, or null on failure
 */
private InputStream getImgConn(String url) {
    try {
        URLConnection connection = new URL(url).openConnection();
        try (InputStream in = connection.getInputStream()) {
            ByteArrayOutputStream buf = new ByteArrayOutputStream(8192);
            byte[] b = new byte[1024];
            int len;
            while ((len = in.read(b)) != -1) {
                buf.write(b, 0, len);
            }
            return new ByteArrayInputStream(buf.toByteArray());
        }
    } catch (IOException e) { // MalformedURLException is an IOException subtype
        e.printStackTrace();
    }
    return null;
}
/**
 * Merges two local image files side by side and writes the result as a JPG
 * under /home/picFiles.
 *
 * @param file1 left image file
 * @param file2 right image file
 * @return the generated file path, or null when reading/writing fails
 */
public static String joinImage(File file1, File file2) {
    try {
        BufferedImage image1 = ImageIO.read(file1);
        BufferedImage image2 = ImageIO.read(file2);
        // Sum the widths and take the taller height so differently sized
        // inputs are no longer cropped (the old code assumed equal sizes).
        BufferedImage combined = new BufferedImage(
                image1.getWidth() + image2.getWidth(),
                Math.max(image1.getHeight(), image2.getHeight()),
                BufferedImage.TYPE_INT_RGB);
        Graphics g = combined.getGraphics();
        g.drawImage(image1, 0, 0, null);
        g.drawImage(image2, image1.getWidth(), 0, null);
        g.dispose(); // release the native resources held by the Graphics context
        String imgURL = System.currentTimeMillis() + ".jpg";
        ImageIO.write(combined, "JPG", new File("/home/picFiles", imgURL));
        return "/home/picFiles/" + imgURL;
    } catch (IOException e) {
        e.printStackTrace();
    }
    return null;
}
java 图片像素、尺寸校验
@Slf4j public class ImagesFormatUtil { /** * 图片的像素判断 * * @param file 文件 * @param imageWidth 图片宽度 * @param imageHeight 图片高度 * @return true:上传图片宽度和高度都小于等于规定最大值 */ public static Boolean checkImageElement(File file, int imageWidth, int imageHeight) { if (!file.exists()) { return false; } try { BufferedImage bufferedImage = ImageIO.read(file); int width = bufferedImage.getWidth(); int height = bufferedImage.getHeight(); if (height == imageHeight && width == imageWidth) { return true; } } catch (IOException e) { log.error("ImageIO Read IOException:{}", file.getName(), e); } return false; } /** * 校验图片比例 * * @param file 图片 * @param imageWidth 宽 * @param imageHeight 高 * @return true:符合要求 * @throws IOException */ public static boolean checkImageScale(File file, int imageWidth, int imageHeight) { if (!file.exists()) { return false; } try { BufferedImage bufferedImage = ImageIO.read(file); int width = bufferedImage.getWidth(); int height = bufferedImage.getHeight(); if (imageHeight != 0 && height != 0) { int scale1 = imageHeight / imageWidth; int scale2 = height / width; if (scale1 == scale2) { return true; } } } catch (IOException e) { log.error("ImageIO Read IOException:{}", file.getName(), e); } return false; } }
java 删除文件目录
/**
 * File-system helpers.
 */
public class FileUtils {

    private FileUtils() {
    }

    /**
     * Recursively deletes a directory tree (files first, then the directory).
     * Does nothing when the path does not exist.
     *
     * @param path directory to delete
     */
    public static void deleteDir(String path) {
        File dir = new File(path);
        if (!dir.exists()) {
            return;
        }
        // listFiles() returns null on I/O error or when the path is not a
        // directory; the old code would NPE in that case.
        File[] children = dir.listFiles();
        if (children != null) {
            for (File child : children) {
                if (child.isDirectory()) {
                    deleteDir(child.getPath());
                } else {
                    child.delete();
                }
            }
        }
        dir.delete();
    }
}
ftp 传输工具类 — 基于 apache 的 commons-net(未解决文件目录创建问题)
public class FtpClient { private static final Logger logger = LoggerFactory.getLogger(FtpClient.class); //PORT(主动)方式的连接过程是:客户端向服务器的FTP端口(默认是21)发送连接请求,服务器接受连接,建立一条命令链路当需要传送数据时, //客户端在命令链路上用PORT命令告诉服务器:我打开了XXXX端口,你过来连接我于是服务器从20端口向客户端的XXXX端口发送连接请求,建立一条数据链路来传送数据 public static final Integer MODE_PORT = 1;//主动 //PASV(被动)方式的连接过程是:客户端向服务器的FTP端口(默认是21)发送连接请求,服务器接受连接,建立一条命令链路当需要传送数据时, //服务器在命令链路上用PASV命令告诉客户端:我打开了XXXX端口,你过来连接我于是客户端向服务器的XXXX端口发送连接请求,建立一条数据链路来传送数据 public static final Integer MODE_PASV = 2;//被动 private String host; private Integer port = 21; //默认端口是21 private String user; private String password; private boolean ftps = false; //默认使用ftp传输 private Integer connectMode = MODE_PORT;//默认为主动 /** * 构造函数 * * @param ftpHost ftp 地址 * @param user 用户名 * @param password 密码 * @param ftps 是否使用ftps * @param connectMode 请求方式 FtpClient.MODE_PORT 主动 FtpClient.MODE_PASV 被动 */ public FtpClient(String ftpHost, Integer port, String user, String password, boolean ftps, Integer connectMode) { this.host = ftpHost; this.port = port; this.user = user; this.password = password; this.ftps = ftps; this.connectMode = connectMode; } public FtpClient(String ftpHost, String user, String password, boolean ftps) { this.host = ftpHost; this.user = user; this.password = password; this.ftps = ftps; } public FtpClient(String ftpHost, String user, String password) { this.host = ftpHost; this.user = user; this.password = password; } /** * 认证账号密码 * * @return */ private FTPClient authentication() throws IOException { FTPClient ftpClient; if (ftps) { ftpClient = new FTPSClient(); } else { ftpClient = new FTPClient(); } logger.info(host + "----" + port); ftpClient.connect(host, port); boolean loginSuccess = ftpClient.login(user, password); if (connectMode.equals(MODE_PASV)) { ftpClient.enterLocalPassiveMode();//pasv模式 } if (!loginSuccess) { logger.error("ftp loginFail:" + ftpClient.getReplyCode()); } return ftpClient; } private void createDir(String uploadPath) { } /** * @param @param is * @param @param 
targetName * @param @param uploadPath * @Title: upload * @Description: 上传 */ public Boolean upload(InputStream is, String targetName, String uploadPath) { boolean success = false; FTPClient ftpClient = null; try { ftpClient = authentication(); //设置上传目录,上传目录必须存在 boolean changeWorkingDirectory = ftpClient.changeWorkingDirectory(new String(uploadPath.toString().getBytes("GBK"), "iso-8859-1")); logger.info("设置上传目录是否成功:" + changeWorkingDirectory); if (changeWorkingDirectory) { //设置文件类型(二进制) ftpClient.setFileType(FTPClient.BINARY_FILE_TYPE); ftpClient.setBufferSize(1024); ftpClient.setControlEncoding("GBK"); success = ftpClient.storeFile(targetName, is); } logger.info("服务端回馈:" + success + "---" + ftpClient.getReplyString()); } catch (IOException e) { logger.error("ftp upload err", e); } finally { IOUtils.closeQuietly(is); try { if (ftpClient != null && ftpClient.isConnected()) { ftpClient.disconnect(); } } catch (IOException e) { logger.error("close ftp err", e); } } return success; } /** * @param @param is * @param @param targetName * @param @param uploadPath * @Title: upload * @Description: 上传 */ public Boolean upload(File file, String targetName, String uploadPath) throws IllegalArgumentException, FileNotFoundException { return upload(new FileInputStream(file), targetName, uploadPath); } /** * @param remotePath ftp远程服务器上的路径 * @param fileName 要下载的文件名 * @param localPath 要保存的本地路径 * @return */ public Boolean downLoad(String remotePath, String fileName, String localPath) { boolean success = false; FTPClient ftpClient = null; try { ftpClient = authentication(); ftpClient.changeWorkingDirectory(new String(remotePath.toString().getBytes("GBK"), "iso-8859-1")); File file = new File(localPath + File.separatorChar + fileName); OutputStream outputStream = new FileOutputStream(file); success = ftpClient.retrieveFile(fileName, outputStream); outputStream.close(); } catch (SocketException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } finally { try { if 
(ftpClient != null && ftpClient.isConnected()) { ftpClient.disconnect(); } } catch (IOException e) { logger.error("close ftp err", e); } } return success; } }
读取 zip 文件
public void zipRead(InputStream inputStream) throws IOException { // 构造zip输入流 ZipInputStream zip = new ZipInputStream(inputStream, Charset.forName("gbk")); ZipEntry tmpEntity; while ((tmpEntity = zip.getNextEntry()) != null) { byte[] buf = new byte[1024]; int num; ByteArrayOutputStream baos = new ByteArrayOutputStream(); while ((num = zip.read(buf, 0, buf.length)) != -1) { baos.write(buf, 0, num); } baos.flush(); baos.close(); // 处理转换文件 handleFile(tmpEntity.getName(), baos.toByteArray()); } zip.close(); inputStream.close(); } private void handleFile(String fileName, byte[] tmpByte) { InputStream inputStream = new ByteArrayInputStream(tmpByte); BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, Charset.forName("gbk"))); String line; // 循环解析文件 try { log.info("##下载文件名称fileName:" + fileName); while ((line = br.readLine()) != null) { log.info(line); } } catch (IOException e) { e.printStackTrace(); } }
csv 文件操作 — 基于 apache 的 commons-csv
/** * CSV 文件写入 * * @param content 文件内容 * @param file 写入的文件目录 */ public static void csvWriter(List<List<Object>> content, File file) { CSVFormat csvFormat = CSVFormat.DEFAULT; try (FileWriter fileWriter = new FileWriter(file, true); CSVPrinter csvPrinter = new CSVPrinter(fileWriter, csvFormat)) { //处理内容 if (!CollectionUtils.isEmpty(content)) { csvPrinter.printRecords(content); } csvPrinter.flush(); } catch (IOException e) { e.printStackTrace(); } }
2、工具类
反射工具类 — 从子类向上遍历,得到类型属性和方法
/**
 * Reflection helpers that search fields and methods from the concrete class
 * up through its superclasses (stopping before {@link Object}).
 */
public class ReflectUtils {

    private ReflectUtils() {
    }

    /**
     * Reads a (possibly private, possibly inherited) field value.
     *
     * @return the field value, or null when no such field exists
     */
    public static Object getFieldValue(Object instance, String fieldName) throws IllegalAccessException {
        Field field = getField(instance.getClass(), fieldName);
        if (field == null) {
            return null;
        }
        field.setAccessible(true);
        return field.get(instance);
    }

    /**
     * Writes a (possibly private, possibly inherited) field.
     * Silently does nothing when the field does not exist.
     */
    public static void setFieldValue(Object instance, String fieldName, Object fieldValue) throws IllegalAccessException {
        Field field = getField(instance.getClass(), fieldName);
        if (field == null) {
            return;
        }
        field.setAccessible(true);
        field.set(instance, fieldValue);
    }

    /**
     * Invokes a (possibly private, possibly inherited) method.
     * Lookup matches each argument's exact runtime class, so methods with
     * primitive parameters (int, long, ...) will not be found.
     *
     * @return the method's return value, or null when no such method exists
     */
    public static Object invokeMethod(Object instance, String methodName, Object... args)
            throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
        Method method = getMethod(instance.getClass(), methodName, args);
        if (method == null) {
            return null;
        }
        method.setAccessible(true);
        return method.invoke(instance, args);
    }

    /**
     * Finds a declared field, walking up the class hierarchy.
     * The null check fixes an NPE the old version hit when the walk reached a
     * class with no superclass (Object itself, or an interface).
     */
    private static Field getField(Class<?> aClass, String fieldName) {
        try {
            return aClass.getDeclaredField(fieldName);
        } catch (NoSuchFieldException e) {
            Class<?> superclass = aClass.getSuperclass();
            if (superclass == null || superclass.equals(Object.class)) {
                return null;
            }
            return getField(superclass, fieldName);
        }
    }

    /**
     * Finds a declared method, walking up the class hierarchy.
     */
    private static Method getMethod(Class<?> aClass, String methodName, Object... args) {
        Class<?>[] parameterTypes = new Class<?>[args.length];
        for (int i = 0; i < args.length; i++) {
            Object arg = args[i];
            if (arg == null) {
                // a null argument carries no type information; fail fast with
                // a clear message instead of a bare NPE
                throw new NullPointerException("args[" + i + "] is null; cannot infer parameter type");
            }
            parameterTypes[i] = arg.getClass();
        }
        try {
            return aClass.getDeclaredMethod(methodName, parameterTypes);
        } catch (NoSuchMethodException e) {
            Class<?> superclass = aClass.getSuperclass();
            if (superclass == null || superclass.equals(Object.class)) {
                return null;
            }
            return getMethod(superclass, methodName, args);
        }
    }
}
yaml 读取工具类
/**
 * Loads classpath YAML files into flat java.util.Map structures via Spring's
 * YamlMapFactoryBean.
 */
public class YamlUtils {

    private static final Logger logger = LoggerFactory.getLogger(YamlUtils.class);

    private YamlUtils() {
    }

    /**
     * Parses the given classpath YAML file into a map.
     *
     * @param fileName classpath-relative file name, e.g. "application.yml"
     * @return the parsed key/value map
     */
    public static Map<String, Object> load(String fileName) {
        YamlMapFactoryBean factory = new YamlMapFactoryBean();
        factory.setResources(new ClassPathResource(fileName));
        logger.info("load properties file from : " + fileName);
        return factory.getObject();
    }

    /**
     * Looks up a top-level key in the default "application.yml".
     *
     * @return the value rendered via String.valueOf (so "null" when absent)
     */
    public static String getFromDefault(String key) {
        return String.valueOf(load("application.yml").get(key));
    }
}
properties 读取工具类
/**
 * Helpers for loading UTF-8 {@link Properties} files and resolving
 * {@code ${placeholder}} references against other keys in the same file.
 */
public class PropertyUtils {

    private static final Logger logger = LoggerFactory.getLogger(PropertyUtils.class);

    /** Matches ${...} placeholders; group 1 is the key between the braces. */
    private static final Pattern PATTERN = Pattern.compile("\\$\\{([^\\}]+)\\}");

    public PropertyUtils() {
    }

    /**
     * Returns the value for {@code key} with every ${otherKey} placeholder
     * replaced by that other key's value. Unresolvable placeholders are left
     * in place as-is.
     *
     * @return the resolved value, or null when the key is absent
     */
    public static String get(Properties properties, String key) {
        String value = properties.getProperty(key);
        if (value == null) {
            logger.warn("get null value by key " + key + " from this properties !");
            return null;
        }
        Matcher matcher = PATTERN.matcher(value);
        StringBuffer buffer = new StringBuffer();
        while (matcher.find()) {
            String matcherKey = matcher.group(1);
            String matcherValue = properties.getProperty(matcherKey);
            if (matcherValue != null) {
                // quoteReplacement: without it, a literal '$' or '\' in the
                // replacement is interpreted as a group reference / escape and
                // corrupts the output or throws IllegalArgumentException
                matcher.appendReplacement(buffer, Matcher.quoteReplacement(matcherValue));
            }
        }
        matcher.appendTail(buffer);
        return buffer.toString();
    }

    /**
     * Loads a properties file from a file-system path, decoded as UTF-8.
     * Returns an empty Properties on I/O failure.
     */
    public static Properties loadResourcesProperties(String path) {
        Properties properties = new Properties();
        try (InputStream inputStream = new FileInputStream(path)) {
            properties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
        } catch (IOException e) {
            // log with context instead of the old printStackTrace
            logger.error("load properties from path [" + path + "] failed", e);
        }
        return properties;
    }

    /**
     * Loads a properties file from the classpath (path resolved relative to
     * this class), decoded as UTF-8. Returns an empty Properties when the
     * resource is missing or unreadable.
     */
    public static Properties loadClassPathProperties(String fileName) {
        Properties properties = new Properties();
        try (InputStream inputStream = PropertyUtils.class.getResourceAsStream(fileName)) {
            if (inputStream == null) {
                logger.error("Can not found properties file : [" + fileName + "]");
                return properties;
            }
            properties.load(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
        } catch (IOException e) {
            logger.error("load properties from classpath [" + fileName + "] failed", e);
        }
        return properties;
    }
}
json 处理工具类 — 基于 spring jackson
public class JsonUtils { private static final Logger log = LoggerFactory.getLogger(JsonUtils.class); private static ObjectMapper objectMapper = new ObjectMapper(); static { // objectMapper.setSerializationInclusion(JsonInclude.Include.ALWAYS); objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); objectMapper.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); } public static <T> T readValue(String jsonStr, Class<T> clazz) throws IOException { return objectMapper.readValue(jsonStr, clazz); } public static String writeJsonStr(Object obj) throws JsonProcessingException { return objectMapper.writeValueAsString(obj); } public static String writePrettyJsonStr(Object obj) throws JsonProcessingException { return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(obj); } public static void writeJsonStream(OutputStream out, Object obj) throws IOException { objectMapper.writeValue(out, obj); } public static <T> List<T> readListValue(String jsonStr, Class<T> clazz) throws IOException { JavaType javaType = objectMapper.getTypeFactory().constructParametricType(List.class, clazz); return objectMapper.readValue(jsonStr, javaType); } public static ArrayNode readArray(String jsonStr) throws IOException { JsonNode node = objectMapper.readTree(jsonStr); if (node.isArray()) { return (ArrayNode) node; } return null; } public static JsonNode readNode(String jsonStr) throws IOException { return objectMapper.readTree(jsonStr); } public static ArrayNode newArrayNode() { return objectMapper.createArrayNode(); } public static ObjectNode newJsonNode() { return objectMapper.createObjectNode(); } public static String toJSONString(Object object) { try { return writeJsonStr(object); } catch (JsonProcessingException e) { log.error("JsonUtils toJSONString error. 
object:{}", object.toString(), e); } return ""; } public static byte[] toJSONBytes(Object object) { byte[] bytes = new byte[0]; try { bytes = objectMapper.writeValueAsBytes(object); } catch (JsonProcessingException e) { log.error("JsonUtils toJSONBytes error. object:{}", object.toString(), e); } return bytes; } }
spring Bean 工具类
@Component public class SpringContextHolder implements ApplicationContextAware { private static ApplicationContext applicationContext; public SpringContextHolder() { } public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { applicationContext = applicationContext; } public static Object getBean(String id) throws BeansException { return applicationContext.getBean(id); } public static <T> T getBean(Class<T> clz) throws BeansException { return applicationContext.getBean(clz); } public static String[] getBeanNames() { return applicationContext.getBeanDefinitionNames(); } public static boolean containsBean(String id) { return applicationContext.containsBean(id); } public static boolean isSingleton(String id) throws NoSuchBeanDefinitionException { return applicationContext.isSingleton(id); } public static Class getType(String id) throws NoSuchBeanDefinitionException { return applicationContext.getType(id); } public static String[] getAliases(String id) throws NoSuchBeanDefinitionException { return applicationContext.getAliases(id); } }
Base64 编码工具类 — 基于 java8(注:Base64 是一种编码方案而非加密,不提供任何保密性)
/**
 * Base64 encode/decode helpers built on java.util.Base64 (Java 8+).
 * Note: Base64 is an encoding, not encryption — it offers no secrecy.
 */
public class Base64Utils {

    private static final Logger logger = LoggerFactory.getLogger(Base64Utils.class);

    private static final Base64.Decoder DECODER = Base64.getDecoder();
    private static final Base64.Encoder ENCODER = Base64.getEncoder();

    /** Buffer size for file reads. */
    private static final int CACHE_SIZE = 1024;

    private Base64Utils() {
    }

    /**
     * Decodes a BASE64 string to raw bytes. Line breaks are stripped first so
     * MIME-style wrapped input is accepted by the basic decoder.
     *
     * @param base64 encoded text
     * @return decoded bytes
     */
    public static byte[] decode(String base64) {
        return DECODER.decode(base64.replaceAll("\r|\n", ""));
    }

    /**
     * Encodes raw bytes as a BASE64 string.
     * (encodeToString is equivalent to the old new String(encode(bytes)):
     * Base64 output is pure ASCII.)
     */
    public static String encode(byte[] bytes) {
        return ENCODER.encodeToString(bytes);
    }

    /**
     * Encodes a whole file as a BASE64 string. The file is read fully into
     * memory — beware of very large files.
     *
     * @param filePath absolute file path
     */
    public static String encodeFile(String filePath) {
        byte[] bytes = fileToByte(filePath);
        return encode(bytes);
    }

    /**
     * Decodes a BASE64 string and writes the bytes to a file.
     *
     * @param filePath absolute destination path
     * @param base64   encoded content
     */
    public static void decodeToFile(String filePath, String base64) {
        byte[] bytes = decode(base64);
        byteArrayToFile(bytes, filePath);
    }

    /**
     * Reads a whole file into a byte array.
     *
     * @param filePath file path
     * @return the file contents, or an empty array when the file does not
     *         exist or cannot be read
     */
    public static byte[] fileToByte(String filePath) {
        File file = new File(filePath);
        if (!file.exists()) {
            return new byte[0];
        }
        try (FileInputStream in = new FileInputStream(file);
             ByteArrayOutputStream out = new ByteArrayOutputStream(2048)) {
            byte[] cache = new byte[CACHE_SIZE];
            int nRead;
            while ((nRead = in.read(cache)) != -1) {
                out.write(cache, 0, nRead);
            }
            return out.toByteArray();
        } catch (IOException e) {
            logger.error("Base64Utils fileToByte IOException.filePath:{}", filePath, e);
            return new byte[0];
        }
    }

    /**
     * Writes bytes to a file, creating parent directories as needed.
     * The output stream is now managed with try-with-resources — the old
     * version leaked it whenever a write threw — and the null parent guard
     * fixes an NPE for bare filenames with no directory component.
     *
     * @param bytes    data to write
     * @param filePath destination path
     */
    public static void byteArrayToFile(byte[] bytes, String filePath) {
        File destFile = new File(filePath);
        File parent = destFile.getParentFile();
        if (parent != null && !parent.exists()) {
            parent.mkdirs();
        }
        try (OutputStream out = new FileOutputStream(destFile)) {
            out.write(bytes);
        } catch (IOException e) {
            logger.error("Base64Utils byteArrayToFile IOException.filePath:{}", filePath, e);
        }
    }
}
md5 加密工具类 — 可加密文件流
public class Md5Utils { private static final Logger logger = LoggerFactory.getLogger(Md5Utils.class); private static char[] HEX_DIGITS = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; private static ThreadLocal<MessageDigest> digestThreadLocal = new ThreadLocal<MessageDigest>() { @Override protected MessageDigest initialValue() { try { return MessageDigest.getInstance("MD5"); } catch (NoSuchAlgorithmException nsaex) { logger.error("Md5Utils 初始化失败,MessageDigest 不支持 MD5", nsaex); } return null; } }; private Md5Utils() { } /** * 生成字符串的 Md5 校验值 * * @param str * @return */ public static String encode(String str) { return encode(str.getBytes()); } /** * 生成文件流的 Md5 校验值 * * @param inputStream * @return * @throws IOException */ public static String encode(InputStream inputStream) { MessageDigest messageDigest = digestThreadLocal.get(); messageDigest.reset(); byte[] buffer = new byte[1024]; int numRead; try { while ((numRead = inputStream.read(buffer)) > 0) { messageDigest.update(buffer, 0, numRead); } } catch (IOException e) { logger.error("Md5Utils encode inputStream read error", e); } finally { try { inputStream.close(); } catch (IOException e) { logger.error("Md5Utils encode inputStream close error", e); } } return bufferToHex(messageDigest.digest()); } public static String encode(byte[] bytes) { MessageDigest messageDigest = digestThreadLocal.get(); messageDigest.reset(); messageDigest.update(bytes); return bufferToHex(messageDigest.digest()); } private static String bufferToHex(byte[] bytes) { return bufferToHex(bytes, 0, bytes.length); } private static String bufferToHex(byte[] bytes, int m, int n) { StringBuffer stringbuffer = new StringBuffer(2 * n); int k = m + n; for (int l = m; l < k; l++) { appendHexPair(bytes[l], stringbuffer); } return stringbuffer.toString().toUpperCase(); } private static void appendHexPair(byte bt, StringBuffer stringbuffer) { // 取字节中高 4 位的数字转换, >>> char c0 = HEX_DIGITS[(bt & 0xf0) >> 4]; // 
为逻辑右移,将符号位一起右移,此处未发现两种符号有何不同 // 取字节中低 4 位的数字转换 char c1 = HEX_DIGITS[bt & 0xf]; stringbuffer.append(c0); stringbuffer.append(c1); } }
3、类型转换和格式校验
对象转换为 Map 集合 — 基于反射实现
/**
 * Converts an object into a Map via reflection, including fields inherited
 * from superclasses (everything below Object).
 */
public class MapUtils {

    private MapUtils() {
    }

    /**
     * Maps every declared field (own and inherited) of obj to its value.
     * NOTE(review): when a subclass shadows a superclass field of the same
     * name, the superclass value overwrites the subclass value in the map
     * (superclass fields are added last) — confirm this is intended.
     *
     * @param obj source object, must not be null
     * @return map of field name to field value
     */
    public static Map<String, Object> convertObjToMap(Object obj) {
        // build() is defined elsewhere in the original source; it supplies
        // the result-map instance
        Map<String, Object> resultMap = build();
        List<Field> fieldList = buildFieldList(obj.getClass(), null);
        if (CollectionUtils.isNotEmpty(fieldList)) {
            try {
                for (Field field : fieldList) {
                    field.setAccessible(true);
                    resultMap.put(field.getName(), field.get(obj));
                }
            } catch (IllegalAccessException e) {
                e.printStackTrace();
            }
        }
        return resultMap;
    }

    /**
     * Collects declared fields from aClass up to (but excluding) Object.
     * The null check fixes an NPE the old version hit when the walk reached a
     * class with no superclass (Object itself, or an interface).
     */
    private static List<Field> buildFieldList(Class<?> aClass, List<Field> fieldList) {
        if (CollectionUtils.isEmpty(fieldList)) {
            fieldList = new ArrayList<>();
        }
        fieldList.addAll(Arrays.asList(aClass.getDeclaredFields()));
        Class<?> superclass = aClass.getSuperclass();
        if (superclass == null || superclass.equals(Object.class)) {
            return fieldList;
        }
        buildFieldList(superclass, fieldList);
        return fieldList;
    }
}
4、日志输出归档
log4j.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration PUBLIC "-//log4j/log4j Configuration//EN" "log4j.dtd">
<!--
  Per-level log archiving: each LevelRangeFilter pins an appender to exactly
  one level. The root level must be at least as low as DEBUG, otherwise the
  debug/info appenders never receive any event — the original root level of
  "error" silently disabled both of them.
-->
<log4j:configuration>
    <appender name="console" class="org.apache.log4j.ConsoleAppender">
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="[%d]-[%p]-[Thread: %t]-[%C.%M():%L]: %m%n"/>
        </layout>
    </appender>

    <!-- DEBUG events only -->
    <appender name="debugFile" class="org.apache.log4j.RollingFileAppender">
        <param name="file" value="/usr/local/yycx/logs/admin/debug.log"/>
        <param name="append" value="true"/>
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="[%d]-[%p]-[Thread: %t]-[%C.%M():%L]: %m%n"/>
        </layout>
        <!-- param casing normalized to match the other filters -->
        <filter class="org.apache.log4j.varia.LevelRangeFilter">
            <param name="LevelMin" value="DEBUG"/>
            <param name="LevelMax" value="DEBUG"/>
        </filter>
    </appender>

    <!-- INFO events only -->
    <appender name="infoFile" class="org.apache.log4j.RollingFileAppender">
        <param name="file" value="/usr/local/yycx/logs/admin/info.log"/>
        <param name="append" value="true"/>
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="[%d]-[%p]-[Thread: %t]-[%C.%M():%L]: %m%n"/>
        </layout>
        <filter class="org.apache.log4j.varia.LevelRangeFilter">
            <param name="LevelMin" value="INFO"/>
            <param name="LevelMax" value="INFO"/>
        </filter>
    </appender>

    <!-- ERROR events only -->
    <appender name="errorFile" class="org.apache.log4j.RollingFileAppender">
        <param name="file" value="/usr/local/yycx/logs/admin/error.log"/>
        <param name="append" value="true"/>
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="[%d]-[%p]-[Thread: %t]-[%C.%M():%L]: %m%n"/>
        </layout>
        <filter class="org.apache.log4j.varia.LevelRangeFilter">
            <param name="LevelMin" value="ERROR"/>
            <param name="LevelMax" value="ERROR"/>
        </filter>
    </appender>

    <root>
        <!-- "debug" (was "error") so the per-level appenders actually receive events -->
        <level value="debug"/>
        <appender-ref ref="console"/>
        <appender-ref ref="infoFile"/>
        <appender-ref ref="debugFile"/>
        <appender-ref ref="errorFile"/>
    </root>
</log4j:configuration>
logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<!--
  Level-split logging: each level goes to its own rolling file
  (log_error.log / log_warn.log / log_info.log / log_debug.log).
  Archives roll daily AND by size: log-<level>-yyyy-MM-dd.N.log, with N
  starting at 0 once the current file reaches 100MB.
  Change LOG_PATH below to relocate all log files.
-->
<configuration>
    <property name="LOG_PATH" value="/var/jmcui"/>

    <!-- ERROR file, daily + 100MB size-based rolling -->
    <appender name="FILEERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_PATH}/log_error.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- archived files; %d is the day, %i the size-based index -->
            <fileNamePattern>${LOG_PATH}/log-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%-5p [%d][%mdc{mdc_userId}] %C:%L - %m %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- accept exactly ERROR; level name upper-cased for consistency with
             the other filters (logback accepts either casing) -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- WARN file -->
    <appender name="FILEWARN" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_PATH}/log_warn.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/log-warn-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%-5p [%d][%mdc{mdc_userId}] %C:%L - %m %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>WARN</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- INFO file -->
    <appender name="FILEINFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_PATH}/log_info.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/log-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%-5p [%d][%mdc{mdc_userId}] %C:%L - %m %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- DEBUG file -->
    <appender name="FILEDEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_PATH}/log_debug.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/log-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%-5p [%d][%mdc{mdc_userId}] %C:%L - %m %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>DEBUG</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- console output for development; everything at DEBUG and above -->
    <appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
        <Target>System.out</Target>
        <encoder>
            <pattern>%-5p [%d][%mdc{mdc_userId}] %C:%L - %m %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>DEBUG</level>
        </filter>
    </appender>

    <!-- dedicated logger (additivity=false: events do NOT also go to root) -->
    <appender name="FILTER_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <File>${LOG_PATH}/log_filter.log</File>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} : %m%n</pattern>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/log_filter.%d{yyyy-MM-dd}</fileNamePattern>
        </rollingPolicy>
    </appender>
    <logger name="FILTER_INFO_LOGGER" additivity="false" level="INFO">
        <appender-ref ref="FILTER_INFO"/>
    </logger>

    <!-- dedicated logger (additivity=true: events ALSO reach the root appenders) -->
    <appender name="INTEREST_BEARING_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <File>${LOG_PATH}/log_interest_bearing.log</File>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss} : %m%n</pattern>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>INFO</level>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_PATH}/log_interest_bearing.%d{yyyy-MM-dd}</fileNamePattern>
        </rollingPolicy>
    </appender>
    <logger name="INTEREST_BEARING_LOGGER" additivity="true" level="INFO">
        <appender-ref ref="INTEREST_BEARING_INFO"/>
    </logger>

    <!-- Package-level overrides: keep SQL/framework logging at DEBUG even when
         the root level is higher (useful in production for SQL tracing). -->
    <logger name="org.springframework" level="DEBUG"/>
    <logger name="com.ibatis" level="DEBUG"/>
    <logger name="com.ibatis.common.jdbc.SimpleDataSource" level="DEBUG"/>
    <logger name="com.ibatis.common.jdbc.ScriptRunner" level="DEBUG"/>
    <logger name="com.ibatis.sqlmap.engine.impl.SqlMapClientDelegate" level="DEBUG"/>
    <logger name="java.sql.Connection" level="DEBUG"/>
    <logger name="java.sql.Statement" level="DEBUG"/>
    <logger name="java.sql.PreparedStatement" level="DEBUG"/>

    <!-- Tune this level per environment to keep log volume/performance sane. -->
    <root level="INFO">
        <!-- NOTE(review): with root at INFO, DEBUG events never reach
             FILEDEBUG through the root; only the DEBUG-level package loggers
             above feed it. Lower the root level to activate it globally. -->
        <appender-ref ref="FILEDEBUG"/>
        <appender-ref ref="FILEINFO"/>
        <appender-ref ref="FILEWARN"/>
        <appender-ref ref="FILEERROR"/>
        <!-- remove stdout in production -->
        <appender-ref ref="stdout"/>
    </root>
</configuration>
5、其他
二维码生成
<dependency>
    <groupId>net.glxn.qrgen</groupId>
    <artifactId>javase</artifactId>
    <!-- Pin an explicit version: the special "RELEASE" meta-version is
         deprecated and unsupported by Maven 3, and makes builds
         non-reproducible. 2.0 is the last published qrgen javase release. -->
    <version>2.0</version>
</dependency>
@RequestMapping(value = "/QrCode", method = RequestMethod.GET) @ApiOperation(value = "下载二维码", httpMethod = "GET") public void download(HttpServletResponse response) { try { Map<EncodeHintType, Object> hints = new HashMap<>(4); hints.put(EncodeHintType.MARGIN, 0); hints.put(EncodeHintType.CHARACTER_SET, "UTF-8"); BitMatrix bitMatrix = new QRCodeWriter().encode("二维码链接", BarcodeFormat.QR_CODE, 250, 250, hints); //1.去白边 int[] rec = bitMatrix.getEnclosingRectangle(); int resWidth = rec[2] + 1; int resHeight = rec[3] + 1; BitMatrix resMatrix = new BitMatrix(resWidth, resHeight); resMatrix.clear(); for (int i = 0; i < resWidth; i++) { for (int j = 0; j < resHeight; j++) { if (bitMatrix.get(i + rec[0], j + rec[1])) { resMatrix.set(i, j); } } } // 2 int width = resMatrix.getWidth(); int height = resMatrix.getHeight(); BufferedImage image = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB); for (int x = 0; x < width; x++) { for (int y = 0; y < height; y++) { image.setRGB(x, y, resMatrix.get(x, y) == true ? Color.BLACK.getRGB() : Color.WHITE.getRGB()); } } response.setHeader("Content-Disposition", "attachment; filename=" + URLEncoder.encode("二维码.png", "UTF-8")); response.setContentType("application/octet-stream"); response.setCharacterEncoding("utf-8"); ImageIO.write(image, "png", response.getOutputStream()); } catch (Exception e) { e.printStackTrace(); } }
6、手写一个一致性 hash 算法
/**
 * Consistent-hashing ring. {@code S} encapsulates a real machine node
 * (e.g. name, password, ip, port). Each real node is projected onto the ring
 * as many virtual nodes so that keys spread evenly even with few machines.
 */
public class Shard<S> {

    /** Virtual nodes per real node; more virtual nodes = smoother key distribution. */
    private static final int VIRTUAL_NODE_NUM = 100;

    /** The ring: virtual-node hash -> real node. TreeMap enables clockwise lookup via tailMap. */
    private final TreeMap<Long, S> nodes;

    /** The real machine nodes. */
    private final List<S> shards;

    /**
     * Builds the hash ring.
     *
     * @param shards non-empty list of real machine nodes
     * @throws IllegalArgumentException if {@code shards} is null or empty
     *         (lookups on an empty ring would otherwise fail at runtime)
     */
    public Shard(List<S> shards) {
        if (shards == null || shards.isEmpty()) {
            throw new IllegalArgumentException("shards must not be null or empty");
        }
        this.shards = shards;
        this.nodes = new TreeMap<>();
        init();
    }

    /** Associates VIRTUAL_NODE_NUM virtual nodes on the ring with every real node. */
    private void init() {
        for (int i = 0; i < shards.size(); i++) {
            final S shardInfo = shards.get(i);
            for (int n = 0; n < VIRTUAL_NODE_NUM; n++) {
                nodes.put(hash("SHARD-" + i + "-NODE-" + n), shardInfo);
            }
        }
    }

    /**
     * Returns the real node responsible for {@code key}: the first virtual node
     * clockwise from hash(key), wrapping around to the start of the ring.
     */
    public S getShardInfo(String key) {
        SortedMap<Long, S> tail = nodes.tailMap(hash(key));
        if (tail.isEmpty()) {
            // wrapped past the last virtual node - continue at the ring's start
            return nodes.firstEntry().getValue();
        }
        return tail.get(tail.firstKey());
    }

    /**
     * 64-bit MurmurHash2: a fast non-cryptographic hash with good distribution
     * and a very low collision rate - much cheaper than cryptographic hashes
     * such as MD5 or SHA-1.
     * FIX: bytes are taken as UTF-8 explicitly (the original used the platform
     * default charset, which could yield a different ring layout per machine);
     * the private return type is the primitive {@code long} to avoid boxing.
     */
    private long hash(String key) {
        ByteBuffer buf = ByteBuffer.wrap(key.getBytes(java.nio.charset.StandardCharsets.UTF_8));
        int seed = 0x1234ABCD;
        ByteOrder byteOrder = buf.order();
        buf.order(ByteOrder.LITTLE_ENDIAN);
        long m = 0xc6a4a7935bd1e995L;
        int r = 47;
        long h = seed ^ (buf.remaining() * m);
        long k;
        while (buf.remaining() >= 8) {
            k = buf.getLong();
            k *= m;
            k ^= k >>> r;
            k *= m;
            h ^= k;
            h *= m;
        }
        if (buf.remaining() > 0) {
            ByteBuffer finish = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
            // for a big-endian version, do finish.position(8 - buf.remaining()) first
            finish.put(buf).rewind();
            h ^= finish.getLong();
            h *= m;
        }
        h ^= h >>> r;
        h *= m;
        h ^= h >>> r;
        buf.order(byteOrder);
        return h;
    }
}
7、手写一个 LRU 算法
/**
 * Hand-rolled LRU cache: a doubly-linked list keeps usage order (most
 * recently used at the head, least recently used at the tail) while a
 * HashMap provides O(1) key lookup.
 *
 * Fixes over the original:
 *  - the pasted source was duplicated and interleaved mid-class; de-duplicated here
 *  - constructor self-assignment {@code cacheSize = cacheSize} left the
 *    capacity field at 0, so the cache effectively held one entry
 *  - eviction was checked before incrementing the size, letting the cache
 *    hold cacheSize + 1 entries
 */
public class LRUCache {

    /** Maximum number of entries the cache may hold. */
    private final int cacheSize;
    /** Current number of entries. */
    private int currentSize;
    /** Most recently used entry. */
    private CacheNode head;
    /** Least recently used entry - the eviction candidate. */
    private CacheNode tail;
    /** key -> node index for O(1) lookup. */
    private final HashMap<Integer, CacheNode> nodes;

    /** Doubly-linked list node; static so it holds no hidden outer-instance reference. */
    private static class CacheNode {
        CacheNode prev;
        CacheNode next;
        int key;
        int value;
    }

    /**
     * @param cacheSize maximum number of entries to retain
     */
    public LRUCache(int cacheSize) {
        this.cacheSize = cacheSize; // BUG FIX: original self-assignment left the field 0
        this.currentSize = 0;
        this.nodes = new HashMap<>(cacheSize);
    }

    /**
     * Inserts or updates a key; either way the entry becomes the most recently
     * used. Inserting beyond capacity evicts the least recently used entry.
     */
    public void set(Integer key, Integer value) {
        CacheNode node = nodes.get(key);
        if (node == null) {
            // new entry
            node = new CacheNode();
            node.key = key;
            node.value = value;
            nodes.put(key, node);
            moveToHead(node);
            currentSize++;
            if (currentSize > cacheSize) { // over capacity - evict the LRU entry
                removeTail();
                currentSize--;
            }
        } else {
            // existing entry: refresh value and usage order
            node.value = value;
            moveToHead(node);
        }
    }

    /**
     * Returns the value for {@code key}, marking it most recently used, or 0
     * when the key is absent (contract kept from the original).
     */
    public int get(int key) {
        CacheNode node = nodes.get(key);
        if (node != null) {
            moveToHead(node);
            return node.value;
        }
        return 0;
    }

    /** Drops the least recently used entry from both the list and the index. */
    private void removeTail() {
        if (tail == null) {
            return;
        }
        nodes.remove(tail.key);
        CacheNode prev = tail.prev;
        if (prev != null) {
            prev.next = null;
        } else {
            head = null; // list is now empty
        }
        tail = prev;
    }

    /** Unlinks the node from its current position (if any) and re-links it at the head. */
    private void moveToHead(CacheNode node) {
        if (node == head) {
            return; // already the most recently used
        }
        // unlink from current position
        if (node.prev != null) {
            node.prev.next = node.next;
        }
        if (node.next != null) {
            node.next.prev = node.prev;
        }
        if (node == tail) {
            tail = node.prev;
        }
        // link at the head
        node.prev = null;
        node.next = head;
        if (head != null) {
            head.prev = node;
        }
        head = node;
        if (tail == null) {
            tail = node; // first element is both head and tail
        }
    }
}
/**
 * LRU cache built on {@link LinkedHashMap}'s access-order mode: the map keeps
 * entries ordered from least to most recently accessed, and the eldest entry
 * is evicted once the cache grows past its capacity.
 *
 * @param <K> key type
 * @param <V> value type
 */
public class LRUCache<K, V> extends LinkedHashMap<K, V> {

    /** Maximum number of entries retained in the cache. */
    private final int capacity;

    /**
     * Creates a cache that retains at most {@code cacheSize} entries.
     *
     * @param cacheSize maximum number of entries to keep
     */
    public LRUCache(int cacheSize) {
        // accessOrder = true: iteration order tracks access recency (LRU first);
        // false would track insertion order instead. The initial capacity is
        // sized so the map never rehashes while holding cacheSize entries.
        super((int) Math.ceil(cacheSize / 0.75) + 1, 0.75f, true);
        this.capacity = cacheSize;
    }

    /** Evict the least recently used entry whenever the size exceeds the capacity. */
    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > capacity;
    }
}