Custom Log Listeners in Spring Boot, and Using the MinIO Object Storage Service
This blog mainly records problems I run into at work and their solutions, for my own future reference. If it helps you, please leave a like, thanks!
1. Custom log listener in Spring Boot
Scenario: our Spring Boot service is packaged as a jar, built into a Docker image, and run on Kubernetes. A few values in the log output have to come from environment variables so that the same image stays flexible across environments, which means they cannot be hard-coded in logback.xml. The solution is a custom logger-context listener: when Spring Boot loads the logging configuration at startup, logback instantiates the listener and calls its lifecycle methods, and that hook lets us read the environment variables and put them into the logger context, where the logging configuration can then reference them.
Steps:
1. Define a log listener class that extends ContextAwareBase and implements the two interfaces LoggerContextListener and LifeCycle
import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.classic.spi.LoggerContextListener;
import ch.qos.logback.core.spi.ContextAwareBase;
import ch.qos.logback.core.spi.LifeCycle;

public class LoggerStartupListener extends ContextAwareBase implements LoggerContextListener, LifeCycle {

    private boolean started = false;

    @Override
    public void start() {
        if (started) {
            return;
        }
        // Read the POD_ID environment variable.
        String sysProp_PodID = System.getenv("POD_ID");
        // Put POD_ID into the logger context so ${POD_ID} resolves in the config.
        context.putProperty("POD_ID", sysProp_PodID);
        // Any other properties you need can be set the same way.
        context.putProperty("OTHERPROP", "others");
        System.out.println("logback properties loaded");
        started = true;
    }

    @Override
    public void stop() {}

    @Override
    public boolean isStarted() {
        return started;
    }

    @Override
    public boolean isResetResistant() {
        // Survive context resets so the properties are not lost.
        return true;
    }

    @Override
    public void onStart(LoggerContext context) {}

    @Override
    public void onReset(LoggerContext context) {}

    @Override
    public void onStop(LoggerContext context) {}

    @Override
    public void onLevelChange(Logger logger, Level level) {}
}
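One caveat: if the service ever starts outside Kubernetes, POD_ID is unset, start() stores null, and logback renders the unresolved ${POD_ID} as the literal POD_ID_IS_UNDEFINED in the log file name. A hedged sketch of a more defensive start() body; the "local" fallback is my own placeholder, not part of the original:

    @Override
    public void start() {
        if (started) {
            return;
        }
        String podId = System.getenv("POD_ID");
        // Fall back to a local default so the file name stays usable
        // when the POD_ID environment variable is absent (placeholder value).
        context.putProperty("POD_ID", podId != null ? podId : "local");
        started = true;
    }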
2. Register the context listener in the log configuration file logback-spring.xml
<?xml version="1.0" encoding="UTF-8"?> <configuration debug="true" > <!--定义日志文件的存储地址 勿在 LogBack 的配置中使用相对路径--> <!--<property name="LOG_HOME" value="logs" />--> <property name="LOG_HOME" value="/app/logs" /> <property name="PRO_NAME" value="svsync" /> <property name="LOG_PATTERN" value="%d{yyyy-MM-dd HH:mm:ss.SSS} [%c] [%p] %m%n" /> <contextListener class="com.zjft.uap.svsync.configuration.LoggerStartupListener" /> <!-- 控制台输出 --> <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender"> <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder"> <pattern>${LOG_PATTERN}</pattern> </encoder> </appender> <!-- 按照级别生成文件 INFO --> <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender"> <file>${LOG_HOME}/${PRO_NAME}/${PRO_NAME}-${POD_ID}.log.crn</file> <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder"> <pattern>${LOG_PATTERN}</pattern> <charset>UTF-8</charset> </encoder> <rollingPolicy class="ch.qos.logback.core.rolling.IsspSizeBasedRollingPolicy"> <fileNamePattern>${LOG_HOME}/${PRO_NAME}/${PRO_NAME}-${POD_ID}-%d{yyyyMMddHHmmss}.log</fileNamePattern> <maxFileSize>50MB</maxFileSize> </rollingPolicy> </appender> <root level="INFO"> <appender-ref ref="STDOUT" /> <appender-ref ref="FILE" /> </root> </configuration>
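To sanity-check that the listener ran, the property can be read back from the logger context at runtime. A minimal sketch, assuming logback-classic is the SLF4J binding on the classpath (the class name ListenerCheck is mine):

import ch.qos.logback.classic.LoggerContext;
import org.slf4j.LoggerFactory;

public class ListenerCheck {
    public static void main(String[] args) {
        // With logback-classic on the classpath, the SLF4J ILoggerFactory
        // is logback's LoggerContext, the same object the listener configured.
        LoggerContext ctx = (LoggerContext) LoggerFactory.getILoggerFactory();
        // Should print the value copied from the POD_ID environment variable.
        System.out.println("POD_ID = " + ctx.getProperty("POD_ID"));
    }
}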
2. A convenient OSS service for Spring Boot (MinIO)
Scenario: while developing and deploying our microservices, the customer required that every file the services upload or download be kept in an object storage service. After some careful research we found a capable and lightweight option, MinIO, which is what we eventually chose. Setting up the server is very simple: a single Linux machine is enough (clusters are supported as well).
Steps:
1. Install the MinIO server (as noted above, a single Linux machine is enough; the installation itself is not covered here).
2. Use it from the Spring Boot application (we develop against the AWS S3 protocol, which MinIO supports).
1> Add the Maven dependency
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-s3</artifactId>
    <version>1.11.238</version>
</dependency>
2> Using it in code
import com.amazonaws.AmazonServiceException;
import com.amazonaws.SdkClientException;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Paths;

public class Main {

    static String bucketName = "test";
    static String accessKeyId = "minioadmin";
    static String secretAccessKey = "minioadmin";
    static String region = Regions.US_EAST_1.getName();
    static String endpoint = "http://192.168.242.181:9000";

    public static void main(String[] args) {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKeyId, secretAccessKey);
        // For the public AWS cloud a region alone is enough:
        // AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        //         .withCredentials(new AWSStaticCredentialsProvider(awsCredentials))
        //         .withRegion(region).build();
        // Our in-house private cloud connection needs an explicit endpoint.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(awsCredentials))
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, region))
                .build();

        // createBucket(s3, "create");
        // deleteBucket(s3, "kczhang-t-bk-1");
        // listBuckets(s3);
        // listKeys(s3, "uapversions");
        // upload(s3, bucketName, "cmd-s3.zip");
        // uploadToSubFolder(s3, bucketName, "bb/cc/", "cmd-s3.zip");
        getObject(s3, bucketName, "cmd-s3.zip");
        // deleteObject(s3, bucketName, "package.json");
        // deleteObjects(s3, bucketName, new String[]{"cmd-s3.iml"});
        // createFolder(s3, bucketName, "bb/");
    }

    /** List all keys in a bucket, following continuation tokens. */
    public static void listKeys(AmazonS3 s3, String bucketName) {
        try {
            System.out.println("Listing objects");
            // A page holds at most 1000 keys by default; keep requesting
            // pages via the continuation token until nothing is truncated.
            ListObjectsV2Request req = new ListObjectsV2Request().withBucketName(bucketName);
            ListObjectsV2Result result;
            do {
                result = s3.listObjectsV2(req);
                for (S3ObjectSummary objectSummary : result.getObjectSummaries()) {
                    System.out.printf(" - %s (size: %d)%n", objectSummary.getKey(), objectSummary.getSize());
                }
                req.setContinuationToken(result.getNextContinuationToken());
            } while (result.isTruncated());
        } catch (AmazonServiceException e) {
            // The call reached S3, but S3 could not process it and returned an error.
            e.printStackTrace();
        } catch (SdkClientException e) {
            // S3 could not be contacted, or the response could not be parsed.
            e.printStackTrace();
        }
    }

    /** Create a bucket if it does not exist yet. */
    public static void createBucket(AmazonS3 s3, String bucketName) {
        try {
            if (!s3.doesBucketExistV2(bucketName)) {
                s3.createBucket(new CreateBucketRequest(bucketName));
            }
            System.out.println("Bucket location: "
                    + s3.getBucketLocation(new GetBucketLocationRequest(bucketName)));
        } catch (AmazonServiceException e) {
            e.printStackTrace();
        }
    }

    /** Delete a bucket, removing all objects and object versions first. */
    private static void deleteBucket(AmazonS3 s3, String bucketName) {
        // Object listings are paged, so loop until no page is truncated.
        ObjectListing objectListing = s3.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
                s3.deleteObject(bucketName, summary.getKey());
            }
            if (objectListing.isTruncated()) {
                objectListing = s3.listNextBatchOfObjects(objectListing);
            } else {
                break;
            }
        }
        // Delete all object versions (required for versioned buckets).
        VersionListing versionList = s3.listVersions(new ListVersionsRequest().withBucketName(bucketName));
        while (true) {
            for (S3VersionSummary vs : versionList.getVersionSummaries()) {
                s3.deleteVersion(bucketName, vs.getKey(), vs.getVersionId());
            }
            if (versionList.isTruncated()) {
                versionList = s3.listNextBatchOfVersions(versionList);
            } else {
                break;
            }
        }
        // Only an empty bucket can be deleted.
        try {
            s3.deleteBucket(bucketName);
        } catch (AmazonServiceException e) {
            e.printStackTrace();
        }
    }

    /** Print the names of all buckets. */
    public static void listBuckets(AmazonS3 s3) {
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(bucket.getName());
        }
    }

    /** Upload a local file; the object key is the bare file name. */
    public static void upload(AmazonS3 s3, String bucketName, String filePath) {
        File file = new File(filePath);
        String key = Paths.get(filePath).getFileName().toString();
        try {
            s3.putObject(bucketName, key, file);
        } catch (AmazonServiceException e) {
            System.err.println(e.getErrorMessage());
            System.exit(1);
        }
    }

    /** Upload a local file under a "folder" prefix, keeping the file name. */
    public static void uploadToSubFolder(AmazonS3 s3, String bucketName, String bucketPath, String filePath) {
        File file = new File(filePath);
        // The original used bucketPath alone as the key, which would drop the
        // file name; append it so the object lands inside the prefix.
        String key = bucketPath + Paths.get(filePath).getFileName().toString();
        try {
            s3.putObject(bucketName, key, file);
        } catch (AmazonServiceException e) {
            System.err.println(e.getErrorMessage());
            System.exit(1);
        }
    }

    /** Create an empty "folder" (a zero-byte object whose key ends in '/'). */
    public static void createFolder(AmazonS3 s3, String bucketName, String folderKey) {
        try {
            s3.putObject(bucketName, folderKey, "");
        } catch (AmazonServiceException e) {
            System.err.println(e.getErrorMessage());
            System.exit(1);
        }
    }

    /** Download an object to a local file with a simple progress printout. */
    public static void getObject(AmazonS3 s3, String bucketName, String key) {
        try {
            // Fetch once and reuse for both stream and metadata; the original
            // called getObject twice, opening a second connection needlessly.
            S3Object object = s3.getObject(bucketName, key);
            long fileLen = object.getObjectMetadata().getContentLength();
            File outFile = new File(key);
            if (!outFile.exists()) {
                if (outFile.getParentFile() != null) {  // keys without '/' have no parent dir
                    outFile.getParentFile().mkdirs();
                }
                outFile.createNewFile();
            }
            try (S3ObjectInputStream s3stream = object.getObjectContent();
                 FileOutputStream fs = new FileOutputStream(outFile)) {
                byte[] readBuf = new byte[1024];
                int readLen;
                long receivedLen = 0;
                while ((readLen = s3stream.read(readBuf)) != -1) {
                    receivedLen += readLen;
                    fs.write(readBuf, 0, readLen);
                    // Multiply before dividing; the original integer division
                    // (receiveLen / fileLen * 100) always printed 0%.
                    System.out.println(receivedLen * 100 / fileLen + "%");
                }
            }
            System.out.println("Done!");
        } catch (AmazonServiceException e) {
            System.err.println(e.getErrorMessage());
            System.exit(1);
        } catch (FileNotFoundException e) {
            System.err.println("file " + e.getMessage());
            System.exit(1);
        } catch (IOException e) {
            System.err.println(e.getMessage());
            System.exit(1);
        }
    }

    /** Delete a single object. */
    public static void deleteObject(AmazonS3 s3, String bucketName, String key) {
        try {
            s3.deleteObject(bucketName, key);
            System.out.println("Deleted!");
        } catch (AmazonServiceException e) {
            System.err.println(e.getErrorMessage());
            System.exit(1);
        }
    }

    /** Delete several objects in one request. */
    public static void deleteObjects(AmazonS3 s3, String bucketName, String[] objectKeys) {
        for (String k : objectKeys) {
            System.out.println(" * " + k);
        }
        try {
            s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(objectKeys));
        } catch (AmazonServiceException e) {
            System.err.println(e.getErrorMessage());
            System.exit(1);
        }
        System.out.println("Deleted!");
    }
}