|NO.Z.00066|——————————|BigDataEnd|——|Hadoop&Flink.V05|——|Flink.v04|Flink SQL|Flink SQL Output Tables|Output to Kafka|
1. Flink SQL Output Tables: Writing to Kafka
### --- Writing to Kafka
~~~ # Write a table out to Kafka
DataStreamSource<String> data = env.addSource(new SourceFunction<String>() {
    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        int num = 0;
        while (true) {
            num++;
            ctx.collect("name" + num);
            Thread.sleep(1000);
        }
    }

    @Override
    public void cancel() {
    }
});

Table name = tEnv.fromDataStream(data, $("name"));

ConnectTableDescriptor descriptor = tEnv.connect(
        // declare the external system to connect to
        new Kafka()
                .version("universal")
                .topic("animal")
                .startFromEarliest()
                .property("bootstrap.servers", "hdp-2:9092")
)
        // declare a format for this system
        .withFormat(
                // new Json()
                new Csv()
        )
        // declare the schema of the table
        .withSchema(
                new Schema()
                        // .field("rowtime", DataTypes.TIMESTAMP(3))
                        // .rowtime(new Rowtime()
                        //         .timestampsFromField("timestamp")
                        //         .watermarksPeriodicBounded(60000)
                        // )
                        // .field("user", DataTypes.BIGINT())
                        .field("message", DataTypes.STRING())
        );

// create a table with given name
descriptor.createTemporaryTable("MyUserTable");
name.executeInsert("MyUserTable");
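
Note that `connect()` and the descriptor classes were deprecated in Flink 1.11 in favor of SQL DDL. As a minimal sketch (same topic, broker, and schema as above; `tEnv` and `name` as defined in the snippet), the equivalent sink can be declared with `executeSql`:

// Equivalent Kafka sink declared via SQL DDL (Flink 1.11 'kafka' connector).
tEnv.executeSql(
        "CREATE TABLE MyUserTable (" +
        "  message STRING" +
        ") WITH (" +
        "  'connector' = 'kafka'," +
        "  'topic' = 'animal'," +
        "  'properties.bootstrap.servers' = 'hdp-2:9092'," +
        "  'format' = 'csv'" +
        ")");
name.executeInsert("MyUserTable");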
### --- Writing to MySQL (for reference)
CREATE TABLE MyUserTable (
  ...
) WITH (
  -- required: specify this table type is jdbc
  'connector.type' = 'jdbc',

  -- required: JDBC DB url
  'connector.url' = 'jdbc:mysql://localhost:3306/flink-test',

  -- required: jdbc table name
  'connector.table' = 'jdbc_table_name',

  -- optional: the class name of the JDBC driver to use to connect to this URL.
  -- If not set, it will automatically be derived from the URL.
  'connector.driver' = 'com.mysql.jdbc.Driver',

  -- optional: jdbc user name and password
  'connector.username' = 'name',
  'connector.password' = 'password',

  -- **followings are scan options, optional, used when reading from a table**

  -- optional: SQL query / prepared statement.
  -- If set, this will take precedence over the 'connector.table' setting
  'connector.read.query' = 'SELECT * FROM sometable',

  -- These options must all be specified if any of them is specified. In addition,
  -- partition.num must be specified. They describe how to partition the table when
  -- reading in parallel from multiple tasks. partition.column must be a numeric,
  -- date, or timestamp column from the table in question. Notice that lowerBound and
  -- upperBound are just used to decide the partition stride, not for filtering the
  -- rows in table. So all rows in the table will be partitioned and returned.
  'connector.read.partition.column' = 'column_name', -- optional: the column name used for partitioning the input.
  'connector.read.partition.num' = '50',             -- optional: the number of partitions.
  'connector.read.partition.lower-bound' = '500',    -- optional: the smallest value of the first partition.
  'connector.read.partition.upper-bound' = '1000',   -- optional: the largest value of the last partition.

  -- optional, Gives the reader a hint as to the number of rows that should be fetched
  -- from the database when reading per round trip. If the value specified is zero, then
  -- the hint is ignored. The default value is zero.
  'connector.read.fetch-size' = '100',

  -- **followings are lookup options, optional, used in temporary join**

  -- optional, max number of rows of lookup cache, over this value, the oldest rows will
  -- be eliminated. "cache.max-rows" and "cache.ttl" options must all be specified if any
  -- of them is specified. Cache is not enabled as default.
  'connector.lookup.cache.max-rows' = '5000',

  -- optional, the max time to live for each rows in lookup cache, over this time, the oldest rows
  -- will be expired. "cache.max-rows" and "cache.ttl" options must all be specified if any of
  -- them is specified. Cache is not enabled as default.
  'connector.lookup.cache.ttl' = '10s',

  -- optional, max retry times if lookup database failed
  'connector.lookup.max-retries' = '3',

  -- **followings are sink options, optional, used when writing into table**

  -- optional, flush max size (includes all append, upsert and delete records),
  -- over this number of records, will flush data. The default value is "5000".
  'connector.write.flush.max-rows' = '5000',

  -- optional, flush interval mills, over this time, asynchronous threads will flush data.
  -- The default value is "0s", which means no asynchronous flush thread will be scheduled.
  'connector.write.flush.interval' = '2s',

  -- optional, max retry times if writing records to database failed
  'connector.write.max-retries' = '3'
)
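
To use such a definition from Java, the DDL string can be registered with `executeSql` and written to with a regular INSERT. A minimal sketch, assuming the flink-connector-jdbc dependency is on the classpath; the column list is trimmed to one field, and `SourceTable` is a hypothetical name for any already-registered table with a compatible schema:

// Register the JDBC sink from a DDL string (legacy 'connector.type' options, Flink 1.11),
// then write to it with a regular INSERT statement.
tEnv.executeSql(
        "CREATE TABLE MyUserTable (" +
        "  name STRING" +
        ") WITH (" +
        "  'connector.type' = 'jdbc'," +
        "  'connector.url' = 'jdbc:mysql://localhost:3306/flink-test'," +
        "  'connector.table' = 'jdbc_table_name'," +
        "  'connector.username' = 'name'," +
        "  'connector.password' = 'password'" +
        ")");
// 'SourceTable' is a placeholder for any registered table to read from.
tEnv.executeSql("INSERT INTO MyUserTable SELECT name FROM SourceTable");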
2. Code Implementation
### --- Code implementation: writing a table to Kafka
package com.yanqi.tableql;

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Csv;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Schema;

import static org.apache.flink.table.api.Expressions.$;

public class ToKafka {
    public static void main(String[] args) throws Exception {
        // env
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // tEnv
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                // .inBatchMode()
                .withBuiltInCatalogName("default_catalog")
                .withBuiltInDatabaseName("default_database")
                .build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        // source: emit "name1", "name2", ... once per second
        DataStreamSource<String> data = env.addSource(new SourceFunction<String>() {
            @Override
            public void run(SourceContext<String> ctx) throws Exception {
                int num = 0;
                while (true) {
                    num++;
                    ctx.collect("name" + num);
                    Thread.sleep(1000);
                }
            }

            @Override
            public void cancel() {
            }
        });

        Table nameTable = tEnv.fromDataStream(data, $("name"));

        tEnv.connect(
                new Kafka()
                        .version("universal")
                        .topic("lucasone")
                        .startFromEarliest()
                        .property("bootstrap.servers", "hadoop01:9092")
        )
                .withFormat(new Csv())
                .withSchema(
                        new Schema().field("name", DataTypes.STRING())
                )
                .createTemporaryTable("animalTable");

        // executeInsert() translates and submits the job by itself,
        // so a separate env.execute() call is not needed (and would fail,
        // since the table pipeline leaves no operators for it to run).
        nameTable.executeInsert("animalTable");
    }
}
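
Because `executeInsert()` submits its own job, the `TableResult` it returns is the handle to that job. If you want the blocking behavior the old `env.execute()` call used to give, a minimal sketch against the Flink 1.11 API (requires `import org.apache.flink.table.api.TableResult;`) looks like this:

// Optional: wait on the job submitted by executeInsert() (Flink 1.11 signatures).
TableResult result = nameTable.executeInsert("animalTable");
result.getJobClient()
        .get()
        .getJobExecutionResult(Thread.currentThread().getContextClassLoader())
        .get(); // blocks until the streaming job terminates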
### --- Compile and run
~~~ # Start a console consumer on the Kafka topic
[root@hadoop01 ~]# kafka-console-consumer.sh --bootstrap-server hadoop01:9092 --topic lucasone --from-beginning
name1
name2
name3
name4
name5
~~~ (remaining output omitted)
~~~ # Run output: java ... com.yanqi.tableql.ToKafka (the IDE launch command and its full dependency classpath are omitted)