[Study] Standalone Flink 1.12.2 integrated with Hadoop 3.1.3
Single-machine environment!!! Single IP. If the machine has multiple IPs, watch out for settings that default to the hostname; or take the lazy route and, in /etc/hosts, map the machine's actual hostname to the IP rather than mapping hadoop to the IP.
Note
The configs below use the hostname hadoop; remember to add a hadoop <ip> entry in /etc/hosts.
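For example, with the IP that appears later in the commands section, the /etc/hosts entry would look like:
192.168.146.133 hadoop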
Environment variables
current_dir=/opt/bigdata
# HADOOP_HOME
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export HADOOP_HOME=${current_dir}/hadoop-3.1.3
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# JAVA_HOME
export JAVA_HOME=${current_dir}/jdk1.8.0_333
# Flink environment variables
export FLINK_HOME=${current_dir}/flink-1.12.2
# Flink on YARN
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$FLINK_HOME/bin
# Use the full path so this works even before hadoop is on PATH
export HADOOP_CLASSPATH=`$HADOOP_HOME/bin/hadoop classpath`
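To apply the variables and sanity-check the setup (a minimal check, assuming the block above was appended to /etc/profile):
source /etc/profile
java -version            # should report 1.8.0_333
hadoop version           # should report 3.1.3
echo $HADOOP_CLASSPATH   # must be non-empty for Flink on YARN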
core-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <!-- Default filesystem (fs.default.name is the deprecated alias for this key) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/bigdata/hadoop-3.1.3/data</value>
    </property>
    <!-- Static user for the HDFS web UI: root -->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
    <!-- Hosts from which root (superuser) may proxy -->
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <!-- Groups root (superuser) may proxy for -->
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
    <!-- Users root (superuser) may proxy for -->
    <property>
        <name>hadoop.proxyuser.root.users</name>
        <value>*</value>
    </property>
</configuration>
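A quick way to confirm Hadoop picks up this file is hdfs getconf, which prints the effective configuration:
hdfs getconf -confKey fs.defaultFS   # expect hdfs://hadoop:9000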
Add to hadoop-env.sh
export JAVA_HOME=/opt/bigdata/jdk1.8.0_333
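Equivalently, appended from the shell (path per the HADOOP_HOME set above):
echo 'export JAVA_HOME=/opt/bigdata/jdk1.8.0_333' >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh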
hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
</configuration>
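Replication of 1 is enough on a single machine; it can be verified the same way:
hdfs getconf -confKey dfs.replication   # expect 1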
mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <!-- Legacy Hadoop 1 JobTracker address; has no effect when running on YARN -->
    <property>
        <name>mapred.job.tracker</name>
        <value>hadoop:9001</value>
    </property>
    <!-- Run MapReduce on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- JobHistory server address -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hadoop:10020</value>
    </property>
    <!-- JobHistory server web UI address -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoop:19888</value>
    </property>
</configuration>
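Once HDFS and YARN are up (see the commands section below), the examples jar bundled with Hadoop gives a quick MapReduce-on-YARN smoke test; the jar path assumes the stock Hadoop 3.1.3 layout:
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar pi 2 10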
workers
hadoop
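The workers file lists the hosts that run a DataNode and NodeManager; on a single machine it contains just the one hostname:
cat $HADOOP_HOME/etc/hadoop/workers   # should print: hadoop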
yarn-site.xml
<?xml version="1.0"?>
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- ResourceManager address -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop</value>
    </property>
    <!-- NodeManager address; without this the raw hostname is used, which can
         cause port-binding problems and connection timeouts -->
    <property>
        <name>yarn.nodemanager.hostname</name>
        <value>hadoop</value>
    </property>
    <!-- Environment variables inherited by containers -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- Minimum and maximum memory a YARN container may be allocated -->
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>512</value>
    </property>
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>4096</value>
    </property>
    <!-- Physical memory the NodeManager may manage -->
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>4096</value>
    </property>
    <!-- Disable YARN's physical and virtual memory limit checks -->
    <property>
        <name>yarn.nodemanager.pmem-check-enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
    <!-- Enable log aggregation -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!-- Log server address -->
    <property>
        <name>yarn.log.server.url</name>
        <value>http://hadoop:19888/jobhistory/logs</value>
    </property>
    <!-- Retain aggregated logs for 7 days -->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
</configuration>
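Once start-all.sh has been run (see the commands below), the daemons and the NodeManager registration can be checked:
jps              # expect NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager
yarn node -list  # expect one RUNNING node registered as hadoop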
Commands
# Turn off the firewall (throwaway single-machine setup)
systemctl stop firewalld
systemctl disable firewalld
# Set up passwordless SSH to the local machine
ssh-keygen
ssh-copy-id localhost
ssh-copy-id 192.168.146.133
# Add the hadoop <ip> mapping
vi /etc/hosts
# Format the NameNode (first start only)
hdfs namenode -format
# Start HDFS and YARN
start-all.sh
# Start a Flink session on YARN
yarn-session.sh
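As a final smoke test, the WordCount example that ships with the Flink 1.12.2 distribution can be submitted to the running YARN session (by default the flink client finds the session through the properties file yarn-session.sh writes under /tmp):
flink run $FLINK_HOME/examples/streaming/WordCount.jar
The job should then appear in the YARN web UI at http://hadoop:8088 as well as in the Flink UI address that yarn-session.sh prints.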
This post is from 博客园 (cnblogs), author 一支小白. When reposting, please credit the original link: https://www.cnblogs.com/startnow/p/16537763.html