Hadoop installation
vi /etc/sysconfig/network-scripts/ifcfg-ens33 # set a static IP for hd1
``
IPADDR=192.168.182.8
``
vi /etc/hosts
``
192.168.182.8 hd1
192.168.182.9 hd2
192.168.182.10 hd3
``
vi /etc/hostname
``
hd1
``
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa # generate a DSA key pair with an empty passphrase
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys # append the public key to authorized_keys
reboot # reboot
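The key pair above only enables passwordless login to this host; hd2 and hd3 inherit it when the VM is cloned below. If the workers are ever built separately instead of cloned, the public key has to be pushed to each of them; a minimal sketch, assuming root password login is still enabled:
``
# Hedged sketch: only needed if hd2/hd3 are NOT clones of hd1.
for host in hd2 hd3; do
  ssh-copy-id -i ~/.ssh/id_dsa.pub root@$host  # prompts for the password once
  ssh root@$host hostname                      # verify passwordless login works
done
``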
mkdir /usr/local/hadoop/ # create the hadoop directory
cd /usr/local/hadoop/ # enter it
wget https://mirror.bit.edu.cn/apache/hadoop/common/hadoop-2.9.2/hadoop-2.9.2.tar.gz # download hadoop
tar -zvxf hadoop-2.9.2.tar.gz # unpack into the current directory
vi /etc/profile
``
export JAVA_HOME=/home/fleam/jdk1.8.0_191
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.2
export PATH=$PATH:${JAVA_PATH}:/home/mongodb/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
``
source /etc/profile
hadoop version # verify the installation
echo $JAVA_HOME # expected: /home/fleam/jdk1.8.0_191
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/hadoop-env.sh
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-env.sh
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/yarn-env.sh
``
export JAVA_HOME=/home/fleam/jdk1.8.0_191
``
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/hdfs-site.xml
``
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>/usr/data/hadoop/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/usr/data/hadoop/datanode</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/data/hadoop/journalnode</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
<value>false</value>
</property>
</configuration>
``
mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/core-site.xml
``
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.182.8:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/data/hadoop/tmp</value>
</property>
</configuration>
``
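Before going further, it's worth confirming that Hadoop actually picks these values up; `hdfs getconf` prints the effective configuration:
``
hdfs getconf -confKey fs.defaultFS    # expect hdfs://192.168.182.8:9000
hdfs getconf -confKey dfs.replication # expect 3
``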
cp /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml
``
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
``
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/yarn-site.xml
``
<configuration>
<property>
<name>yarn.resourcemanager.address</name>
<value>192.168.182.8:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>192.168.182.8:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>192.168.182.8:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>192.168.182.8:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>192.168.182.8:8088</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>
``
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/slaves
``
192.168.182.8
192.168.182.9
192.168.182.10
``
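This guide propagates all of this configuration by cloning the whole VM in the next step. If any file under etc/hadoop is edited later, the change must reach the workers too; a hedged sketch, assuming the same install path on every node:
``
# push the Hadoop config from hd1 to the workers
for host in hd2 hd3; do
  scp /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/*.xml \
      /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/slaves \
      root@$host:/usr/local/hadoop/hadoop-2.9.2/etc/hadoop/
done
``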
# Clone this VM twice to create hd2 and hd3, then adjust each clone:
vi /etc/sysconfig/network-scripts/ifcfg-ens33
``
IPADDR=192.168.182.9   # on hd2
IPADDR=192.168.182.10  # on hd3
``
reboot
ssh root@192.168.182.9 # test passwordless login
ssh root@192.168.182.10 # test passwordless login
vi /etc/hostname # set to hd2 on the second clone, hd3 on the third
``
hd2
hd3
``
# Log in to hd1
hadoop namenode -format # format the namenode as root (same as: hdfs namenode -format)
start-dfs.sh # start HDFS
start-yarn.sh # start YARN
start-all.sh # or start both at once instead of the two commands above
jps # verify on hd1
##
3408 ResourceManager
3235 SecondaryNameNode
3996 Jps
2973 NameNode
3501 NodeManager
3070 DataNode
##
jps # verify on hd2 and hd3
##
1797 Jps
1638 NodeManager
1532 DataNode
##
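jps only proves the JVMs are up; a short smoke test that pushes a file through HDFS end to end (the file name 1.txt is arbitrary):
``
hdfs dfsadmin -report        # all three datanodes should be listed as live
echo "hello hadoop" > /tmp/1.txt
hadoop fs -mkdir -p /input
hadoop fs -put /tmp/1.txt /input
hadoop fs -cat /input/1.txt  # should print: hello hadoop
``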
# To re-format from scratch, run on hd1, hd2, and hd3:
rm -rf /usr/data/hadoop/tmp
rm -rf /usr/data/hadoop/namenode
rm -rf /usr/data/hadoop/datanode
rm -rf /usr/data/hadoop/journalnode
rm -rf /usr/local/hadoop/hadoop-2.9.2/logs/*
mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode
# then on hd1 only:
hdfs namenode -format
start-all.sh
# Troubleshooting
stop-all.sh # stop everything
cd /usr/local/hadoop/hadoop-2.9.2/logs # inspect the logs
# Check whether the port is already taken
netstat -tunlp|grep 9000 # list listeners on port 9000
lsof -i:9000 # show the process holding port 9000
ps -ef | grep clickhouse # ClickHouse listens on port 9000 by default
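If ClickHouse (or anything else) already owns port 9000, either stop that service or move HDFS to a free port. A hedged sketch of the latter: 9820 is an arbitrary free port, the value must be changed in core-site.xml on all three nodes, and hbase.rootdir (below) must be kept in sync:
``
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.182.8:9820</value> <!-- 9820 is an arbitrary free port -->
</property>
``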
ZooKeeper installation
wget http://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
tar -zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /home/bigData
vi /etc/profile
``
export ZOOKEEPER_HOME=/home/bigData/zookeeper-3.4.14
export PATH=$PATH:${JAVA_PATH}:/home/mongodb/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf
``
source /etc/profile
cp /home/bigData/zookeeper-3.4.14/conf/zoo_sample.cfg /home/bigData/zookeeper-3.4.14/conf/zoo.cfg
vim /home/bigData/zookeeper-3.4.14/conf/zoo.cfg
``
dataDir=/usr/data/zookeeper/data
dataLogDir=/usr/data/zookeeper/logs
server.1=192.168.182.8:2888:3888
server.2=192.168.182.9:2888:3888
server.3=192.168.182.10:2888:3888
``
mkdir -p /usr/data/zookeeper/data
mkdir -p /usr/data/zookeeper/logs
cd /usr/data/zookeeper/data
touch myid
vi myid # write 1 on hd1, 2 on hd2, 3 on hd3
``
1
2
3
``
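The myid file must hold a different id on each node: 1 on hd1, 2 on hd2, 3 on hd3. A hedged one-liner that derives the id from the hostname, assuming the hd1/hd2/hd3 naming used here:
``
# run on each node; maps hd1 -> 1, hd2 -> 2, hd3 -> 3
hostname | tr -d 'hd' > /usr/data/zookeeper/data/myid
cat /usr/data/zookeeper/data/myid
``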
zkServer.sh start
zkServer.sh status
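With all three servers started, zkServer.sh status should report leader on one node and follower on the other two. A quick functional check through zkCli.sh (the znode name is arbitrary):
``
zkCli.sh -server hd1:2181
create /smoketest hello
get /smoketest     # should print: hello
delete /smoketest
quit
``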
HBase installation
wget https://mirror.bit.edu.cn/apache/hbase/1.4.13/hbase-1.4.13-bin.tar.gz
tar -zxvf hbase-1.4.13-bin.tar.gz
yum install -y ntpdate # HBase needs the cluster clocks kept in sync
mkdir /usr/local/hbase
mv hbase-1.4.13 /usr/local/hbase/
vi /etc/profile
``
export HBASE_HOME=/usr/local/hbase/hbase-1.4.13
export PATH=$PATH:$HBASE_HOME/bin # append to the existing PATH line
``
source /etc/profile
vi /usr/local/hbase/hbase-1.4.13/conf/hbase-env.sh
``
export JAVA_HOME=/home/fleam/jdk1.8.0_191
export HBASE_MANAGES_ZK=false
export HBASE_CLASSPATH=$HBASE_CLASSPATH:/usr/local/hbase/hbase-1.4.13/conf:/usr/local/hbase/hbase-1.4.13/lib:/usr/local/hadoop/hadoop-2.9.2/etc/hadoop/
``
vi /usr/local/hbase/hbase-1.4.13/conf/hbase-site.xml
``
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://192.168.182.8:9000/hbase</value> <!-- must match fs.defaultFS in core-site.xml -->
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/usr/data/hbase/tmp</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>192.168.182.8,192.168.182.9,192.168.182.10</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
</configuration>
``
mkdir -p /usr/data/hbase/tmp
vi /usr/local/hbase/hbase-1.4.13/conf/regionservers
``
192.168.182.8
192.168.182.9
192.168.182.10
``
scp -r /usr/local/hbase/hbase-1.4.13/ 192.168.182.9:/usr/local/hbase/hbase-1.4.13/
scp -r /usr/local/hbase/hbase-1.4.13/ 192.168.182.10:/usr/local/hbase/hbase-1.4.13/
vi /etc/profile
``
export HBASE_HOME=/usr/local/hbase/hbase-1.4.13
export PATH=$PATH:$HBASE_HOME/bin # append to the existing PATH line
``
source /etc/profile
# on all three nodes: wipe old data before re-formatting
rm -rf /usr/data/hadoop/tmp
rm -rf /usr/data/hadoop/namenode
rm -rf /usr/data/hadoop/datanode
rm -rf /usr/data/hadoop/journalnode
rm -rf /usr/local/hadoop/hadoop-2.9.2/logs/*
rm -rf /usr/local/hbase/hbase-1.4.13/logs/*
rm -rf /usr/data/hbase/tmp
mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode
mkdir -p /usr/data/hbase/tmp
zkServer.sh start
zkServer.sh status
zkCli.sh -server hd1
rmr /hbase # remove the stale /hbase znode from the previous run
hdfs namenode -format
start-all.sh
start-hbase.sh
curl http://192.168.182.8:16010
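curl only confirms the master web UI answers; a quick check that HBase can actually create and serve a table (the table and column family names are arbitrary):
``
hbase shell
create 'smoketest', 'cf'
put 'smoketest', 'r1', 'cf:a', 'v1'
scan 'smoketest'   # should show one row
disable 'smoketest'
drop 'smoketest'
exit
``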
MySQL installation
rpm -qa|grep mariadb # find the preinstalled mariadb-libs
rpm -e mariadb-libs-5.5.65-1.el7.x86_64 # remove it to avoid conflicts with MySQL
wget http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum install mysql-community-server
systemctl restart mysqld.service
mysql -u root
set password for 'root'@'localhost' =password('root');
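Hive (configured below) keeps its metastore in this MySQL instance. The JDBC URL uses createDatabaseIfNotExist=true, so pre-creating the database is optional, but it doesn't hurt to create it and confirm the grants up front; a hedged sketch in MySQL 5.x syntax:
``
mysql -uroot -proot <<'SQL'
CREATE DATABASE IF NOT EXISTS hive_db;
GRANT ALL PRIVILEGES ON hive_db.* TO 'root'@'localhost' IDENTIFIED BY 'root';
FLUSH PRIVILEGES;
SQL
``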
Hive installation
wget https://mirrors.tuna.tsinghua.edu.cn/apache/hive/hive-2.3.7/apache-hive-2.3.7-bin.tar.gz
tar -zxvf apache-hive-2.3.7-bin.tar.gz
mkdir -p /usr/local/hive/
mv apache-hive-2.3.7-bin /usr/local/hive/
vi /etc/profile
``
export HIVE_HOME=/usr/local/hive/apache-hive-2.3.7-bin
export PATH=$PATH:$HIVE_HOME/bin
``
source /etc/profile
hive --version
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh
vi /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh
``
HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.2 # Hadoop install path
export HIVE_CONF_DIR=/usr/local/hive/apache-hive-2.3.7-bin/conf # Hive conf path
export HIVE_AUX_JARS_PATH=/usr/local/hive/apache-hive-2.3.7-bin/lib # Hive jar path
export JAVA_HOME=/home/fleam/jdk1.8.0_191 # JDK install path
``
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-default.xml.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-site.xml
vi /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-site.xml
``
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/opt/hive/warehouse</value>
</property>
<property>
<name>hive.metastore.local</name>
<value>true</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://localhost:3306/hive_db?createDatabaseIfNotExist=true</value> <!-- host running MySQL -->
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
</property>
``
# Comment out every property that still references derby, and point these four properties at local paths:
# hive.querylog.location => /usr/hive/tmp/root
# hive.server2.logging.operation.log.location => /home/hive/root/operation_logs
# hive.exec.local.scratchdir => /home/hive/root
# hive.downloaded.resources.dir => /home/hive/${hive.session.id}_resources
mkdir -p /usr/hive/tmp/root
mkdir -p /home/hive/root/operation_logs
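The hive-site.xml copied from the template also contains ${system:java.io.tmpdir} and ${system:user.name} placeholders that Hive fails to resolve at runtime; one common fix is to substitute them with the real paths created above:
``
cd /usr/local/hive/apache-hive-2.3.7-bin/conf
sed -i 's|${system:java.io.tmpdir}|/usr/hive/tmp|g' hive-site.xml
sed -i 's|${system:user.name}|root|g' hive-site.xml
``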
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-exec-log4j2.properties.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-exec-log4j2.properties
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-log4j2.properties.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-log4j2.properties
wget https://cdn.mysql.com/archives/mysql-connector-java-5.1/mysql-connector-java-5.1.48.tar.gz
tar -zxvf mysql-connector-java-5.1.48.tar.gz
cp /home/mysql-connector-java-5.1.48/mysql-connector-java-5.1.48-bin.jar /usr/local/hive/apache-hive-2.3.7-bin/lib
zkServer.sh start
zkServer.sh status
hadoop-daemon.sh start journalnode
start-all.sh
start-hbase.sh
schematool -initSchema -dbType mysql
##
schemaTool completed
##
hive
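A minimal check that Hive round-trips metadata through MySQL (the table name is arbitrary):
``
hive -e "CREATE TABLE IF NOT EXISTS smoke_test (id INT); SHOW TABLES;"
``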
Kylin installation
wget https://mirror.bit.edu.cn/apache/kylin/apache-kylin-3.1.1/apache-kylin-3.1.1-bin-hbase1x.tar.gz
mkdir -p /usr/local/kylin/
tar -zxvf apache-kylin-3.1.1-bin-hbase1x.tar.gz -C /usr/local/kylin/
vi /etc/profile
``
export KYLIN_HOME=/usr/local/kylin/apache-kylin-3.1.1-bin-hbase1x
export PATH=$PATH:$KYLIN_HOME/bin
``
source /etc/profile
sh $KYLIN_HOME/bin/check-env.sh
1. On all nodes
zkServer.sh start
zkServer.sh status
hadoop-daemon.sh start journalnode
2. On the master node
start-all.sh
start-hbase.sh
nohup hive --service metastore & # Hive metastore, required by Kylin
nohup hive --service hiveserver2 & # HiveServer2 for JDBC/beeline access
mr-jobhistory-daemon.sh start historyserver # MapReduce job history server
kylin.sh start
http://192.168.182.8:7070/kylin
Default username: ADMIN
Default password: KYLIN
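Kylin ships a sample-cube loader that exercises the whole stack (Hive, HBase, MapReduce) end to end; a hedged first step after logging in:
``
sh $KYLIN_HOME/bin/sample.sh  # loads the kylin_sales sample tables and cube metadata
# then reload metadata in the web UI and build the sample cube
``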
hdfs
hadoop fs -ls /
hadoop fs -mkdir -p /input # 'hadoop dfs' is deprecated; use 'hadoop fs'
hadoop fs -put 1.txt /input
hive
beeline # requires a running hiveserver2 (see the Kylin section)
!connect jdbc:hive2://127.0.0.1:10000
hbase
hbase shell
list
scan 'tbl' # table names must be quoted
disable 'tbl'
drop 'tbl'