1. Experiment 1: Monitoring the running status of the big data platform via commands
1.1 Task 1: Checking the big data platform status via commands
Step 1: View Linux system information (uname -a)
[root@master ~]# uname -a
Linux master 3.10.0-862.el7.x86_64
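uname also accepts flags that print a single field, which is convenient in scripts; a minimal sketch (standard uname options, output differs per machine):
# kernel release only
uname -r
# hardware architecture only
uname -m
# host name only
uname -n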
Step 2: View hard disk information
[root@master ~]# fdisk -l
Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x00098617
Device Boot Start End Blocks Id System
/dev/sda1 * 2048 2099199 1048576 83 Linux
/dev/sda2 2099200 41943039 19921920 8e Linux LVM
Disk /dev/mapper/centos-root: 18.2 GB, 18249416704 bytes, 35643392 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mapper/centos-swap: 2147 MB, 2147483648 bytes, 4194304 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
[root@master ~]# swapon -s
Filename Type Size Used Priority
/dev/dm-1 partition 2097148 0 -1
[root@master ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 19G 4.9G 14G 27% /
devtmpfs 1.1G 0 1.1G 0% /dev
tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs 1.1G 10M 1.1G 1% /run
tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
/dev/sda1 1.1G 136M 928M 13% /boot
tmpfs 208M 0 208M 0% /run/user/0
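df reports usage per mounted filesystem; to find out how much space one directory tree takes, du is the usual complement. A short sketch (the paths are just examples):
# total size of the Hadoop installation, human-readable
du -sh /usr/local/src/hadoop
# first-level subdirectories under /usr/local/src, largest first
du -h --max-depth=1 /usr/local/src | sort -hr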
Step 3: View the network IP address (ifconfig)
[root@master ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.100.10 netmask 255.255.255.0 broadcast 192.168.100.255
inet6 fe80::38e1:7d60:659f:1935 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:77:a4:59 txqueuelen 1000 (Ethernet)
RX packets 473 bytes 40928 (39.9 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 216 bytes 23403 (22.8 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
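On CentOS 7 the iproute2 tools are the modern replacement for ifconfig, so the same addresses can also be read with ip (the interface name ens33 comes from the output above):
# addresses of one interface
ip addr show ens33
# kernel routing table
ip route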
Step 4: View all listening ports (netstat -lntp)
[root@master ~]# netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 921/sshd
tcp6 0 0 :::3306 :::* LISTEN 957/mysqld
tcp6 0 0 :::22 :::* LISTEN 921/sshd
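ss from iproute2 shows the same listening-socket table and is the suggested replacement for netstat on newer systems; a minimal sketch:
# listening TCP sockets with owning PID/program (run as root to see all PIDs)
ss -lntp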
Step 5: View all established connections (netstat -antp)
[root@master ~]# netstat -antp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 921/sshd
tcp 0 52 192.168.100.10:22 192.168.100.1:2099 ESTABLISHED 1257/sshd: root@pts
tcp6 0 0 :::3306 :::* LISTEN 957/mysqld
tcp6 0 0 :::22 :::* LISTEN 921/sshd
Step 6: Display process status in real time (top). This command shows each process's CPU and memory usage.
[root@master ~]# top
top - 22:22:34 up 17 min, 2 users, load average: 0.05, 0.04, 0.05
Tasks: 99 total, 1 running, 98 sleeping, 0 stopped, 0 zombie
%Cpu(s): 0.0 us, 0.0 sy, 0.0 ni,100.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 2030172 total, 1545544 free, 287624 used, 197004 buff/cache
KiB Swap: 2097148 total, 2097148 free, 0 used. 1555692 avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1 root 20 0 127964 6460 4080 S 0.0 0.3 0:01.10 systemd
2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd
3 root 20 0 0 0 0 S 0.0 0.0 0:00.01 ksoftirqd/0
5 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/0:0H
7 root rt 0 0 0 0 S 0.0 0.0 0:00.02 migration/0
8 root 20 0 0 0 0 S 0.0 0.0 0:00.00 rcu_bh
9 root 20 0 0 0 0 S 0.0 0.0 0:00.21 rcu_sched
10 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 lru-add-drain
11 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/0
12 root rt 0 0 0 0 S 0.0 0.0 0:00.00 watchdog/1
13 root rt 0 0 0 0 S 0.0 0.0 0:00.00 migration/1
14 root 20 0 0 0 0 S 0.0 0.0 0:00.03 ksoftirqd/1
15 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kworker/1:0
16 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kworker/1:0H
18 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kdevtmpfs
19 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 netns
20 root 20 0 0 0 0 S 0.0 0.0 0:00.00 khungtaskd
21 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 writeback
22 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kintegrityd
23 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 bioset
24 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kblockd
25 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 md
26 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 edac-poller
27 root 20 0 0 0 0 S 0.0 0.0 0:00.27 kworker/0:1
32 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kswapd0
33 root 25 5 0 0 0 S 0.0 0.0 0:00.00 ksmd
34 root 39 19 0 0 0 S 0.0 0.0 0:00.26 khugepaged
35 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 crypto
43 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kthrotld
44 root 20 0 0 0 0 S 0.0 0.0 0:00.06 kworker/u256:1
45 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kmpath_rdacd
46 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kaluad
48 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 kpsmoused
49 root 20 0 0 0 0 S 0.0 0.0 0:00.47 kworker/0:2
50 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 ipv6_addrconf
63 root 0 -20 0 0 0 S 0.0 0.0 0:00.00 deferwq
95 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kauditd
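top also has a non-interactive batch mode, useful for capturing a snapshot into a log file or a script; a sketch:
# one snapshot of the process table, first 20 lines
top -b -n 1 | head -20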
Step 7: View CPU information (cat /proc/cpuinfo)
[root@master ~]# cat /proc/cpuinfo
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 158
model name : Intel(R) Core(TM) i5-9500 CPU @ 3.00GHz
stepping : 10
microcode : 0xca
cpu MHz : 3000.001
cache size : 9216 KB
physical id : 0
siblings : 1
core id : 0
cpu cores : 1
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 22
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon nopl xtopology tsc_reliable nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch fsgsbase tsc_adjust bmi1 avx2 smep bmi2 invpcid mpx rdseed adx smap clflushopt xsaveopt xsavec ibpb ibrs stibp arat spec_ctrl intel_stibp arch_capabilities
bogomips : 6000.00
clflush size : 64
cache_alignment : 64
address sizes : 43 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 158
model name : Intel(R) Core(TM) i5-9500 CPU @ 3.00GHz
stepping : 10
microcode : 0xca
cpu MHz : 3000.001
cache size : 9216 KB
physical id : 2
siblings : 1
core id : 0
cpu cores : 1
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 22
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon nopl xtopology tsc_reliable nonstop_tsc eagerfpu pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch fsgsbase tsc_adjust bmi1 avx2 smep bmi2 invpcid mpx rdseed adx smap clflushopt xsaveopt xsavec ibpb ibrs stibp arat spec_ctrl intel_stibp arch_capabilities
bogomips : 6000.00
clflush size : 64
cache_alignment : 64
address sizes : 43 bits physical, 48 bits virtual
power management:
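/proc/cpuinfo prints one block per logical CPU, so summaries are easy to script; lscpu (from util-linux) prints a condensed view. A sketch:
# number of logical processors
grep -c '^processor' /proc/cpuinfo
# condensed CPU summary
lscpu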
Step 8: View memory information (cat /proc/meminfo). This command shows total memory, free memory, and related statistics.
[root@master ~]# cat /proc/meminfo
MemTotal: 2030172 kB
MemFree: 1545816 kB
MemAvailable: 1555964 kB
Buffers: 2112 kB
Cached: 139780 kB
SwapCached: 0 kB
Active: 252236 kB
Inactive: 119600 kB
Active(anon): 230408 kB
Inactive(anon): 9268 kB
Active(file): 21828 kB
Inactive(file): 110332 kB
Unevictable: 0 kB
Mlocked: 0 kB
SwapTotal: 2097148 kB
SwapFree: 2097148 kB
Dirty: 0 kB
Writeback: 0 kB
AnonPages: 229984 kB
Mapped: 29948 kB
Shmem: 9732 kB
Slab: 55112 kB
SReclaimable: 19452 kB
SUnreclaim: 35660 kB
KernelStack: 4272 kB
PageTables: 4056 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 3112232 kB
Committed_AS: 794432 kB
VmallocTotal: 34359738367 kB
VmallocUsed: 180228 kB
VmallocChunk: 34359310332 kB
HardwareCorrupted: 0 kB
AnonHugePages: 184320 kB
CmaTotal: 0 kB
CmaFree: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB
DirectMap4k: 65408 kB
DirectMap2M: 2031616 kB
DirectMap1G: 0 kB
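free reads the same counters as /proc/meminfo and formats them as a small table; a sketch:
# human-readable memory and swap summary
free -h
# or extract a single field directly
grep MemAvailable /proc/meminfo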
1.2 Task 2: Checking Hadoop status via commands
Step 1: Switch to the hadoop user
[root@master ~]# su - hadoop
Last login: Tue Apr 18 22:30:53 EDT 2023 on pts/0
Step 2: Switch to the Hadoop installation directory
[hadoop@master ~]$ cd /usr/local/src/hadoop/
Step 3: Start Hadoop
[hadoop@master hadoop]$ start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
slave2: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
slave1: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
192.168.10.20: ssh: connect to host 192.168.10.20 port 22: Connection refused
192.168.10.30: ssh: connect to host 192.168.10.30 port 22: Connection refused
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-resourcemanager-master.out
slave1: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave1.out
slave2: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
192.168.10.20: ssh: connect to host 192.168.10.20 port 22: Connection refused
192.168.10.30: ssh: connect to host 192.168.10.30 port 22: Connection refused
[hadoop@master hadoop]$ jps
2065 Jps
1641 SecondaryNameNode
1802 ResourceManager
1437 NameNode
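The jps output above lists no DataNode or NodeManager on the master because those daemons run on the slave nodes. A quick way to check them from the master, assuming passwordless ssh to the slaves and that jps is on the remote non-interactive PATH:
# expect DataNode and NodeManager in each listing
ssh slave1 jps
ssh slave2 jps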
Step 4: Stop Hadoop
[hadoop@master hadoop]$ stop-all.sh
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [master]
master: stopping namenode
slave2: stopping datanode
slave1: stopping datanode
192.168.10.20: ssh: connect to host 192.168.10.20 port 22: Connection refused
192.168.10.30: ssh: connect to host 192.168.10.30 port 22: Connection refused
Stopping secondary namenodes [0.0.0.0]
0.0.0.0: stopping secondarynamenode
stopping yarn daemons
stopping resourcemanager
slave2: stopping nodemanager
slave1: stopping nodemanager
192.168.10.20: ssh: connect to host 192.168.10.20 port 22: Connection refused
192.168.10.30: ssh: connect to host 192.168.10.30 port 22: Connection refused
no proxyserver to stop
[hadoop@master hadoop]$ jps
2508 Jps
2. Experiment 2: Monitoring big data platform resource status via commands
2.1 Task 1: Checking YARN status via commands
Step 1: Make sure the current directory is /usr/local/src/hadoop
[hadoop@master hadoop]$ cd /usr/local/src/hadoop
Step 2: Start ZooKeeper on each node, then run start-all.sh on the Master host
[hadoop@master hadoop]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@slave1 hadoop]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@slave2 hadoop]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@master hadoop]$ start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
slave2: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
slave1: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
192.168.10.30: ssh: connect to host 192.168.10.30 port 22: Connection refused
192.168.10.20: ssh: connect to host 192.168.10.20 port 22: Connection refused
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-resourcemanager-master.out
slave2: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
slave1: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave1.out
192.168.10.20: ssh: connect to host 192.168.10.20 port 22: Connection refused
192.168.10.30: ssh: connect to host 192.168.10.30 port 22: Connection refused
[hadoop@master hadoop]$ jps
2689 NameNode
2531 QuorumPeerMain
3315 Jps
2890 SecondaryNameNode
3050 ResourceManager
Step 3: Run the jps command. The master shows the ResourceManager process (the NodeManager processes run on the slave nodes), which means YARN has started.
[hadoop@master hadoop]$ jps
2689 NameNode
2531 QuorumPeerMain
3315 Jps
2890 SecondaryNameNode
3050 ResourceManager
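Besides jps, the yarn command-line client can confirm that the NodeManagers on the slaves have registered with the ResourceManager; a sketch (run on the master):
# nodes currently known to the ResourceManager
yarn node -list
# running applications (empty on a freshly started cluster)
yarn application -list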
2.2 Task 2: Checking HDFS status via commands
Step 1: Directory operations
Switch to the Hadoop directory with the cd /usr/local/src/hadoop command.
[hadoop@master hadoop]$ pwd
/usr/local/src/hadoop
[hadoop@master hadoop]$ ./bin/hdfs dfs -ls /
Found 5 items
drwxr-xr-x - hadoop supergroup 0 2023-04-06 05:29 /hbase
drwxr-xr-x - hadoop supergroup 0 2023-03-16 05:34 /input
drwxr-xr-x - hadoop supergroup 0 2023-03-17 01:47 /output
drwx------ - hadoop supergroup 0 2023-04-18 22:35 /tmp
drwxr-xr-x - hadoop supergroup 0 2023-04-12 08:12 /user
Step 2: View the HDFS report by running the command bin/hdfs dfsadmin -report
[hadoop@master hadoop]$ bin/hdfs dfsadmin -report
Configured Capacity: 36477861888 (33.97 GB)
Present Capacity: 31487311872 (29.32 GB)
DFS Remaining: 31481454592 (29.32 GB)
DFS Used: 5857280 (5.59 MB)
DFS Used%: 0.02%
Under replicated blocks: 1
Blocks with corrupt replicas: 1
Missing blocks: 1
Missing blocks (with replication factor 1): 0
-------------------------------------------------
Live datanodes (2):
Name: 192.168.100.20:50010 (slave1)
Hostname: slave1
Decommission Status : Normal
Configured Capacity: 18238930944 (16.99 GB)
DFS Used: 2928640 (2.79 MB)
Non DFS Used: 2494345216 (2.32 GB)
DFS Remaining: 15741657088 (14.66 GB)
DFS Used%: 0.02%
DFS Remaining%: 86.31%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Tue Apr 25 22:36:28 EDT 2023
Name: 192.168.100.30:50010 (slave2)
Hostname: slave2
Decommission Status : Normal
Configured Capacity: 18238930944 (16.99 GB)
DFS Used: 2928640 (2.79 MB)
Non DFS Used: 2496204800 (2.32 GB)
DFS Remaining: 15739797504 (14.66 GB)
DFS Used%: 0.02%
DFS Remaining%: 86.30%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Tue Apr 25 22:36:28 EDT 2023
Step 3: Check HDFS space usage by running the command hdfs dfs -df
[hadoop@master hadoop]$ hdfs dfs -df /
Filesystem Size Used Available Use%
hdfs://master:9000 36477861888 5857280 31481454592 0%
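Since the report above shows one missing and one corrupt block, hdfs fsck is the standard tool for locating the affected files; a sketch:
# check the health of every block under /
hdfs fsck /
# list only files with missing or corrupt blocks
hdfs fsck / -list-corruptfileblocks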
2.3 Task 3: Checking HBase status via commands
Step 1: Start HBase
Switch to the HBase installation directory /usr/local/src/hbase with the following command:
[hadoop@master hadoop]$ cd /usr/local/src/hbase
Step 2: View HBase version information
[hadoop@master hbase]$ hbase version
HBase 1.2.1
Source code repository git://asf-dev/home/busbey/projects/hbase revision=8d8a7107dc4ccbf36a92f64675dc60392f85c015
Compiled by busbey on Wed Mar 30 11:19:21 CDT 2016
From source with checksum f4bb4a14bb4e0b72b46f729dae98a772
[hadoop@master hbase]$ hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016
hbase(main):001:0> version
1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016
hbase(main):002:0> quit
Step 3: Query HBase status by running the status command in the HBase shell
[hadoop@master hbase]$ hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016
hbase(main):001:0> status
ERROR: Can't get master address from ZooKeeper; znode data == null
Here is some help for this command:
Show cluster status. Can be 'summary', 'simple', 'detailed', or 'replication'. The
default is 'summary'. Examples:
hbase> status
hbase> status 'simple'
hbase> status 'summary'
hbase> status 'detailed'
hbase> status 'replication'
hbase> status 'replication', 'source'
hbase> status 'replication', 'sink'
# Query HBase status in "simple" form by running status 'simple'
hbase(main):002:0> status 'simple'
ERROR: Can't get master address from ZooKeeper; znode data == null
Here is some help for this command:
Show cluster status. Can be 'summary', 'simple', 'detailed', or 'replication'. The
default is 'summary'. Examples:
hbase> status
hbase> status 'simple'
hbase> status 'summary'
hbase> status 'detailed'
hbase> status 'replication'
hbase> status 'replication', 'source'
hbase> status 'replication', 'sink'
hbase(main):003:0> help 'status'
Show cluster status. Can be 'summary', 'simple', 'detailed', or 'replication'. The
default is 'summary'. Examples:
hbase> status
hbase> status 'simple'
hbase> status 'summary'
hbase> status 'detailed'
hbase> status 'replication'
hbase> status 'replication', 'source'
hbase> status 'replication', 'sink'
hbase(main):004:0> quit
Step 4: Stop the HBase service
To stop the HBase service, run the stop-hbase.sh command.
[hadoop@master hbase]$ stop-hbase.sh
stopping hbasecat: /tmp/hbase-hadoop-master.pid: No such file or directory
master: no zookeeper to stop because no pid file /tmp/hbase-hadoop-zookeeper.pid
slave2: no zookeeper to stop because no pid file /tmp/hbase-hadoop-zookeeper.pid
slave1: no zookeeper to stop because no pid file /tmp/hbase-hadoop-zookeeper.pid
2.4 Task 4: Checking Hive status via commands
Step 1: Start Hive
Switch to the /usr/local/src/hive directory, type hive, and press Enter.
[hadoop@master hbase]$ cd /usr/local/src/hive
[hadoop@master hive]$ hive
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/hive-jdbc-2.0.0-standalone.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/log4j-slf4j-impl-2.4.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Logging initialized using configuration in jar:file:/usr/local/src/hive/lib/hive-common-2.0.0.jar!/hive-log4j2.properties
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive>
Step 2: Basic Hive commands
Note: every Hive command-line statement must end with a semicolon.
hive> show databases;
OK
default
sample
Time taken: 0.599 seconds, Fetched: 2 row(s)
hive> use default;
OK
Time taken: 0.016 seconds
hive> show tables;
OK
test
Time taken: 0.038 seconds, Fetched: 1 row(s)
hive> create table stu(id int, name string);
OK
Time taken: 0.216 seconds
hive> insert into stu values (1001, "zhangsan");
WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
Query ID = hadoop_20230425224452_387816fe-5070-4fb5-958c-c2a25dcc426d
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1682476413854_0001, Tracking URL = http://master:8088/proxy/application_1682476413854_0001/
Kill Command = /usr/local/src/hadoop/bin/hadoop job -kill job_1682476413854_0001
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2023-04-25 22:45:30,789 Stage-1 map = 0%, reduce = 0%
2023-04-25 22:45:37,126 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 1.78 sec
MapReduce Total cumulative CPU time: 1 seconds 780 msec
Ended Job = job_1682476413854_0001
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://master:9000/user/hive/warehouse/stu/.hive-staging_hive_2023-04-25_22-45-19_510_1539995881137188314-1/-ext-10000
Loading data to table default.stu
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1 Cumulative CPU: 1.78 sec HDFS Read: 4138 HDFS Write: 81 SUCCESS
Total MapReduce CPU Time Spent: 1 seconds 780 msec
OK
Time taken: 20.046 seconds
hive> insert into stu values (1001,"zhangsan");
WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
Query ID = hadoop_20230425224452_387816fe-5070-4fb5-958c-c2a25dcc426d
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1682476413854_0002, Tracking URL = http://master:8088/proxy/application_1682476413854_0002/
Kill Command = /usr/local/src/hadoop/bin/hadoop job -kill job_1682476413854_0002
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2023-04-25 22:45:47,757 Stage-1 map = 0%, reduce = 0%
2023-04-25 22:45:51,958 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 0.97 sec
MapReduce Total cumulative CPU time: 970 msec
Ended Job = job_1682476413854_0002
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://master:9000/user/hive/warehouse/stu/.hive-staging_hive_2023-04-25_22-45-39_737_6941445347299338402-1/-ext-10000
Loading data to table default.stu
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1 Cumulative CPU: 0.97 sec HDFS Read: 4244 HDFS Write: 81 SUCCESS
Total MapReduce CPU Time Spent: 970 msec
OK
Time taken: 13.444 seconds
hive> show tables;
OK
stu
test
values__tmp__table__1
values__tmp__table__2
Time taken: 0.017 seconds, Fetched: 4 row(s)
hive> desc stu;
OK
id int
name string
Time taken: 0.025 seconds, Fetched: 2 row(s)
hive> select * from stu;
OK
1001 zhangsan
1001 zhangsan
Time taken: 0.055 seconds, Fetched: 2 row(s)
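The same statements can also be run non-interactively, which is handy when checking Hive from a script; hive -e executes a quoted statement and hive -f a script file (the /tmp/check.sql path is only an example):
# run one statement and exit
hive -e 'select * from stu;'
# run a file of statements
hive -f /tmp/check.sql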
Step 3: View the file system and command history from the Hive command line
hive> ! ls /usr/local/src;
flume
hadoop
hbase
hive
jdk
sqoop
zookeeper
hive> dfs -ls /;
Found 5 items
drwxr-xr-x - hadoop supergroup 0 2023-04-06 05:29 /hbase
drwxr-xr-x - hadoop supergroup 0 2023-03-16 05:34 /input
drwxr-xr-x - hadoop supergroup 0 2023-03-17 01:47 /output
drwx------ - hadoop supergroup 0 2023-04-18 22:35 /tmp
drwxr-xr-x - hadoop supergroup 0 2023-04-12 08:12 /user
[hadoop@master hive]$ cd /home/hadoop
[hadoop@master ~]$ pwd
/home/hadoop
[hadoop@master ~]$ cat .hivehistory
show databases;
exit
exit;
exit
;
create database sample;
show databases;
use sample;
create table student(number STRING, name STRING)
create table student(number STRING, name STRING)
row format delimited
fields terminated by "|"
stored as textfile;
create table student(number STRING, name STRING) row format delimited fields terminated by "|" stored as textfile;
exit;
select * from student;
use sample;
select * from student;
quit;
show databases;
use default;
show tables;
create table stu(id int, name string);
insert into stu values (1001, "zhangsan");
show tables;
desc stu;
select * from stu;
! ls /usr/local/src;
dfs -ls /;
exit;
3. Experiment 3: Monitoring big data platform service status via commands
3.1 Task 1: Checking ZooKeeper status via commands
Step 1: Check ZooKeeper status by running the zkServer.sh status command; the result is shown below
[hadoop@master ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: follower
Step 2: View the running processes
QuorumPeerMain: QuorumPeerMain is the startup entry class of a ZooKeeper cluster; it loads the configuration and starts the QuorumPeer thread.
[hadoop@master ~]$ jps
2689 NameNode
4369 Jps
2531 QuorumPeerMain
2890 SecondaryNameNode
3050 ResourceManager
Step 3: After the ZooKeeper service has started successfully, run the zkCli.sh command to connect to it.
[hadoop@master ~]$ zkCli.sh
Connecting to localhost:2181
2023-04-25 22:55:25,316 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.8--1, built on 02/06/2016 03:18 GMT
2023-04-25 22:55:25,319 [myid:] - INFO [main:Environment@100] - Client environment:host.name=master
2023-04-25 22:55:25,319 [myid:] - INFO [main:Environment@100] - Client environment:java.version=1.8.0_152
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:java.vendor=Oracle Corporation
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:java.home=/usr/local/src/jdk/jre
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:java.class.path=/usr/local/src/zookeeper/bin/../build/classes:/usr/local/src/zookeeper/bin/../build/lib/*.jar:/usr/local/src/zookeeper/bin/../lib/slf4j-log4j12-1.6.1.jar:/usr/local/src/zookeeper/bin/../lib/slf4j-api-1.6.1.jar:/usr/local/src/zookeeper/bin/../lib/netty-3.7.0.Final.jar:/usr/local/src/zookeeper/bin/../lib/log4j-1.2.16.jar:/usr/local/src/zookeeper/bin/../lib/jline-0.9.94.jar:/usr/local/src/zookeeper/bin/../zookeeper-3.4.8.jar:/usr/local/src/zookeeper/bin/../src/java/lib/*.jar:/usr/local/src/zookeeper/bin/../conf::/usr/local/src/sqoop/lib
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:java.io.tmpdir=/tmp
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:java.compiler=<NA>
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:os.name=Linux
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:os.arch=amd64
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:os.version=3.10.0-862.el7.x86_64
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:user.name=hadoop
2023-04-25 22:55:25,330 [myid:] - INFO [main:Environment@100] - Client environment:user.home=/home/hadoop
2023-04-25 22:55:25,331 [myid:] - INFO [main:Environment@100] - Client environment:user.dir=/home/hadoop
2023-04-25 22:55:25,332 [myid:] - INFO [main:ZooKeeper@438] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@69d0a921
Welcome to ZooKeeper!
2023-04-25 22:55:25,373 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1032] - Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
JLine support is enabled
2023-04-25 22:55:25,462 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@876] - Socket connection established to localhost/127.0.0.1:2181, initiating session
2023-04-25 22:55:25,542 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1299] - Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x187bb656fc50002, negotiated timeout = 30000
WATCHER::
WatchedEvent state:SyncConnected type:None path:null
[zk: localhost:2181(CONNECTED) 1] help
ZooKeeper -server host:port cmd args
stat path [watch]
set path data [version]
ls path [watch]
delquota [-n|-b] path
ls2 path [watch]
setAcl path acl
setquota -n|-b val path
history
redo cmdno
printwatches on|off
delete path [version]
sync path
listquota path
rmr path
get path [watch]
create [-s] [-e] path data acl
addauth scheme auth
quit
getAcl path
close
connect host:port
Step 4: Use a watch to monitor the /hbase znode; whenever the contents of /hbase change, a notification is printed. Enable the watch by running get /hbase 1.
[hadoop@master ~]$ zkCli.sh
Connecting to localhost:2181
2023-04-25 22:57:05,158 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.8--1, built on 02/06/2016 03:18 GMT
2023-04-25 22:57:05,161 [myid:] - INFO [main:Environment@100] - Client environment:host.name=master
2023-04-25 22:57:05,161 [myid:] - INFO [main:Environment@100] - Client environment:java.version=1.8.0_152
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:java.vendor=Oracle Corporation
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:java.home=/usr/local/src/jdk/jre
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:java.class.path=/usr/local/src/zookeeper/bin/../build/classes:/usr/local/src/zookeeper/bin/../build/lib/*.jar:/usr/local/src/zookeeper/bin/../lib/slf4j-log4j12-1.6.1.jar:/usr/local/src/zookeeper/bin/../lib/slf4j-api-1.6.1.jar:/usr/local/src/zookeeper/bin/../lib/netty-3.7.0.Final.jar:/usr/local/src/zookeeper/bin/../lib/log4j-1.2.16.jar:/usr/local/src/zookeeper/bin/../lib/jline-0.9.94.jar:/usr/local/src/zookeeper/bin/../zookeeper-3.4.8.jar:/usr/local/src/zookeeper/bin/../src/java/lib/*.jar:/usr/local/src/zookeeper/bin/../conf::/usr/local/src/sqoop/lib
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:java.io.tmpdir=/tmp
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:java.compiler=<NA>
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:os.name=Linux
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:os.arch=amd64
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:os.version=3.10.0-862.el7.x86_64
2023-04-25 22:57:05,162 [myid:] - INFO [main:Environment@100] - Client environment:user.name=hadoop
2023-04-25 22:57:05,163 [myid:] - INFO [main:Environment@100] - Client environment:user.home=/home/hadoop
2023-04-25 22:57:05,163 [myid:] - INFO [main:Environment@100] - Client environment:user.dir=/home/hadoop
2023-04-25 22:57:05,163 [myid:] - INFO [main:ZooKeeper@438] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@69d0a921
2023-04-25 22:57:05,178 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1032] - Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
Welcome to ZooKeeper!
JLine support is enabled
2023-04-25 22:57:05,232 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@876] - Socket connection established to localhost/127.0.0.1:2181, initiating session
2023-04-25 22:57:05,242 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1299] - Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x187bb656fc50003, negotiated timeout = 30000
WATCHER::
WatchedEvent state:SyncConnected type:None path:null
[zk: localhost:2181(CONNECTED) 0] get /hbase 1
cZxid = 0x200000002
ctime = Thu Apr 06 05:29:31 EDT 2023
mZxid = 0x200000002
mtime = Thu Apr 06 05:29:31 EDT 2023
pZxid = 0x300000007
cversion = 17
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 15
[zk: localhost:2181(CONNECTED) 1] set /hbase value-update
WATCHER::
WatchedEvent state:SyncConnected type:NodeDataChanged path:/hbase
cZxid = 0x200000002
ctime = Thu Apr 06 05:29:31 EDT 2023
mZxid = 0x300000012
mtime = Tue Apr 25 22:57:25 EDT 2023
pZxid = 0x300000007
cversion = 17
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 12
numChildren = 15
[zk: localhost:2181(CONNECTED) 2] get /hbase
value-update
cZxid = 0x200000002
ctime = Thu Apr 06 05:29:31 EDT 2023
mZxid = 0x300000012
mtime = Tue Apr 25 22:57:25 EDT 2023
pZxid = 0x300000007
cversion = 17
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 12
numChildren = 15
[zk: localhost:2181(CONNECTED) 3]
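ZooKeeper 3.4 also answers "four-letter word" commands on the client port, so a status check is possible without opening zkCli.sh; a sketch (assumes the nc utility is installed):
# the server replies 'imok' if it is running
echo ruok | nc localhost 2181
# brief statistics and connected clients
echo stat | nc localhost 2181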
3.2 Task 2: Checking Sqoop status via commands
Step 1: Query the Sqoop version number to verify that Sqoop started successfully.
First switch to the /usr/local/src/sqoop directory, then run the command ./bin/sqoop-version:
[hadoop@master ~]$ cd /usr/local/src/sqoop
[hadoop@master sqoop]$ ./bin/sqoop-version
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
23/04/25 22:59:39 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
Sqoop 1.4.7
git commit id 2328971411f57f0cb683dfb79d19d4d19d185dd8
Compiled by maugli on Thu Dec 21 15:59:58 STD 2017
Step 2: Test whether Sqoop can connect to the database successfully
Switch to the Sqoop directory and run the command bin/sqoop list-databases --connect jdbc:mysql://master:3306/ --username root --password Password123$, where master:3306 is the database host name and port. (The run below connects via 127.0.0.1 and uses -P to prompt for the password instead of passing it on the command line.)
[hadoop@master sqoop]$ sqoop list-databases --connect jdbc:mysql://127.0.0.1:3306/ --username root -P
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
23/04/25 23:03:10 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
Enter password:
23/04/25 23:03:25 INFO manager.MySQLManager: Preparing to use a MySQL streaming resultset.
Tue Apr 25 23:03:25 EDT 2023 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
information_schema
hive
mysql
performance_schema
sample
sys
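The companion list-tables tool verifies access to one specific database in the same way (the sample database name is taken from the listing above):
# list the tables in the sample database; -P prompts for the password
sqoop list-tables --connect jdbc:mysql://127.0.0.1:3306/sample --username root -P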
Step 3: Run the sqoop help command; output like the following means Sqoop started successfully.
[hadoop@master sqoop]$ sqoop help
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
23/04/25 23:04:51 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
usage: sqoop COMMAND [ARGS]
Available commands:
codegen Generate code to interact with database records
create-hive-table Import a table definition into Hive
eval Evaluate a SQL statement and display the results
export Export an HDFS directory to a database table
help List available commands
import Import a table from a database to HDFS
import-all-tables Import tables from a database to HDFS
import-mainframe Import datasets from a mainframe server to HDFS
job Work with saved jobs
list-databases List available databases on a server
list-tables List available tables in a database
merge Merge results of incremental imports
metastore Run a standalone Sqoop metastore
version Display version information
See 'sqoop help COMMAND' for information on a specific command.
3.3 Task 3: Checking Flume status via commands
Step 1: Verify that Flume is installed correctly by running flume-ng version to view the Flume version.
[hadoop@master sqoop]$ cd /usr/local/src/flume
[hadoop@master flume]$ flume-ng version
Flume 1.6.0
Source code repository: https://git-wip-us.apache.org/repos/asf/flume.git
Revision: 2561a23240a71ba20bf288c7c2cda88f443c2080
Compiled by hshreedharan on Mon May 11 11:15:44 PDT 2015
From source with checksum b29e416802ce9ece3269d34233baf43f
Step 2: Add example.conf under /usr/local/src/flume
[hadoop@master flume]$ vi /usr/local/src/flume/example.conf
a1.sources=r1
a1.sinks=k1
a1.channels=c1
a1.sources.r1.type=spooldir
a1.sources.r1.spoolDir=/usr/local/src/flume/
a1.sources.r1.fileHeader=true
a1.sinks.k1.type=hdfs
a1.sinks.k1.hdfs.path=hdfs://master:9000/flume
# roll (close and create) the target file once it reaches 1048760 bytes
a1.sinks.k1.hdfs.rollSize=1048760
a1.sinks.k1.hdfs.rollCount=0
# roll a new file every 900 seconds
a1.sinks.k1.hdfs.rollInterval=900
a1.sinks.k1.hdfs.useLocalTimeStamp=true
a1.channels.c1.type=file
a1.channels.c1.capacity=1000
a1.channels.c1.transactionCapacity=100
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
Step 3: Start Flume agent a1 with logging to the console
[hadoop@master flume]$ /usr/local/src/flume/bin/flume-ng agent --conf ./conf --conf-file ./example.conf --name a1 -Dflume.root.logger=INFO,console
Info: Sourcing environment configuration script /usr/local/src/flume/conf/flume-env.sh
Info: Including Hadoop libraries found via (/usr/local/src/hadoop/bin/hadoop) for HDFS access
Info: Excluding /usr/local/src/hadoop/share/hadoop/common/lib/slf4j-api-1.7.10.jar from classpath
Info: Excluding /usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar from classpath
Info: Including HBASE libraries found via (/usr/local/src/hbase/bin/hbase) for HBASE access
Info: Excluding /usr/local/src/hbase/lib/slf4j-api-1.7.7.jar from classpath
Info: Excluding /usr/local/src/hbase/lib/slf4j-log4j12-1.7.5.jar from classpath
Info: Excluding /usr/local/src/hadoop/share/hadoop/common/lib/slf4j-api-1.7.10.jar from classpath
Info: Excluding /usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar from classpath
Info: Including Hive libraries found via (/usr/local/src/hive) for Hive access
+ exec /usr/local/src/jdk/bin/java -Xmx20m -Dflume.root.logger=INFO,console -cp '/usr/local/src/flume/conf:/usr/local/src/flume/lib/*:/usr/local/src/hadoop/etc/hadoop:...' (remainder of the very long Hadoop/HBase/Hive classpath omitted)
re/hadoop/hdfs/lib/log4j-1.2.17.jar:/usr/local/src/hadoop/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/usr/local/src/hadoop/share/hadoop/hdfs/lib/netty-all-4.0.23.Final.jar:/usr/local/src/hadoop/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/usr/local/src/hadoop/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/usr/local/src/hadoop/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/usr/local/src/hadoop/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/usr/local/src/hadoop/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/usr/local/src/hadoop/share/hadoop/hdfs/hadoop-hdfs-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/hdfs/hadoop-hdfs-2.7.1-tests.jar:/usr/local/src/hadoop/share/hadoop/hdfs/hadoop-hdfs-nfs-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/hdfs/jdiff:/usr/local/src/hadoop/share/hadoop/hdfs/lib:/usr/local/src/hadoop/share/hadoop/hdfs/sources:/usr/local/src/hadoop/share/hadoop/hdfs/templates:/usr/local/src/hadoop/share/hadoop/hdfs/webapps:/usr/local/src/hadoop/share/hadoop/yarn/lib/activation-1.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/aopalliance-1.0.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/asm-3.2.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/commons-cli-1.2.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/commons-codec-1.4.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/commons-collections-3.2.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/commons-io-2.4.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/commons-lang-2.6.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/guava-11.0.2.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/guice-3.0.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/javax.inject-1.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jersey-client-1.9.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jersey-core-1.9.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jersey-json-1.9.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jersey-server-1.9.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jettison-1.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jetty-6.1.26.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/jsr305-3.0.0.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/log4j-1.2.17.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/servlet-api-2.5.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/xz-1.0.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib/zookeeper-3.4.6-tests.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-api-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.7.1.jar:/usr/
local/src/hadoop/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-client-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-common-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-registry-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-server-common-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-server-sharedcachemanager-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-server-tests-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/yarn/lib:/usr/local/src/hadoop/share/hadoop/yarn/sources:/usr/local/src/hadoop/share/hadoop/yarn/test:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/asm-3.2.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/guice-3.0.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/hadoop-annotations-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/javax.inject-1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/junit-4.11.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib/xz-1.0.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.1-tests.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar:/usr/local/src/hadoop/share/hadoop/mapreduce/lib:/usr/local/src/hadoop/share/hadoop/mapreduce/lib-examples:/usr/local/src/hadoop/share/hadoop/mapreduce/sources:
/usr/local/src/hadoop/contrib/capacity-scheduler/*.jar:/usr/local/src/hbase/conf:/usr/local/src/hive/lib/*' -Djava.library.path=:/usr/local/src/hadoop/lib/native:/usr/local/src/hadoop/lib/native org.apache.flume.node.Application --conf-file ./example.conf --name a1
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/flume/lib/slf4j-log4j12-1.6.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/log4j-slf4j-impl-2.4.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/hive-jdbc-2.0.0-standalone.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html
2023-04-25 23:10:43,238 (lifecycleSupervisor-1-0) [INFO - org.apache.flume.node.PollingPropertiesFileConfigurationProvider.start(PollingPropertiesFileConfigurationProvider.java:61)] Configuration provider starting
2023-04-25 23:10:43,258 (conf-file-poller-0) [INFO - org.apache.flume.node.PollingPropertiesFileConfigurationProvider$FileWatcherRunnable.run(PollingPropertiesFileConfigurationProvider.java:133)] Reloading configuration file:./example.conf
2023-04-25 23:10:43,262 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:931)] Added sinks: k1 Agent: a1
2023-04-25 23:10:43,262 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:1017)] Processing:k1
2023-04-25 23:10:43,263 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:1017)] Processing:k1
2023-04-25 23:10:43,263 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:1017)] Processing:k1
2023-04-25 23:10:43,263 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:1017)] Processing:k1
2023-04-25 23:10:43,263 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:1017)] Processing:k1
2023-04-25 23:10:43,263 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:1017)] Processing:k1
2023-04-25 23:10:43,263 (conf-file-poller-0) [WARN - org.apache.flume.conf.FlumeConfiguration.<init>(FlumeConfiguration.java:102)] Configuration property ignored: 件 =
2023-04-25 23:10:43,263 (conf-file-poller-0) [WARN - org.apache.flume.conf.FlumeConfiguration.<init>(FlumeConfiguration.java:102)] Configuration property ignored: 成目标文件 =
2023-04-25 23:10:43,264 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration$AgentConfiguration.addProperty(FlumeConfiguration.java:1017)] Processing:k1
2023-04-25 23:10:43,281 (conf-file-poller-0) [INFO - org.apache.flume.conf.FlumeConfiguration.validateConfiguration(FlumeConfiguration.java:141)] Post-validation flume configuration contains configuration for agents: [a1]
2023-04-25 23:10:43,281 (conf-file-poller-0) [INFO - org.apache.flume.node.AbstractConfigurationProvider.loadChannels(AbstractConfigurationProvider.java:145)] Creating channels
2023-04-25 23:10:43,286 (conf-file-poller-0) [ERROR - org.apache.flume.node.PollingPropertiesFileConfigurationProvider$FileWatcherRunnable.run(PollingPropertiesFileConfigurationProvider.java:142)] Failed to load configuration data. Exception follows.
org.apache.flume.FlumeException: Unable to load channel type: file
    at org.apache.flume.channel.DefaultChannelFactory.getClass(DefaultChannelFactory.java:69)
    at org.apache.flume.node.AbstractConfigurationProvider.getOrCreateChannel(AbstractConfigurationProvider.java:231)
    at org.apache.flume.node.AbstractConfigurationProvider.loadChannels(AbstractConfigurationProvider.java:194)
    at org.apache.flume.node.AbstractConfigurationProvider.getConfiguration(AbstractConfigurationProvider.java:96)
    at org.apache.flume.node.PollingPropertiesFileConfigurationProvider$FileWatcherRunnable.run(PollingPropertiesFileConfigurationProvider.java:140)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ClassNotFoundException: file
    at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
    at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:338)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    at java.lang.Class.forName0(Native Method)
    at java.lang.Class.forName(Class.java:264)
    at org.apache.flume.channel.DefaultChannelFactory.getClass(DefaultChannelFactory.java:67)
    ... 11 more
^C2023-04-25 23:11:25,356 (agent-shutdown-hook) [INFO - org.apache.flume.lifecycle.LifecycleSupervisor.stop(LifecycleSupervisor.java:79)] Stopping lifecycle supervisor 10
2023-04-25 23:11:25,359 (agent-shutdown-hook) [INFO - org.apache.flume.node.PollingPropertiesFileConfigurationProvider.stop(PollingPropertiesFileConfigurationProvider.java:83)] Configuration provider stopping
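This run fails with ClassNotFoundException: file, meaning Flume could not resolve the channel type string in example.conf to a channel class. Combined with the two "Configuration property ignored" warnings earlier in the log (fragments of a Chinese comment were parsed as property names), this points to stray or full-width characters introduced when the config file was pasted, rather than a missing library. For reference only — the example.conf built in the earlier step is authoritative — below is a minimal sketch of an a1 agent that writes to /tmp/flume; the exec source and its command, the memory channel and its capacities, and the NameNode address master:9000 are all assumptions:

# example.conf — minimal sketch; retype it rather than paste to avoid hidden characters
a1.sources = r1
a1.channels = c1
a1.sinks = k1
# Source (assumed): follow a local log file
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.log
a1.sources.r1.channels = c1
# Channel (assumed): an in-memory channel sidesteps the failed "file" type lookup
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Sink: write events under /tmp/flume; the default FlumeData file prefix matches the listing in step 4
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = hdfs://master:9000/tmp/flume
a1.sinks.k1.hdfs.fileType = DataStream
a1.sinks.k1.channel = c1

After correcting the file, rerun the same flume-ng command shown above.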
3.4. Step 4: Check the results
[hadoop@master flume]$ hdfs dfs -ls /tmp
Found 3 items
drwx------ - hadoop supergroup 0 2023-04-18 22:37 /tmp/flume
drwx------ - hadoop supergroup 0 2023-03-16 05:37 /tmp/hadoop-yarn
drwx-wx-wx - hadoop supergroup 0 2023-03-23 00:17 /tmp/hive
[hadoop@master flume]$ hdfs dfs -lsr /tmp/flume
lsr: DEPRECATED: Please use 'ls -R' instead.
-rw-r--r-- 2 hadoop supergroup 1672 2023-04-18 22:35 /tmp/flume/FlumeData.1681871756800
-rw-r--r-- 2 hadoop supergroup 1633 2023-04-18 22:35 /tmp/flume/FlumeData.1681871756801
-rw-r--r-- 2 hadoop supergroup 2562 2023-04-18 22:35 /tmp/flume/FlumeData.1681871756849
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756850
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756851
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756852
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756853
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756854
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756855
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756856
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756857
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756858
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756859
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756860
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756861
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756862
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756863
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756864
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756865
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756866
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756867
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756868
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756869
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756870
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756871
-rw-r--r-- 2 hadoop supergroup 1269 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756872
-rw-r--r-- 2 hadoop supergroup 1370 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756873
-rw-r--r-- 2 hadoop supergroup 3226 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756874
-rw-r--r-- 2 hadoop supergroup 2163 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756875
-rw-r--r-- 2 hadoop supergroup 2163 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756876
-rw-r--r-- 2 hadoop supergroup 2163 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756877
-rw-r--r-- 2 hadoop supergroup 2163 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756878
-rw-r--r-- 2 hadoop supergroup 2163 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756879
-rw-r--r-- 2 hadoop supergroup 1353 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756880
-rw-r--r-- 2 hadoop supergroup 1389 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756881
-rw-r--r-- 2 hadoop supergroup 1512 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756882
-rw-r--r-- 2 hadoop supergroup 3214 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756883
-rw-r--r-- 2 hadoop supergroup 2163 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756884
-rw-r--r-- 2 hadoop supergroup 2163 2023-04-18 22:36 /tmp/flume/FlumeData.1681871756885
......
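To spot-check what Flume collected, any of these files can be printed directly (the file name below is copied from the listing above); note that the non-deprecated form of the recursive listing is hdfs dfs -ls -R /tmp/flume:

[hadoop@master flume]$ hdfs dfs -cat /tmp/flume/FlumeData.1681871756800 | head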