# Download HBase (this guide installs 1.3.6 — the URL must match the
# version extracted below; the original fetched 2.3.0 by mistake).
wget https://archive.apache.org/dist/hbase/1.3.6/hbase-1.3.6-bin.tar.gz
# Alternative mirrors / versions:
#   https://archive.apache.org/dist/hbase/2.3.0/hbase-2.3.0-bin.tar.gz
#   http://mirrors.hust.edu.cn/apache/hbase/1.6.0/hbase-1.6.0-bin.tar.gz
#   http://mirrors.hust.edu.cn/apache/hbase/hbase-1.3.6/hbase-1.3.6-bin.tar.gz

# Unpack into the application directory
tar -zxf hbase-1.3.6-bin.tar.gz -C /ups/app/
# The tree was extracted under /ups/app, not the current directory,
# so mv/chown must use absolute paths (or cd /ups/app first).
mv /ups/app/hbase-1.3.6 /ups/app/hbase
chown -R hadoop:hadoop /ups/app/hbase

# Publish HBASE_HOME for all login shells.  The backslash-escaped \$
# defers expansion until /etc/profile.d/hbase.sh is sourced, rather
# than when this here-doc is written.
cat > /etc/profile.d/hbase.sh <<-EOF
export HBASE_HOME=/ups/app/hbase
export PATH=\${PATH}:\${HBASE_HOME}/bin
EOF

# Push the profile snippet to the other cluster nodes (pg2, pg3)
for i in 2 3; do
  scp /etc/profile.d/hbase.sh "pg${i}:/etc/profile.d/hbase.sh"
done
在上面Hadoop HA完全分布式环境的基础上搭建HBase集群
模块\主机名 | pg1(192.168.10.190) | pg2(192.168.10.191) | pg3(192.168.10.192) |
---|---|---|---|
HDFS | datanode | datanode | datanode |
HDFS | namenode | namenode | |
YARN | nodemanager | nodemanager | nodemanager |
YARN | resourcemanager | resourcemanager | |
HistoryServer | JobHistoryServer | | |
主备故障切换 | ZKFC (DFSZKFailoverController) | ZKFC | |
NameNodes数据同步 | journalnode | journalnode | journalnode |
zookeeper | QuorumPeerMain | QuorumPeerMain | QuorumPeerMain |
HBase | HMaster | HMaster | |
HBase | HRegionServer | HRegionServer | HRegionServer |
配置文件路径:${HBASE_HOME}/conf
# Configure ${HBASE_HOME}/conf/hbase-env.sh.  Target settings (can
# also be added manually with an editor):
#   export JAVA_HOME=${JAVA_HOME}     # JDK used by the HBase daemons
#   export HBASE_MANAGES_ZK=false     # use the external ZooKeeper ensemble
#
# Scripted edit: append each setting right after the commented-out
# default.  The pattern tolerates an optional space after '#', since
# the stock hbase-env.sh ships the lines as "# export JAVA_HOME=..."
# and the original anchor "^#export ..." would match nothing.
sed -ri \
  -e "/^# ?export JAVA_HOME=/a\export JAVA_HOME=\${JAVA_HOME}" \
  -e "/^# ?export HBASE_MANAGES_ZK=true/a\export HBASE_MANAGES_ZK=false" \
  "${HBASE_HOME}/conf/hbase-env.sh"
<configuration>
  <!-- Shared root directory for the HRegionServers; when pointing at a
       plain NameNode include the RPC port, here an HA nameservice is used -->
  <property>
    <!-- Path where HBase stores its data on HDFS.  'hacluster' is the
         HA nameservice defined in Hadoop's hdfs-site.xml, so no
         host:port is given -->
    <name>hbase.rootdir</name>
    <value>hdfs://hacluster/hbase</value>
  </property>
  <!-- Fixed HMaster host — intentionally commented out: with multiple
       masters the active one is elected via ZooKeeper instead -->
  <!--
  <property>
    <name>hbase.master</name>
    <value>hdfs://pg1:60000</value>
  </property>
  -->
  <!-- Run HBase in fully-distributed mode -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- Addresses of the external ZooKeeper ensemble -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>pg1:2181,pg2:2181,pg3:2181</value>
  </property>
  <!-- dataDir passed to ZooKeeper; here it points at the standalone
       ZooKeeper install path.  NOTE(review): confirm this matches the
       ensemble's actual dataDir — only relevant when HBase manages ZK -->
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/ups/app/zookeeper</value>
  </property>
  <!-- Client port of the ZooKeeper ensemble -->
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
  </property>
</configuration>
# Hosts that run an HRegionServer, one per line
cat > "${HBASE_HOME}/conf/regionservers" <<-EOF
pg1
pg2
pg3
EOF

# pg2 runs the standby (backup) HMaster
echo 'pg2' > "${HBASE_HOME}/conf/backup-masters"

# HBase must see Hadoop's HA client configuration: copy core-site.xml
# and hdfs-site.xml into ${HBASE_HOME}/conf so the 'hacluster'
# nameservice referenced by hbase.rootdir resolves.
cp "${HADOOP_HOME}/etc/hadoop/core-site.xml" \
   "${HADOOP_HOME}/etc/hadoop/hdfs-site.xml" \
   "${HBASE_HOME}/conf/"
cd /ups/app
# Distribute the configured HBase tree to the other nodes.
for host in 2 3; do
  # Copy into the parent directory: 'scp -r src host:/ups/app/hbase'
  # would nest a second hbase/ level if the target already exists.
  scp -r /ups/app/hbase "pg${host}:/ups/app/"
  # BUG FIX: ssh's -c flag selects a *cipher spec*, not a command, so
  # the original chown never ran remotely.  The remote command is
  # simply passed as the final argument.
  ssh "pg${host}" "chown -R hadoop:hadoop /ups/app/hbase"
done
# Run on the active NameNode host of the Hadoop cluster (pg1).
# Starts the local HMaster, every HRegionServer listed in
# conf/regionservers, and the backup master from conf/backup-masters.
start-hbase.sh

# Manual single-daemon control, e.g. bring up the standby HMaster on
# the backup node if start-hbase.sh did not start it:
hbase-daemon.sh start master
hbase-daemon.sh start regionserver

# Stop the whole cluster (run on the node that started it):
stop-hbase.sh

# Web UIs of the active/standby HMaster (default port 16010) —
# commented out because bare URLs are not executable commands:
#   http://192.168.10.190:16010
#   http://192.168.10.191:16010