hadoop+hive+hbase+kylin

Hadoop installation


vi /etc/sysconfig/network-scripts/ifcfg-ens33

IPADDR=192.168.182.8


vi /etc/hosts

192.168.182.8 hd1
192.168.182.9 hd2
192.168.182.10 hd3


vi /etc/hostname

hd1


ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa # generate a key pair
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys # append the public key to authorized_keys
reboot # reboot
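
hd2 and hd3 are cloned from hd1 below, so the key pair travels with them; on nodes installed separately the public key would be pushed by hand, e.g.:

ssh-copy-id -i ~/.ssh/id_dsa.pub root@hd2 # copy the public key to hd2
ssh-copy-id -i ~/.ssh/id_dsa.pub root@hd3 # copy the public key to hd3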
mkdir /usr/local/hadoop/ # create the hadoop directory
cd /usr/local/hadoop/ # enter the hadoop directory
wget https://mirror.bit.edu.cn/apache/hadoop/common/hadoop-2.9.2/hadoop-2.9.2.tar.gz # download Hadoop
tar -zvxf hadoop-2.9.2.tar.gz # extract into the current directory

vi /etc/profile

export JAVA_HOME=/home/fleam/jdk1.8.0_191
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.2
export PATH=$PATH:${JAVA_PATH}:/home/mongodb/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

source /etc/profile
hadoop version # verify the configuration
echo $JAVA_HOME # should print /home/fleam/jdk1.8.0_191

vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/hadoop-env.sh
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-env.sh
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/yarn-env.sh

export JAVA_HOME=/home/fleam/jdk1.8.0_191 # set explicitly in each of the three files


vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/hdfs-site.xml

<configuration>
    <!-- namenode storage location in HDFS -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/usr/data/hadoop/namenode</value>
    </property>
    <!-- datanode storage location in HDFS -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/usr/data/hadoop/datanode</value>
    </property>
    <!-- local disk location where the JournalNode keeps its data -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/usr/data/hadoop/journalnode</value>
    </property>
    <!-- number of replicas HDFS keeps for each block -->
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
        <value>false</value>
    </property>
</configuration>


mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode

vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/core-site.xml

<configuration>
        <!-- namenode address -->
        <property>
                <name>fs.defaultFS</name>
                <value>hdfs://192.168.182.8:9000</value>
        </property>
        <!-- directory for files Hadoop generates at runtime -->
        <property>
                <name>hadoop.tmp.dir</name>
                <value>/usr/data/hadoop/tmp</value>
        </property>
        <!-- ZooKeeper quorum (leave commented out for now; it is only needed later for HBase, after ZooKeeper is installed) -->
        <!--
        <property>
                <name>ha.zookeeper.quorum</name>
                <value>192.168.182.8:2181,192.168.182.9:2181,192.168.182.10:2181</value>
        </property>
        -->
</configuration>
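
To confirm Hadoop actually picks these values up, individual keys can be queried once the environment is sourced:

hdfs getconf -confKey fs.defaultFS # expect hdfs://192.168.182.8:9000
hdfs getconf -confKey dfs.replication # expect 3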


cp /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml
vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/mapred-site.xml

<configuration>
    <!-- run MapReduce on the YARN framework -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>


vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/yarn-site.xml

<configuration>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>192.168.182.8:8032</value>
    </property>

    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>192.168.182.8:8030</value>
    </property>

    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>192.168.182.8:8031</value>
    </property>

    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>192.168.182.8:8033</value>
    </property>

    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>192.168.182.8:8088</value>
    </property>

    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>

</configuration>


vi /usr/local/hadoop/hadoop-2.9.2/etc/hadoop/slaves

192.168.182.8
192.168.182.9
192.168.182.10


# Clone hd1 into hd2 and hd3

vi /etc/sysconfig/network-scripts/ifcfg-ens33

IPADDR=192.168.182.9 # on hd2
IPADDR=192.168.182.10 # on hd3


reboot
ssh root@192.168.182.9 # test passwordless login
ssh root@192.168.182.10 # test passwordless login

vi /etc/hostname

hd2 # on hd2
hd3 # on hd3


# Log in to hd1
hdfs namenode -format # format the namenode (run as root)
start-dfs.sh # start HDFS
start-yarn.sh # start YARN
start-all.sh # or start both in one step

jps # verify on hd1
##
3408 ResourceManager
3235 SecondaryNameNode
3996 Jps
2973 NameNode
3501 NodeManager
3070 DataNode
##

jps # verify on hd2 and hd3
##
1797 Jps
1638 NodeManager
1532 DataNode
##

# To re-format, run on hd1, hd2 and hd3:
rm -rf /usr/data/hadoop/tmp
rm -rf /usr/data/hadoop/namenode
rm -rf /usr/data/hadoop/datanode
rm -rf /usr/data/hadoop/journalnode
rm -rf /usr/local/hadoop/hadoop-2.9.2/logs/*
mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode

# hd1
hdfs namenode -format
start-all.sh
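
A quick MapReduce smoke test using the examples jar bundled with 2.9.2 (standard share/ layout assumed):

hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.2.jar pi 2 10 # prints an estimate of Pi if HDFS and YARN are healthy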

# Troubleshooting
stop-all.sh # stop everything
cd /usr/local/hadoop/hadoop-2.9.2/logs # inspect the logs

# Check whether a port is already in use
netstat -tunlp|grep 9000 # list listeners on port 9000
lsof -i:9000 # show the process holding port 9000
ps -ef | grep clickhouse # ClickHouse also listens on port 9000 by default
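
If ClickHouse really owns port 9000, one way out (an assumption, not part of the original walkthrough) is to move the NameNode to 8020 and re-format, keeping hbase.rootdir below on the same port:

<value>hdfs://192.168.182.8:8020</value> <!-- replacement fs.defaultFS value in core-site.xml -->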

ZooKeeper installation

# All nodes
wget http://archive.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
tar -zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /home/bigData

vi /etc/profile

export ZOOKEEPER_HOME=/home/bigData/zookeeper-3.4.14
export PATH=$PATH:${JAVA_PATH}:/home/mongodb/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZOOKEEPER_HOME/bin:$ZOOKEEPER_HOME/conf

source /etc/profile

cp /home/bigData/zookeeper-3.4.14/conf/zoo_sample.cfg /home/bigData/zookeeper-3.4.14/conf/zoo.cfg
vi /home/bigData/zookeeper-3.4.14/conf/zoo.cfg

dataDir=/usr/data/zookeeper/data
dataLogDir=/usr/data/zookeeper/logs
# ZooKeeper ensemble members, appended at the end of the file
server.1=192.168.182.8:2888:3888
server.2=192.168.182.9:2888:3888
server.3=192.168.182.10:2888:3888


mkdir -p /usr/data/zookeeper/data
mkdir -p /usr/data/zookeeper/logs
cd /usr/data/zookeeper/data
touch myid
vi myid

1 # on hd1
2 # on hd2
3 # on hd3 (each node's myid must match its server.N line in zoo.cfg)
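
Equivalently, write each id non-interactively; the value must match that node's server.N line:

echo 1 > /usr/data/zookeeper/data/myid # on hd1
echo 2 > /usr/data/zookeeper/data/myid # on hd2
echo 3 > /usr/data/zookeeper/data/myid # on hd3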


zkServer.sh start
zkServer.sh status
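
The ensemble can also be probed with ZooKeeper's four-letter-word commands over port 2181 (assumes nc is installed):

echo ruok | nc hd1 2181 # a healthy server answers imok
echo stat | nc hd1 2181 # shows whether this server is leader or follower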

HBase installation

# hd1
wget https://mirror.bit.edu.cn/apache/hbase/1.4.13/hbase-1.4.13-bin.tar.gz
tar -zxvf hbase-1.4.13-bin.tar.gz
yum install -y ntpdate # all nodes (keep clocks in sync)
mkdir /usr/local/hbase
mv hbase-1.4.13 /usr/local/hbase/ # /usr/local/hbase/hbase-1.4.13

vi /etc/profile

export HBASE_HOME=/usr/local/hbase/hbase-1.4.13
export PATH=$PATH:$HBASE_HOME/bin # i.e. append :$HBASE_HOME/bin to the existing PATH line

source /etc/profile

vi /usr/local/hbase/hbase-1.4.13/conf/hbase-env.sh

export JAVA_HOME=/home/fleam/jdk1.8.0_191
export HBASE_MANAGES_ZK=false # use the external ZooKeeper; HBase will not start/stop its own
export HBASE_CLASSPATH=$HBASE_CLASSPATH:/usr/local/hbase/hbase-1.4.13/conf:/usr/local/hbase/hbase-1.4.13/lib:/usr/local/hadoop/hadoop-2.9.2/etc/hadoop/
# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m" # keep commented out (PermGen options were removed in JDK 8)
# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m -XX:ReservedCodeCacheSize=256m" # keep commented out


vi /usr/local/hbase/hbase-1.4.13/conf/hbase-site.xml

<configuration>
    <!-- must use the same address and port as fs.defaultFS in core-site.xml -->
    <property>
        <name>hbase.rootdir</name>
        <value>hdfs://192.168.182.8:9000/hbase</value>
    </property>
    <property>
        <name>hbase.cluster.distributed</name>
        <value>true</value>
    </property>
    <property>
        <name>hbase.tmp.dir</name>
        <value>/usr/data/hbase/tmp</value>
    </property>
    <property>
        <name>hbase.zookeeper.quorum</name>
        <value>192.168.182.8,192.168.182.9,192.168.182.10</value>
    </property>
    <property>
        <name>hbase.zookeeper.property.clientPort</name>
        <value>2181</value>
    </property>
</configuration>


mkdir -p /usr/data/hbase/tmp

vi /usr/local/hbase/hbase-1.4.13/conf/regionservers

192.168.182.8
192.168.182.9
192.168.182.10


scp -r /usr/local/hbase/hbase-1.4.13/ 192.168.182.9:/usr/local/hbase/hbase-1.4.13/
scp -r /usr/local/hbase/hbase-1.4.13/ 192.168.182.10:/usr/local/hbase/hbase-1.4.13/

# Remaining nodes (hd2, hd3)
vi /etc/profile

export HBASE_HOME=/usr/local/hbase/hbase-1.4.13
export PATH=$PATH:$HBASE_HOME/bin # append :$HBASE_HOME/bin to the existing PATH line

source /etc/profile

# Start-up: run on all nodes
rm -rf /usr/data/hadoop/tmp
rm -rf /usr/data/hadoop/namenode
rm -rf /usr/data/hadoop/datanode
rm -rf /usr/data/hadoop/journalnode
rm -rf /usr/local/hadoop/hadoop-2.9.2/logs/*
rm -rf /usr/local/hbase/hbase-1.4.13/logs/*
rm -rf /usr/data/hbase/tmp
mkdir -p /usr/data/hadoop/namenode
mkdir -p /usr/data/hadoop/datanode
mkdir -p /usr/data/hadoop/journalnode
mkdir -p /usr/data/hbase/tmp
zkServer.sh start
zkServer.sh status

# Master node
zkCli.sh -server hd1
rmr /hbase # clear any stale HBase state in ZooKeeper
hdfs namenode -format
start-all.sh
start-hbase.sh
curl http://192.168.182.8:16010 # HBase master web UI
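
A quick sanity check from the HBase shell, using a throwaway table (t_smoke is a placeholder name):

hbase shell
status # expect 3 servers
create 't_smoke', 'cf'
put 't_smoke', 'r1', 'cf:a', 'v1'
scan 't_smoke'
disable 't_smoke'
drop 't_smoke'
exit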

MySQL installation

rpm -qa|grep mariadb # find the preinstalled mariadb-libs package
rpm -e mariadb-libs-5.5.65-1.el7.x86_64 --nodeps # remove it so MySQL can take its place
wget http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum install mysql-community-server
systemctl restart mysqld.service
mysql -u root
set password for 'root'@'localhost' =password('root');
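
Hive below connects as root; a dedicated metastore account is a slightly safer sketch (optional — the hive user and password here are assumptions, not part of the original setup):

mysql -u root -proot -e "CREATE USER 'hive'@'localhost' IDENTIFIED BY 'hive'; GRANT ALL PRIVILEGES ON hive_db.* TO 'hive'@'localhost'; FLUSH PRIVILEGES;"

If used, change ConnectionUserName/ConnectionPassword in hive-site.xml accordingly.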

Hive installation

wget https://mirrors.tuna.tsinghua.edu.cn/apache/hive/hive-2.3.7/apache-hive-2.3.7-bin.tar.gz
tar -zxvf apache-hive-2.3.7-bin.tar.gz
mkdir -p /usr/local/hive/
mv apache-hive-2.3.7-bin  /usr/local/hive/
vi /etc/profile

export HIVE_HOME=/usr/local/hive/apache-hive-2.3.7-bin
export PATH=$PATH:$HIVE_HOME/bin


source /etc/profile
hive --version
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh
vi /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-env.sh

HADOOP_HOME=/usr/local/hadoop/hadoop-2.9.2    # Hadoop path
export HIVE_CONF_DIR=/usr/local/hive/apache-hive-2.3.7-bin/conf    # Hive conf path
export HIVE_AUX_JARS_PATH=/usr/local/hive/apache-hive-2.3.7-bin/lib    # Hive jar path
export JAVA_HOME=/home/fleam/jdk1.8.0_191    # JDK install path


cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-default.xml.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-site.xml
vi /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-site.xml

    <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>/opt/hive/warehouse</value>
    </property>
    <property>
        <name>hive.metastore.local</name>
        <value>true</value>
    </property>
    <!-- metastore database connection (MySQL) -->
    <!-- for a remote MySQL instance, put its IP or hostname here instead of localhost -->
    <!-- the hive_db database is created automatically if it does not exist -->
    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://localhost:3306/hive_db?createDatabaseIfNotExist=true</value>
    </property>
    <!-- JDBC driver -->
    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
    </property>
    <!-- MySQL username -->
    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>root</value>
    </property>
    <!-- MySQL password -->
    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>root</value>
    </property>

# Comment out every Derby-related property, and point these at real local directories:
# hive.querylog.location => /usr/hive/tmp/root
# hive.server2.logging.operation.log.location => /home/hive/root/operation_logs
# hive.exec.local.scratchdir => /home/hive/root
# hive.downloaded.resources.dir => /home/hive/${hive.session.id}_resources
mkdir -p /usr/hive/tmp/root
mkdir -p /home/hive/root/operation_logs

cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-exec-log4j2.properties.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-exec-log4j2.properties
cp /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-log4j2.properties.template /usr/local/hive/apache-hive-2.3.7-bin/conf/hive-log4j2.properties

wget https://cdn.mysql.com/archives/mysql-connector-java-5.1/mysql-connector-java-5.1.48.tar.gz
tar -zxvf mysql-connector-java-5.1.48.tar.gz
cp /home/mysql-connector-java-5.1.48/mysql-connector-java-5.1.48-bin.jar /usr/local/hive/apache-hive-2.3.7-bin/lib

zkServer.sh start
zkServer.sh status
hadoop-daemon.sh start journalnode
start-all.sh
start-hbase.sh

schematool -initSchema -dbType mysql
##
schemaTool completed
##
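
To confirm the schema landed in MySQL, list the metastore tables (names such as DBS and TBLS are standard for Hive 2.x):

mysql -u root -proot -e "use hive_db; show tables;"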
hive
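
A quick end-to-end check that exercises the MySQL metastore and the HDFS warehouse (t_test is a throwaway name):

hive -e "create table t_test(id int); show tables; drop table t_test;"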

Kylin installation

wget https://mirror.bit.edu.cn/apache/kylin/apache-kylin-3.1.1/apache-kylin-3.1.1-bin-hbase1x.tar.gz
mkdir -p /usr/local/kylin/
tar -zxvf apache-kylin-3.1.1-bin-hbase1x.tar.gz -C /usr/local/kylin/
vi /etc/profile

export KYLIN_HOME=/usr/local/kylin/apache-kylin-3.1.1-bin-hbase1x
export PATH=$PATH:$KYLIN_HOME/bin

source /etc/profile
sh $KYLIN_HOME/bin/check-env.sh

# 1. All nodes
zkServer.sh start
zkServer.sh status
hadoop-daemon.sh start journalnode
# 2. Master node
start-all.sh
start-hbase.sh
nohup hive --service metastore &
nohup hive --service hiveserver2 &
mr-jobhistory-daemon.sh start historyserver
kylin.sh start

http://192.168.182.8:7070/kylin
Default username: ADMIN
Default password: KYLIN
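
To exercise Kylin end to end, the sample cube shipped with the binary package can be loaded (sample.sh is part of the 3.1.1 distribution):

sh $KYLIN_HOME/bin/sample.sh # loads the kylin_sales sample tables into Hive plus the cube metadata
kylin.sh stop && kylin.sh start # restart, then build kylin_sales_cube from the web UI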

hdfs

hadoop fs -ls /
hadoop fs -mkdir /input # "hadoop dfs" is deprecated; use "hadoop fs"
hadoop fs -put 1.txt /input
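
Reading the file back verifies the round trip (1.txt is assumed to exist in the current directory):

hadoop fs -cat /input/1.txt # print the uploaded file
hadoop fs -ls /input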

hive

beeline
!connect jdbc:hive2://127.0.0.1:10000
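
The same connection in one non-interactive step, assuming the root user with no password as in this setup:

beeline -u jdbc:hive2://127.0.0.1:10000 -n root -e "show databases;"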

hbase

hbase shell
list # list all tables visible to the current user
scan 'table_name' # scan all rows of a table
disable 'table_name' # disable a table
drop 'table_name' # drop a table (disable it first)
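
A minimal create/put/get cycle to round out the list above (t1 and cf are placeholder names):

create 't1', 'cf'
put 't1', 'r1', 'cf:a', 'v1'
get 't1', 'r1'
count 't1'
disable 't1'
drop 't1'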

Original: https://www.cnblogs.com/hellowzd/p/13891599.html
Author: fleam
Title: hadoop+hive+hbase+kylin
