Hadoop Ecosystem Setup (3 Nodes) - 07. Hive Configuration

Date: 2021-07-07 00:27:33

http://archive.apache.org/dist/hive/hive-2.1.1/
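If the tarball is not already on node1, it can be fetched from the archive above (a sketch; the file name assumes the standard Apache binary distribution):

wget http://archive.apache.org/dist/hive/hive-2.1.1/apache-hive-2.1.1-bin.tar.gz -P ~/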

# ==================================================================Install Hive

tar -zxvf apache-hive-2.1.1-bin.tar.gz -C /usr/local
mv /usr/local/apache-hive-2.1.1-bin /usr/local/hive-2.1.1
rm -f ~/apache-hive-2.1.1-bin.tar.gz
cp ~/mysql-connector-java-5.1.46.jar /usr/local/hive-2.1.1/lib/
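Optionally verify the layout and that the MySQL JDBC driver landed in Hive's lib directory (a quick sanity check, nothing here is required):

ls /usr/local/hive-2.1.1
ls /usr/local/hive-2.1.1/lib | grep mysql-connector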

# Configure environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile

# Check the result
echo $HIVE_HOME

mkdir -p $HIVE_HOME/data/hive
mkdir -p $HIVE_HOME/data/hive/operation_logs
mkdir -p $HIVE_HOME/data/resources

# Configure Hive
cp $HIVE_HOME/conf/hive-env.sh.template $HIVE_HOME/conf/hive-env.sh
cp $HIVE_HOME/conf/hive-default.xml.template $HIVE_HOME/conf/hive-site.xml
cp $HIVE_HOME/conf/hive-exec-log4j2.properties.template $HIVE_HOME/conf/hive-exec-log4j2.properties
cp $HIVE_HOME/conf/hive-log4j2.properties.template $HIVE_HOME/conf/hive-log4j2.properties
# Replace ${system:java.io.tmpdir}/${system:user.name} with the local path /usr/local/hive-2.1.1/data/hive
# Replace ${system:java.io.tmpdir}/${hive.session.id}_resources with the local path /usr/local/hive-2.1.1/data/resources
# Replace ${system:java.io.tmpdir}/${system:user.name}/operation_logs with the local path /usr/local/hive-2.1.1/data/hive/operation_logs

vi $HIVE_HOME/conf/hive-site.xml

# Press Esc, then enter:
:%s#${system:java.io.tmpdir}/${system:user.name}#/usr/local/hive-2.1.1/data/hive#

:%s#${system:java.io.tmpdir}/${hive.session.id}_resources#/usr/local/hive-2.1.1/data/resources#
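The first substitution also rewrites the operation_logs entry, since it shares the same prefix. To confirm that no unresolved paths remain, an optional check:

grep -n 'system:java.io.tmpdir' $HIVE_HOME/conf/hive-site.xml
# no output means every occurrence was replaced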

# Press Esc, then / to search for hive.exec.scratchdir; once on its <value> line, press Insert and edit as follows

<property>
<name>hive.exec.scratchdir</name>
<value>/hive/tmp</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/hive/warehouse</value>
</property>
<!-- Connect to the hive database in MySQL via JDBC -->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://node1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<!-- MySQL JDBC driver -->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<!-- MySQL user name -->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
</property>
<!-- MySQL user password -->
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>Hive-123</value>
</property>
<!-- HA for HiveServer2 -->
<property>
<name>hive.server2.support.dynamic.service.discovery</name>
<value>true</value>
</property>
<property>
<name>hive.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
<!-- Hive web UI; not configured for now -->
<property>
<name>hive.hwi.war.file</name>
<value>/usr/local/hive-2.1.1/lib/hive-hwi-2.1.1.jar</value>
</property>
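Note that hive.exec.scratchdir and hive.metastore.warehouse.dir point at HDFS paths, not the local filesystem. If they do not exist yet, a hedged sketch for creating them once HDFS is running (777 is convenient on a test cluster; tighten permissions for anything else):

hdfs dfs -mkdir -p /hive/tmp /hive/warehouse
hdfs dfs -chmod -R 777 /hive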
# Copy the required jars from hbase/lib to hive/lib
cp $HBASE_HOME/lib/hbase-client-1.2.4.jar $HBASE_HOME/lib/hbase-common-1.2.4.jar $HIVE_HOME/lib

# Align the jline versions used by Hive and Hadoop
cp $HIVE_HOME/lib/jline-2.12.jar $HADOOP_HOME/share/hadoop/yarn/lib

# Check the versions present
# cd $HADOOP_HOME/share/hadoop/yarn/lib
# find ./ -name "*jline*jar"

# Remove the old jline 0.9
# rm jline-0.9.94.jar

# Copy the JDK's tools.jar to hive/lib
cp $JAVA_HOME/lib/tools.jar $HIVE_HOME/lib
# rm -f $HIVE_HOME/lib/log4j-slf4j-impl-2.4.1.jar

vi $HIVE_HOME/conf/hive-env.sh

HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export HIVE_HOME=/usr/local/hive-2.1.1
export HIVE_CONF_DIR=/usr/local/hive-2.1.1/conf
export HIVE_AUX_JARS_PATH=/usr/local/hive-2.1.1/lib
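The JDBC settings in hive-site.xml assume a MySQL account hive with password Hive-123 that is reachable from the cluster nodes; createDatabaseIfNotExist=true takes care of creating the hive database itself. If the account does not exist yet, a hedged sketch of the grants (MySQL 5.x syntax; narrow the '%' host pattern as your security policy requires):

mysql -u root -p
> CREATE USER 'hive'@'%' IDENTIFIED BY 'Hive-123';
> GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'%';
> FLUSH PRIVILEGES;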

# If hadoop.proxyuser.root.groups was not configured in Hadoop earlier, configure it now

# ==================================================================node1 node2 node3
# If you hit permission errors, add the following to Hadoop's core-site.xml:
vi $HADOOP_HOME/etc/hadoop/core-site.xml

<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>

# ==================================================================node1
# Refresh the configuration as the superuser
yarn rmadmin -refreshSuperUserGroupsConfiguration
hdfs dfsadmin -refreshSuperUserGroupsConfiguration

# ==================================================================node1 node2
# If the NameNode is HA-enabled, run this on both the active and standby NameNodes
hdfs dfsadmin -fs hdfs://appcluster -refreshSuperUserGroupsConfiguration

# ==================================================================node2 node3

# Apply the environment variables
source /etc/profile

# Check the result
echo $HIVE_HOME

# ==================================================================node1

$HIVE_HOME/bin/schematool -initSchema -dbType mysql
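If initialization succeeds, schematool should report the schema as completed; the metastore tables can then be inspected in MySQL (an optional, hedged check — table names vary by Hive version):

mysql -u hive -pHive-123 -e 'use hive; show tables;'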

scp -r $HIVE_HOME node2:/usr/local/
scp -r $HIVE_HOME node3:/usr/local/

# Startup

# ==================================================================node1 node2 node3
# Start ZooKeeper
zkServer.sh start
zkServer.sh status

# ==================================================================node1
# Start all Hadoop processes
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node1
# For HBase HA (High Availability), start the master daemon
$HBASE_HOME/bin/hbase-daemon.sh start master

# Start HBase
$HBASE_HOME/bin/start-hbase.sh

# ==================================================================node2
# Start the backup master for HBase HA
$HBASE_HOME/bin/hbase-daemon.sh start master

# ==================================================================node1
$HIVE_HOME/bin/hiveserver2

# ==================================================================node2
$HIVE_HOME/bin/hiveserver2

# ==================================================================node1
# Both HiveServer2 instances register themselves in ZooKeeper; check the registrations:
zkCli.sh
ls /hiveserver2
get /hiveserver2/serverUri=node1:10000;version=2.1.1;sequence=0000000000

# Connect through ZooKeeper service discovery, which picks an available HiveServer2
$HIVE_HOME/bin/beeline -u "jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2" root 123456

# $HIVE_HOME/bin/beeline
# > !connect jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2 root "123456"

> create external table user_info(user_id int comment 'userID', user_name string comment 'userName') row format delimited fields terminated by '\t' lines terminated by '\n';
> show tables;

# Prepare a tab-separated data file
mkdir /root/hive
vi /root/hive/user_info.txt

1001	zhangsan
1002	lisi
1003	wangwu

# "load data local inpath" copies the file from the local filesystem into the warehouse
> load data local inpath '/root/hive/user_info.txt' into table user_info;
> select * from user_info;
> quit;

hdfs dfs -ls /
hdfs dfs -ls /hive/warehouse
hdfs dfs -cat /hive/warehouse/user_info/user_info.txt

hadoop fs -mkdir /hive_input_data
vi /root/hive/user_info.txt

1001	zhangsan
1002	lisi
1003	wangwu
1004	liuliu
1005	qiqi

hadoop fs -put /root/hive/user_info.txt /hive_input_data
hdfs dfs -ls /hive_input_data
# hdfs dfs -chmod 777 /hive_input_data

# Without "local", the load moves the file within HDFS, so it disappears from /hive_input_data afterwards
> select * from user_info;
> load data inpath '/hive_input_data/user_info.txt' overwrite into table user_info;
> select * from user_info;

# ==================================================================node1
# Stop the running processes
$HBASE_HOME/bin/stop-hbase.sh
$HADOOP_HOME/sbin/stop-all.sh

# ==================================================================node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop

# ==================================================================node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now
# Take a VM snapshot: hive_hiveserver2 cluster
