<?xml version="1.0" encoding="UTF-8"?>
<!--
  YARN FairScheduler allocation file.
  Defines three leaf queues under root; resource strings use the
  FairScheduler "<memory> mb,<vcores> vcores" lexical form.
-->
<allocations>
  <queue name="root">
    <queue name="default">
      <minResources>2048 mb,4 vcores</minResources>
      <maxResources>2000000 mb,660 vcores</maxResources>
      <maxRunningApps>80</maxRunningApps>
      <aclSubmitApps>*</aclSubmitApps>
      <weight>2.0</weight>
      <schedulingMode>fair</schedulingMode>
    </queue>
    <queue name="queue_a">
      <minResources>2048 mb,4 vcores</minResources>
      <maxResources>102400 mb,148 vcores</maxResources>
      <maxRunningApps>50</maxRunningApps>
      <aclSubmitApps>*</aclSubmitApps>
      <weight>2.0</weight>
      <schedulingMode>fair</schedulingMode>
    </queue>
    <queue name="queue_b">
      <minResources>2048 mb,4 vcores</minResources>
      <maxResources>102400 mb,148 vcores</maxResources>
      <maxRunningApps>50</maxRunningApps>
      <aclSubmitApps>*</aclSubmitApps>
      <weight>2.0</weight>
      <schedulingMode>fair</schedulingMode>
    </queue>
  </queue>
</allocations>
# Connect to HiveServer2 via beeline.
# NOTE: the original URL had a leading space inside the quotes
# (" jdbc:hive2://...") which breaks the JDBC connection string.
beeline -u "jdbc:hive2://10.202.77.201:10000" -n hive -p hive

# Hive session setting: use Tez as the execution engine
# (run inside the hive/beeline CLI, not in the shell).
set hive.execution.engine=tez;

# Start HiveServer2 and the metastore service in the background.
nohup hive --service hiveserver2 &
nohup hive --service metastore &

# -S: silent mode (suppresses MapReduce progress output); -e: inline HQL query.
hive -S -e "select * from xxx"

# -f: execute an HQL script file.
hive -f test.hql

# From inside the hive CLI: run an HQL file.
source test.hql
# Force-remove every installed package matching "xxx", ignoring dependencies.
# Fixes vs. original: the pipeline must be a $(...) command substitution
# (single quotes made the loop iterate over the literal string), the loop
# body needs a ';' or newline before 'done', and ${f} should be quoted.
for f in $(rpm -qa | grep xxx); do
  rpm -e --nodeps "${f}"
done
磁盘空间满了,kill超时太长的job
cd hive/yarn/local1/usercache/hive/appcache
su yarn
yarn application -kill job名