1. Install prerequisites
Install the JDK, ant, libtool, protobuf, maven, zlib-devel, openssl-devel, ncurses-devel, gcc, snappy, and bzip2.
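On a CentOS/RHEL-style system, most of these come from yum (the package names below are an assumption for that distro family; the JDK, maven, ant, and protobuf 2.5.0 are installed manually under /usr/app as configured below):
yum install -y gcc gcc-c++ make cmake autoconf automake libtool zlib-devel openssl-devel ncurses-devel snappy snappy-devel bzip2 bzip2-devel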
#set java
export JAVA_HOME=/usr/app/jdk1.8.0
export JRE_HOME=/usr/app/jdk1.8.0/jre
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
export JAVA_HOME JRE_HOME CLASSPATH PATH
#set maven
export M2_HOME=/usr/app/maven
export PATH=$PATH:$M2_HOME/bin
#set ant
export ANT_HOME=/usr/app/ant
export PATH=$PATH:$ANT_HOME/bin
#set hadoop
export HADOOP_HOME=/usr/app/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
2. Configure /etc/hosts
Map the machine's IP address to its hostname, e.g.:
192.168.66.66 xinfang
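A quick sanity check of steps 1-2 before building (version output depends on what you installed; Hadoop 2.7.x expects protobuf 2.5.0):
source /etc/profile
java -version
mvn -version
ant -version
protoc --version #must report 2.5.0
ping -c 1 xinfang #must resolve to 192.168.66.66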
3. Configure passwordless SSH login
Install the SSH service before configuring.
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
#append id_dsa.pub to the authorized keys
chmod 600 ~/.ssh/authorized_keys #restrict permissions on the key file
vim /etc/ssh/sshd_config
RSAAuthentication yes #enable RSA authentication
PubkeyAuthentication yes #enable public/private key pair authentication
AuthorizedKeysFile .ssh/authorized_keys #public key file path (the same file generated above)
service sshd restart #restart the sshd service after saving the changes
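A non-interactive way to test the key setup (BatchMode makes ssh fail outright instead of falling back to a password prompt):
ssh -o BatchMode=yes localhost 'echo key login OK'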
Verify with ssh <ip> or ssh localhost; the login should now succeed without a password.
4. Build 64-bit Hadoop from source
#mkdir /usr/app -p
#cd /usr/app
#wget http://mirror.bit.edu.cn/apache/hadoop/core/hadoop-2.7.2/hadoop-2.7.2-src.tar.gz
#tar zxvf hadoop-2.7.2-src.tar.gz
#cd hadoop-2.7.2-src
#mvn package -Pdist,native -DskipTests -Dtar -Drequire.snappy
#stay online; the build takes roughly 50 minutes (if it fails midway, find the cause and re-run the previous command)
#after the build finishes, cd hadoop-dist/target, copy hadoop-2.7.2.tar.gz out, and unpack it
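Before going further, it is worth confirming that the native 64-bit libraries were actually built, since that is the point of compiling from source (paths assume the freshly unpacked tree):
cd hadoop-dist/target/hadoop-2.7.2
file lib/native/libhadoop.so.1.0.0 #should report ELF 64-bit
bin/hadoop checknative -a #hadoop, zlib, and snappy should all show true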
#create the tmp/name/data directories
mkdir -p /usr/app/hadoop/tmp
mkdir -p /usr/app/hadoop/hdfs/name
mkdir -p /usr/app/hadoop/hdfs/data
#configure the six files under hadoop/etc/hadoop/
#4.1 core-site.xml
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/app/hadoop/tmp</value>
</property>
<property>
<name>fs.defaultFS</name><!-- replaces the deprecated fs.default.name -->
<value>hdfs://xinfang:9000</value>
</property>
<property>
<name>io.compression.codecs</name>
<value>org.apache.hadoop.io.compress.GzipCodec,
org.apache.hadoop.io.compress.DefaultCodec,
org.apache.hadoop.io.compress.BZip2Codec,
org.apache.hadoop.io.compress.SnappyCodec
</value>
</property>
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
</configuration>
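Once the unpacked tree from step 4 is in place and $HADOOP_HOME/bin is on the PATH, a quick check that this setting is picked up:
hdfs getconf -confKey fs.defaultFS #should print hdfs://xinfang:9000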
#4.2 hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/app/hadoop/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/app/hadoop/hdfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
#4.3 mapred-site.xml
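The stock 2.7.2 distribution ships only a template for this file, so create it first:
cd /usr/app/hadoop/etc/hadoop
cp mapred-site.xml.template mapred-site.xml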
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.map.output.compress</name>
<value>true</value>
</property>
<property>
<name>mapreduce.map.output.compress.codec</name>
<value>org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
</configuration>
#4.4 yarn-site.xml
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
#4.5 hadoop-env.sh
export JAVA_HOME=/usr/app/jdk1.8.0
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HADOOP_HOME/lib/native/
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_HOME/lib/native"
#4.6 yarn-env.sh
export JAVA_HOME=/usr/app/jdk1.8.0
5. Start Hadoop
#ssh 192.168.66.66
#hdfs namenode -format (format only once: reformatting easily leaves the datanode and namenode with mismatched cluster IDs; see the check below)
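If the DataNode later refuses to start after a reformat, compare the cluster IDs in the two VERSION files (paths follow the dfs.namenode.name.dir and dfs.datanode.data.dir values from 4.2):
grep clusterID /usr/app/hadoop/hdfs/name/current/VERSION
grep clusterID /usr/app/hadoop/hdfs/data/current/VERSION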
#sh /usr/app/hadoop/sbin/start-all.sh
#check the running daemons with jps; all five should be listed:
NodeManager
SecondaryNameNode
DataNode
ResourceManager
NameNode
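With all five daemons up, a short end-to-end smoke test (the examples jar ships with the build; the HDFS target paths here are illustrative):
hdfs dfs -mkdir -p /user/root
hdfs dfs -put /etc/hosts /user/root/
hdfs dfs -ls /user/root
hadoop jar /usr/app/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.2.jar pi 2 10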
6. Web UI access
http://192.168.66.66:50070/ - Hadoop DFS status (NameNode UI)
http://192.168.66.66:8088/ - YARN ResourceManager (cluster overview)
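If a page does not load, check the firewall first; the endpoints can also be probed from the shell (assuming curl is installed):
curl -sf http://192.168.66.66:50070/ >/dev/null && echo 'NameNode UI reachable'
curl -sf http://192.168.66.66:8088/ >/dev/null && echo 'ResourceManager UI reachable'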