1.解壓
(1)將hadoop壓縮包復(fù)制到/opt/software路徑下
(2)解壓hadoop到/opt/module目錄下
[root@kb135 software]# tar -zxvf hadoop-3.1.3.tar.gz -C /opt/module/
(3)修改hadoop屬主和屬組
[root@kb135 module]# chown -R root:root ./hadoop-3.1.3/
2.配置環(huán)境變量
[root@kb135 module]# vim /etc/profile
# HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export HDFS_JOURNALNODE_USER=root
export HDFS_ZKFC_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
修改完之后[root@kb135 module]# source /etc/profile
3.在hadoop目錄創(chuàng)建data目錄
[root@kb135 module]# cd ./hadoop-3.1.3/
創(chuàng)建目錄data
[root@kb135 hadoop-3.1.3]# mkdir ./data
4.修改配置文件
進(jìn)入/opt/module/hadoop-3.1.3/etc/hadoop目錄,查看目錄下的文件,配置幾個必要的文件
(1)配置core-site.xml
[root@kb135 hadoop]# vim ./core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://kb135:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/module/hadoop-3.1.3/data</value>
    </property>
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
</configuration>
(2)配置hadoop-env.sh
[root@kb135 hadoop]# vim ./hadoop-env.sh
修改第54行
export JAVA_HOME=/opt/module/jdk1.8.0_381
(3)配置hdfs-site.xml
[root@kb135 hadoop]# vim ./hdfs-site.xml
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/opt/module/hadoop-3.1.3/data/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/opt/module/hadoop-3.1.3/data/dfs/data</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
</configuration>
(4)配置mapred-site.xml
[root@kb135 hadoop]# vim ./mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>kb135:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>kb135:19888</value>
    </property>
    <property>
        <name>mapreduce.map.memory.mb</name>
        <value>2048</value>
    </property>
    <property>
        <name>mapreduce.reduce.memory.mb</name>
        <value>2048</value>
    </property>
    <property>
        <name>mapreduce.application.classpath</name>
        <value>/opt/module/hadoop-3.1.3/etc/hadoop:/opt/module/hadoop-3.1.3/share/hadoop/common/*:/opt/module/hadoop-3.1.3/share/hadoop/common/lib/*:/opt/module/hadoop-3.1.3/share/hadoop/hdfs/*:/opt/module/hadoop-3.1.3/share/hadoop/hdfs/lib/*:/opt/module/hadoop-3.1.3/share/hadoop/mapreduce/*:/opt/module/hadoop-3.1.3/share/hadoop/mapreduce/lib/*:/opt/module/hadoop-3.1.3/share/hadoop/yarn/*:/opt/module/hadoop-3.1.3/share/hadoop/yarn/lib/*</value>
    </property>
</configuration>
(5)配置yarn-site.xml
[root@kb135 hadoop]# vim ./yarn-site.xml
<configuration>
    <property>
        <name>yarn.resourcemanager.connect.retry-interval.ms</name>
        <value>20000</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
    </property>
    <property>
        <name>yarn.nodemanager.localizer.address</name>
        <value>kb135:8040</value>
    </property>
    <property>
        <name>yarn.nodemanager.address</name>
        <value>kb135:8050</value>
    </property>
    <property>
        <name>yarn.nodemanager.webapp.address</name>
        <value>kb135:8042</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.local-dirs</name>
        <value>/opt/module/hadoop-3.1.3/yarndata/yarn</value>
    </property>
    <property>
        <name>yarn.nodemanager.log-dirs</name>
        <value>/opt/module/hadoop-3.1.3/yarndata/log</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
</configuration>
(6)配置workers
[root@kb135 hadoop]# vim ./workers
修改為kb135
5.初始化hadoop
進(jìn)入/opt/module/hadoop-3.1.3/bin路徑
[root@kb135 bin]# hdfs namenode -format
6.設(shè)置免密登錄
[root@kb135 ~]# ssh-keygen -t rsa -P ""
[root@kb135 ~]# cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
[root@kb135 ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub -p22 root@kb135
7.啟動hadoop
[root@kb135 ~]# start-all.sh
查看進(jìn)程
[root@kb135 ~]# jps
8.測試
網(wǎng)頁中輸入網(wǎng)址:http://192.168.142.135:9870/文章來源:http://www.zghlxwxcb.cn/news/detail-669890.html
文章來源地址http://www.zghlxwxcb.cn/news/detail-669890.html
到了這里,關(guān)于centos7安裝hadoop 單機版的文章就介紹完了。如果您還想了解更多內(nèi)容,請在右上角搜索TOY模板網(wǎng)以前的文章或繼續(xù)瀏覽下面的相關(guān)文章,希望大家以后多多支持TOY模板網(wǎng)!