CentOS Hadoop


1. Disable IPv6

vi /etc/sysctl.conf

net.ipv6.conf.all.disable_ipv6=1

net.ipv6.conf.default.disable_ipv6=1

 

sysctl -p
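
To verify the change took effect, check the kernel flag (it should print 1):

cat /proc/sys/net/ipv6/conf/all/disable_ipv6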

2. Set the hostname

hostnamectl set-hostname master.hadoop

 

vi /etc/hosts

<local IP> master.hadoop
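
A quick check that name resolution works:

getent hosts master.hadoop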

3. Disable the firewall

systemctl status firewalld    # check current state

systemctl stop firewalld

systemctl disable firewalld

4. Disable SELinux

setenforce 0    # temporarily disable

vi /etc/selinux/config

SELINUX=disabled

 

sestatus    # check status

 

Add a user

useradd hadoop

visudo

hadoop ALL=(ALL) NOPASSWD: ALL, !/usr/bin/passwd root

 

su - hadoop 

ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''

cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

chmod 600 .ssh/authorized_keys
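
Passwordless login should now work; a quick test (the first connection may still ask to confirm the host key until the ssh config below is in place):

ssh localhost date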

 

vi ~/.ssh/config

 

Host localhost

  StrictHostKeyChecking no

Host 0.0.0.0

  StrictHostKeyChecking no

Host *hadoop*

  StrictHostKeyChecking no

  UserKnownHostsFile /dev/null

chmod 600 ~/.ssh/config

 

5. Download hadoop-2.7.4.tar.gz and jdk-9_linux-x64_bin.rpm

sudo rpm -ivh jdk-9_linux-x64_bin.rpm

cd /usr/local

sudo tar xzvf hadoop-2.7.4.tar.gz

sudo ln -s hadoop-2.7.4 hadoop

 

sudo vi /etc/profile

export JAVA_HOME=/usr/java/default

export HADOOP_HOME=/usr/local/hadoop

export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH

source /etc/profile
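
To confirm the environment, both commands should print version information:

java -version

hadoop version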

 

sudo chown -R hadoop:hadoop hadoop-2.7.4/

 

cd /usr/local/hadoop

vi etc/hadoop/hadoop-env.sh

export JAVA_HOME=/usr/java/default

 

hadoop fs -ls /    # sanity check that the hadoop command runs; with no HDFS configured yet this lists the local filesystem root

 

Configuration

vi etc/hadoop/core-site.xml

<?xml version="1.0" encoding="UTF-8"?>

<configuration>

        <property>

                <name>fs.defaultFS</name>

                <value>hdfs://master.hadoop:8020/</value>

        </property>

</configuration>


vi etc/hadoop/hdfs-site.xml

<configuration>

        <property>

                <name>dfs.namenode.name.dir</name>

                <value>file:///data/dfs/namenode</value>

                <description>NameNode directory for namespace and transaction logs storage</description>

        </property>

        <property>

                <name>dfs.datanode.data.dir</name>

                <value>file:///data/dfs/datanode</value>

        </property>

        <property>

                <name>dfs.namenode.checkpoint.dir</name>

                <value>file:///data/dfs/namesecondary</value>

        </property>

        <property>

                <name>dfs.replication</name>

                <value>1</value>

        </property>

</configuration>

 

sudo mkdir -p /data/dfs/namenode /data/dfs/datanode /data/dfs/namesecondary

sudo chown -R hadoop:hadoop /data

 

cp etc/hadoop/mapred-site.xml.template etc/hadoop/mapred-site.xml    # the file ships only as a template

vi etc/hadoop/mapred-site.xml

<?xml version="1.0" encoding="UTF-8"?>

<configuration>

        <property>

                <name>mapreduce.framework.name</name>

                <value>yarn</value>

        </property>

         <property>

                <name>mapreduce.jobhistory.address</name>

                <value>master.hadoop:10020</value>

        </property>

         <property>

                <name>mapreduce.jobhistory.webapp.address</name>

                <value>master.hadoop:19888</value>

        </property>

</configuration>
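
Since the JobHistory server address is configured above, its daemon can be started once the cluster is up (a standard Hadoop 2.x sbin script):

mr-jobhistory-daemon.sh start historyserver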

 

vi etc/hadoop/yarn-site.xml

<configuration>

        <property>

                <name>yarn.nodemanager.aux-services</name>

                <value>mapreduce_shuffle</value>

        </property>

        <property>

                <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>

                <value>org.apache.hadoop.mapred.ShuffleHandler</value>

        </property>

        <property>

                <name>yarn.resourcemanager.hostname</name>

                <value>master.hadoop</value>

        </property>

</configuration>

 

 

Start the services:

hdfs namenode -format    # format the NameNode metadata (first run only)

start-dfs.sh

start-yarn.sh
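
After start-up, jps should list the daemons running on this single node, roughly: NameNode, DataNode, SecondaryNameNode, ResourceManager, and NodeManager.

jps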

 

 

hadoop fs -mkdir -p /user/hadoop/input

hadoop fs -put ./input/test.txt input/

hadoop fs -ls

Test wordcount:

hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.4.jar wordcount input out

 

hadoop fs -ls out/

hadoop fs -cat out/part-r-00000
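
As an illustration, if test.txt contained "hello hadoop hello" (a made-up example), the output would list each word with its count:

hadoop  1
hello   2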

 

http://<local IP>:50070    # NameNode web UI
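
The ResourceManager web UI is on port 8088 by default:

http://<local IP>:8088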

 

 

Cluster setup: clone the master VM and, on each of the other three machines, set the hostname:

hostnamectl set-hostname slave1.hadoop

hostnamectl set-hostname slave2.hadoop

hostnamectl set-hostname slave3.hadoop

 

vi /etc/hosts

192.168.0.172 master.hadoop

192.168.0.101 slave1.hadoop

192.168.0.102 slave2.hadoop

192.168.0.103 slave3.hadoop
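
Because the slaves were cloned from the master, the hadoop user's key pair is already in place on every node; a quick check from the master (should print the date with no password prompt):

ssh slave1.hadoop date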

 

cd /usr/local/hadoop/etc/hadoop/

vi hdfs-site.xml

Change dfs.replication to 3.

 

vi slaves

slave1.hadoop

slave2.hadoop

slave3.hadoop
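
The edited hdfs-site.xml also needs to reach the slaves; one way, assuming the same directory layout on every node, is to copy it over (repeat for slave2 and slave3):

scp hdfs-site.xml slave1.hadoop:/usr/local/hadoop/etc/hadoop/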

 

rm -rf /data/dfs/datanode/current    # on each cloned slave, so the DataNode registers with a fresh storage ID

 

Start the services on the master:

start-dfs.sh

start-yarn.sh
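
Once everything is up, the DataNode report should show three live nodes:

hdfs dfsadmin -report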

