1.5 安装Hadoop
1.5.1 上传、解压
hadoop安装文件:hadoop335
# 解压缩
[root@192 ~]# tar -zxvf hadoop-3.3.5.tar.gz
# 重命名
[root@192 ~]# mv hadoop-3.3.5 hadoop3
# 删除安装文件
[root@192 ~]# rm -f hadoop-3.3.5.tar.gz
1.5.2 修改配置文件
修改core-site.xml
[root@192 ~]# vi /root/hadoop3/etc/hadoop/core-site.xml
在<configuration> </configuration>中添加如下代码
<property>
<name>fs.defaultFS</name>
<value>hdfs://127.0.0.1:9000</value> <!-- 如果其他主机也要访问,将 127.0.0.1:9000 改为 你的网卡IP:9000,或改为 0.0.0.0:9000 -->
</property>
<property>
<name>hadoop.tmp.dir</name>
<!-- 自定义 hadoop 的工作目录 -->
<value>/root/hadoop3/data</value>
</property>
<property>
<name>hadoop.native.lib</name>
<!-- 禁用Hadoop的本地库(native library) -->
<value>false</value>
</property>
修改yarn-site.xml
[root@192 ~]# vi /root/hadoop3/etc/hadoop/yarn-site.xml
在<configuration> </configuration>中添加如下代码
<property>
<name>yarn.resourcemanager.hostname</name>
<value>127.0.0.1</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<!-- yarn web 页面 -->
<value>0.0.0.0:8088</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<!-- reducer获取数据的方式 -->
<value>mapreduce_shuffle</value>
</property>
修改hdfs-site.xml
vi hadoop3/etc/hadoop/hdfs-site.xml
在<configuration> </configuration>中添加如下代码
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
修改mapred-site.xml
vi hadoop3/etc/hadoop/mapred-site.xml
在<configuration> </configuration>中添加如下代码
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
修改hadoop-env.sh
vi hadoop3/etc/hadoop/hadoop-env.sh
在文件末尾添加以下代码:
# 将当前用户 root 赋给下面这些变量
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
# JAVA的绝对路径,可以使用whereis java查看
export JAVA_HOME=/root/jdk8
# Hadoop的安装路径下的etc/hadoop的绝对路径
export HADOOP_CONF_DIR=/root/hadoop3/etc/hadoop
设置 Hadoop 环境变量
修改profile文件
vi /etc/profile
文件结尾添加以下代码:
export HADOOP_HOME=/root/hadoop3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# 重新加载配置文件,使其生效
[root@192 ~]# source /etc/profile
# 检查 PATH 中是否包含 $HADOOP_HOME/bin 和 $HADOOP_HOME/sbin 对应的目录
[root@192 ~]# echo $PATH
/root/jdk8/bin:/root/jdk8/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/hadoop3/bin:/root/hadoop3/sbin
修改主机名
[root@192 ~]# hostname
192.168.126.130
# 修改主机名
[root@192 ~]# hostnamectl set-hostname hiel
[root@192 ~]# hostname
hiel
# 编辑hosts文件
[root@192 ~]# vi /etc/hosts
编辑hosts文件,注释第一行,添加第二行:
#127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
127.0.0.1 hiel
重启系统
ssh免密登录
# 用户名@IP
[root@hiel ~]# ssh root@127.0.0.1
The authenticity of host '127.0.0.1 (127.0.0.1)' can't be established.
ECDSA key fingerprint is SHA256:0Qxn8DYmmVeTx8uKS0xkyi+4zFhd79p0J4hfn8K1MNs.
ECDSA key fingerprint is MD5:33:07:8f:43:1c:65:fd:70:96:9a:3e:cf:60:45:9f:1d.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '127.0.0.1' (ECDSA) to the list of known hosts.
# 输入密码
root@127.0.0.1's password:
Last login: Thu Nov 14 18:18:48 2024 from 192.168.126.1
# 退出
[root@hiel ~]# exit
登出
Connection to 127.0.0.1 closed.
# 免密登录设置
[root@hiel ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256 fqAFUsF3rqBF1jSAsYa0vo0AeNMuVTN9BqRzjbMMYE root@hiel
The key's randomart image is:
+---[RSA 2048]----+
|o==o*=O+o |
|*=+E.#+= |
|o*..BoO.o |
|o.o Oo= . |
| o .o.= S |
| . . o |
| . |
| |
| |
+----[SHA256]-----+
[root@hiel ~]# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[root@hiel ~]# chmod 0600 ~/.ssh/authorized_keys
# 验证
[root@hiel ~]# ssh root@127.0.0.1
Last login: Thu Nov 14 18:45:17 2024 from 192.168.126.1
# 退出
[root@hiel ~]# exit
登出
Connection to 127.0.0.1 closed.
免责声明:如果侵犯了您的权益,请联系站长,我们会及时删除侵权内容,谢谢合作!更多信息从访问主页:qidao123.com:ToB企服之家,中国第一个企服评测及商务社交产业平台。 |