Hadoop安装

/etc/profile

export JAVA_HOME=/opt/java/jdk-11.0.5/
export CLASSPATH=$JAVA_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin

/home/hadoop/.bashrc

export JAVA_HOME=/opt/java/jdk-11.0.5/
export CLASSPATH=$JAVA_HOME/lib
export PATH=$PATH:$JAVA_HOME/bin

#HADOOP VARIABLES START 
export HADOOP_INSTALL=/home/bigdata/hadoop/
export PATH=$PATH:$HADOOP_INSTALL/bin
export PATH=$PATH:$HADOOP_INSTALL/sbin
export HADOOP_MAPRED_HOME=$HADOOP_INSTALL 
export HADOOP_COMMON_HOME=$HADOOP_INSTALL 
export HADOOP_HDFS_HOME=$HADOOP_INSTALL 
export YARN_HOME=$HADOOP_INSTALL 
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_INSTALL/lib"
#HADOOP VARIABLES END 

/etc/hadoop/hadoop-env.sh

export JAVA_HOME=/opt/java/jdk-11.0.5/
export HADOOP_HOME=/home/bigdata/hadoop/
export PATH=$PATH:/home/bigdata/hadoop/bin

/etc/hadoop/yarn-env.sh

export JAVA_HOME=/opt/java/jdk-11.0.5/

/etc/hadoop/core-site.xml

<property>
     <name>hadoop.tmp.dir</name>
     <value>file:/home/bigdata/hadoopTmp</value>
     <description>Abase for other temporary directories.</description>
</property>
<property>
     <name>fs.defaultFS</name>
     <value>hdfs://localhost:9000</value>
</property>

/etc/hadoop/hdfs-site.xml

<property>
     <name>dfs.replication</name>
     <value>1</value>
</property>
<property>
     <name>dfs.namenode.name.dir</name>
     <value>file:/home/bigdata/hadoopTmp/dfs/name</value>
</property>
<property>
     <name>dfs.datanode.data.dir</name>
     <value>file:/home/bigdata/hadoopTmp/dfs/data</value>
</property>

/etc/hadoop/yarn-site.xml

<property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
</property>
<property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
        <name>yarn.resourcemanager.address</name>
        <value>127.0.0.1:8032</value>
</property>
<property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>127.0.0.1:8030</value>
</property>
<property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>127.0.0.1:8031</value>
</property>

$hdfs namenode -format //文件系统初始化,后续不用多次初始化

$start-dfs.sh //启动HDFS(单机)

$start-all.sh //同时启动HDFS和YARN(此脚本已过时,建议分别执行start-dfs.sh和start-yarn.sh)

$jps //查看进程

##### issue

== 采用root启动 在/etc/profile配置如下:

export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root

相关推荐

  1. Hadoop安装

    2023-12-07 05:34:03       29 阅读
  2. Hadoop - 安装

    2023-12-07 05:34:03       18 阅读
  3. Hadoop3的安装

    2023-12-07 05:34:03       18 阅读
  4. Hadoop 伪分布式安装

    2023-12-07 05:34:03       11 阅读

最近更新

  1. TCP协议是安全的吗?

    2023-12-07 05:34:03       18 阅读
  2. 阿里云服务器执行yum,一直下载docker-ce-stable失败

    2023-12-07 05:34:03       19 阅读
  3. 【Python教程】压缩PDF文件大小

    2023-12-07 05:34:03       18 阅读
  4. 通过文章id递归查询所有评论(xml)

    2023-12-07 05:34:03       20 阅读

热门阅读

  1. 解决 video.js ios 播放一会行一会不行

    2023-12-07 05:34:03       41 阅读
  2. 微信小程序中全局变量的应用

    2023-12-07 05:34:03       38 阅读
  3. 【Android】Glide的简单使用(下)

    2023-12-07 05:34:03       36 阅读
  4. JPA构建多条件查询

    2023-12-07 05:34:03       38 阅读
  5. 浅谈深度学习中的过拟合

    2023-12-07 05:34:03       34 阅读
  6. EasyExcel下拉列表长度过长不显示【已修复】

    2023-12-07 05:34:03       45 阅读
  7. 第3节:Vue3 v-bind指令

    2023-12-07 05:34:03       37 阅读
  8. 【flink】基于flink全量同步postgres表到doris

    2023-12-07 05:34:03       38 阅读
  9. Stream流

    Stream流

    2023-12-07 05:34:03      31 阅读