Sample hdfs-site.xml for Apache Hadoop 2.7.3

This is a sample hdfs-site.xml for Apache Hadoop 2.7.3 with the essential properties set. A skeleton hdfs-site.xml can also be found in the etc/hadoop directory after unpacking the release tarball.

hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->
<configuration>

  <property>
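    <!-- Local directory where the NameNode stores the namespace image and edit log (2.x name: dfs.namenode.name.dir) -->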
    <name>dfs.name.dir</name>
    <value>/var/local/hadoop/hdfs/name</value>
    <final>true</final>
  </property>

  <property>
    <!-- Comma-separated list of local directories where DataNodes store their blocks (2.x name: dfs.datanode.data.dir) -->
    <name>dfs.data.dir</name>
    <value>/data,/data2</value>
    <final>true</final>
  </property>

  <property>
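    <!-- Interval, in seconds, between DataNode heartbeats to the NameNode -->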
    <name>dfs.heartbeat.interval</name>
    <value>3</value>
  </property>

  <property>
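    <!-- DataNode data transfer address; 1004 is a privileged port, typical of secure DataNode setups -->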
    <name>dfs.datanode.address</name>
    <value>0.0.0.0:1004</value>
  </property>

  <property>
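    <!-- Fraction of blocks that must meet minimum replication before the NameNode leaves safe mode (2.x name: dfs.namenode.safemode.threshold-pct) -->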
    <name>dfs.safemode.threshold.pct</name>
    <value>1.0f</value>
  </property>

  <property>
    <!-- DataNode HTTP server address; 1006 is a privileged port, typical of secure DataNode setups -->
    <name>dfs.datanode.http.address</name>
    <value>0.0.0.0:1006</value>
  </property>

  <property>
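    <!-- NameNode web UI address (2.x name: dfs.namenode.http-address) -->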
    <name>dfs.http.address</name>
    <value>0.0.0.0:50070</value>
    <final>true</final>
  </property>

  <property>
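    <!-- DataNode IPC server address -->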
    <name>dfs.datanode.ipc.address</name>
    <value>0.0.0.0:8025</value>
  </property>

<!-- Permission related properties -->
  <property>
    <!-- Umask applied when creating files and directories (2.x name: fs.permissions.umask-mode) -->
    <name>dfs.umaskmode</name>
    <value>077</value>
  </property>

  <property>
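    <!-- Enable permission checking in HDFS (2.x name: dfs.permissions.enabled) -->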
    <name>dfs.permissions</name> 
    <value>true</value>
  </property>

  <property>
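    <!-- Group whose members are HDFS super-users (2.x name: dfs.permissions.superusergroup) -->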
    <name>dfs.permissions.supergroup</name>
    <value>hdpgrp</value>
  </property>

  <property>
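    <!-- Require block access tokens when clients access DataNodes (used on secure clusters) -->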
    <name>dfs.block.access.token.enable</name>
    <value>true</value>
  </property>

  <property>
    <name>dfs.secondary.http.address</name>
    <value>${local.secondnamenode}:50090</value>
    <description>Address of secondary namenode web server</description>
  </property>

  <property>
    <name>dfs.secondary.https.port</name>
    <value>50490</value>
    <description>The HTTPS port to which the secondary NameNode binds</description>
  </property>

  <property>
    <name>dfs.https.port</name>
    <value>50470</value>
    <description>The HTTPS port to which the NameNode binds</description>
  </property>

  <property>
    <name>dfs.https.address</name>
    <value>0.0.0.0:50470</value>
    <description>The HTTPS address to which the NameNode binds</description>
  </property>

  <property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>700</value>
    <description>
        The permissions that should be set on the dfs.data.dir
        directories. The datanode will not come up if existing
        dfs.data.dir directories have different permissions. If the
        directories don't exist, they will be created with this
        permission.</description>
  </property>

  <property>
    <name>dfs.access.time.precision</name>
    <value>0</value>
    <description>
          The access time for an HDFS file is precise up to this value.
          The default value is 1 hour. Setting a value of 0 disables
          access times for HDFS.
    </description>
  </property>

  <property>
    <name>dfs.cluster.administrators</name>
    <value> hdfs</value>
    <description>ACL of users and groups allowed to view the default servlets in HDFS. The leading space in the value means no users are listed, so access is granted to members of the hdfs group.</description>
  </property>

</configuration>
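
Several of the property names above (dfs.name.dir, dfs.data.dir, dfs.permissions, dfs.http.address) are deprecated in Hadoop 2.x but are still honored in 2.7.3 and mapped to their current equivalents. As a rough sketch of how to verify which values take effect, the small Java program below (a hypothetical PrintHdfsConf class, assuming the hdfs-site.xml above is on the classpath, e.g. under $HADOOP_CONF_DIR) loads the configuration with HdfsConfiguration and prints a few keys by their current names. The same check can be done from the command line with hdfs getconf -confKey <property name>.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class PrintHdfsConf {
  public static void main(String[] args) {
    // HdfsConfiguration adds hdfs-default.xml and hdfs-site.xml as default
    // resources and registers the deprecation mappings for the old key names.
    Configuration conf = new HdfsConfiguration();

    // Current (2.x) names; the sample file sets them under their older aliases.
    String[] keys = {
        "dfs.namenode.name.dir",      // set above as dfs.name.dir
        "dfs.datanode.data.dir",      // set above as dfs.data.dir
        "dfs.permissions.enabled",    // set above as dfs.permissions
        "dfs.namenode.http-address"   // set above as dfs.http.address
    };
    for (String key : keys) {
      System.out.println(key + " = " + conf.get(key));
    }
  }
}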
