Commit 234a6674 authored by David Johnson

A bunch of Hadoop config changes.

Let the master node just be the resourcemanager (so, in the current
test setup, the dedicated resourcemanager node does nothing).  Don't make
the master a slave (datanode) node -- this seems to force HDFS to spread
input files across all datanodes instead of keeping them locally on the
master.  Also, don't tell the slaves about the slaves list file.
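The intended role layout after these changes, as a rough sketch (node names follow the test setup: one "master" plus the worker nodes from the slaves list; the old dedicated resourcemanager node is simply left idle):

# master:  NameNode + ResourceManager (not a DataNode/NodeManager)
# workers: DataNode + NodeManager
# Only the master keeps the slaves list; the workers never see it.
start-dfs.sh    # run on master; starts DataNodes on the hosts in etc/hadoop/slaves
start-yarn.sh   # run on master; ResourceManager binds per yarn.resourcemanager.hostname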
parent b4ddf801
Pipeline #1914 passed in 2 seconds
@@ -7,5 +7,9 @@
   <name>dfs.datanode.data.dir</name>
   <value>/mnt/datanode</value>
 </property>
+<property>
+  <name>dfs.replication</name>
+  <value>1</value>
+</property>
 </configuration>
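With dfs.replication set to 1, every HDFS block lives on exactly one datanode, which makes it easy to see where the input data actually landed. A quick check (the /path/to/input below is just a placeholder for whatever input directory the tests use):

hdfs getconf -confKey dfs.replication                # should print 1
hdfs fsck /path/to/input -files -blocks -locations   # lists the datanode holding each block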
@@ -5,7 +5,7 @@
 </property>
 <property>
   <name>mapreduce.jobhistory.webapp.address</name>
-  <value>0.0.0.0:19888</value>
+  <value>master:9001</value>
 </property>
 <property>
   <name>mapred.child.java.opts</name>
......
 <configuration>
 <property>
   <name>yarn.resourcemanager.hostname</name>
-  <value>resourcemanager</value>
+  <value>master</value>
 </property>
 <property>
   <name>yarn.resourcemanager.webapp.address</name>
......
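With yarn.resourcemanager.hostname now pointing at master, the NodeManagers on the workers should register with master instead of the old resourcemanager node. A simple sanity check from any node that has the client configs:

yarn node -list -all    # every worker should appear as a registered NodeManager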
@@ -21,7 +21,8 @@ tar -xzvf $GCTARBALL -C $HADOOPDIR/etc/
 # Copy the slaves file into place
 #
 cp $HOMEDIR/slaves.namefile $HADOOPDIR/etc/hadoop/slaves
-echo master >> $HADOOPDIR/etc/hadoop/slaves
+# Don't make the master a slave (datanode) node.
+#echo master >> $HADOOPDIR/etc/hadoop/slaves
 # Note down the master
 echo master > $HADOOPDIR/etc/hadoop/master
......
@@ -20,7 +20,7 @@ tar -xzvf $GCTARBALL -C $HADOOPDIR/etc/
 #
 # Copy the slaves file into place
 #
-cp slaves.namefile $HADOOPDIR/etc/hadoop/slaves
+#cp slaves.namefile $HADOOPDIR/etc/hadoop/slaves
 mkdir -p $HADOOPDIR/logs
 chmod 777 $HADOOPDIR/logs
......
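After the two script changes, only the master ends up with a populated slaves file, and master itself is no longer listed in it. Roughly what to expect on the master (the worker hostnames are whatever slaves.namefile contains):

cat $HADOOPDIR/etc/hadoop/slaves    # worker hostnames only -- no "master" line anymore
cat $HADOOPDIR/etc/hadoop/master    # a single line: master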