
Network setup: configure the interface and DNS first.

sudo nano /etc/network/interfaces

sudo nano /etc/resolv.conf
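For reference, a minimal static-IP configuration (the addresses below are placeholder values, not from the original notes):

# /etc/network/interfaces (Debian/Raspbian style)
auto eth0
iface eth0 inet static
    address 192.168.1.10
    netmask 255.255.255.0
    gateway 192.168.1.1

# /etc/resolv.conf
nameserver 8.8.8.8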

1. Download the Hadoop release file.

2. Copy the Hadoop archive to /home/pi.
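For example, the 1.2.1 release used below is available from the Apache archive (URL assumed current; verify before downloading):

wget https://archive.apache.org/dist/hadoop/core/hadoop-1.2.1/hadoop-1.2.1.tar.gz -P /home/pi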
Single node
1. Install Hadoop
Create the directory:
sudo mkdir /opt
Enter the directory:
cd /opt
Extract the downloaded file:
sudo tar xvzf /home/pi/hadoop-1.2.1.tar.gz -C /opt/
Rename it:
sudo mv hadoop-1.2.1 hadoop
Change ownership:
sudo chown -R hduser:hadoop hadoop
(create the hadoop group and hduser user first)
Create the group and user:
sudo addgroup hadoop
sudo adduser --ingroup hadoop hduser
(enter a password when prompted)
sudo adduser hduser sudo
sudo chown -R hduser:hadoop hadoop
Switch to the new user:
su hduser (enter the password)
cd ~/
mkdir ~/.ssh
ssh-keygen -t rsa -P ""
cat ~/.ssh/id_rsa.pub > ~/.ssh/authorized_keys
ssh localhost
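If ssh localhost still prompts for a password, file permissions are the usual culprit; a quick fix (standard OpenSSH requirements, not part of the original notes):

chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys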
sudo nano /etc/bash.bashrc
Add the following lines:
export JAVA_HOME=$(readlink -f /usr/bin/java | sed "s:bin/java::")
export HADOOP_INSTALL=/opt/hadoop
export PATH=$PATH:$HADOOP_INSTALL/bin
Ctrl+O
Check the Java location:
update-alternatives --config java
/usr/lib/jvm/java-7-openjdk-amd64/
(JAVA_HOME is the part of the path before /jre)
Type: sudo nano ~/.bashrc
Add at the very bottom:
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 (adjust to your Java location)
export HADOOP_INSTALL=/opt/hadoop (these notes install to /opt/hadoop, not /usr/local/hadoop)
export PATH=$PATH:$HADOOP_INSTALL/bin
export PATH=$PATH:$HADOOP_INSTALL/sbin
export HADOOP_MAPRED_HOME=$HADOOP_INSTALL
export HADOOP_COMMON_HOME=$HADOOP_INSTALL
export HADOOP_HDFS_HOME=$HADOOP_INSTALL
export YARN_HOME=$HADOOP_INSTALL
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_INSTALL/lib"
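A quick sanity check that the new environment is picked up (assuming the exports above were saved):

source ~/.bashrc
echo $JAVA_HOME
hadoop version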
Ctrl+O
sudo nano /opt/hadoop/conf/hadoop-env.sh
(remove the # in front of the JAVA_HOME line)
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/
Ctrl+O
Ctrl+X
Create the temporary directory:
sudo mkdir -p /app/hadoop/tmp
sudo chown hduser:hadoop /app/hadoop/tmp
sudo nano /opt/hadoop/conf/core-site.xml
Copy inside the <configuration> block:

<property>
<name>hadoop.tmp.dir</name>
<value>/app/hadoop/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:54310</value>
<description>The name of the default file system. A URI whose
scheme and authority determine the FileSystem implementation. The
uri's scheme determines the config property (fs.SCHEME.impl) naming
the FileSystem implementation class. The uri's authority is used to
determine the host, port, etc. for a filesystem.</description>
</property>
Ctrl+O
sudo nano /opt/hadoop/conf/mapred-site.xml
Inside the <configuration> block:
<property>
<name>mapred.job.tracker</name>
<value>localhost:54311</value>
<description>The host and port that the MapReduce job tracker runs
at. If "local", then jobs are run in-process as a single map
and reduce task.
</description>
</property>
Ctrl+O
Create the HDFS directories:
sudo mkdir -p /opt/hadoop_store/hdfs/namenode
sudo mkdir -p /opt/hadoop_store/hdfs/datanode
sudo chown -R hduser:hadoop /opt/hadoop_store

sudo nano /opt/hadoop/conf/hdfs-site.xml

Inside the <configuration> block:

<property>
<name>dfs.replication</name>
<value>1</value>
<description>Default block replication.
The actual number of replications can be specified when the file is created.
The default is used if replication is not specified in create time.
</description>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/opt/hadoop_store/hdfs/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/opt/hadoop_store/hdfs/datanode</value>
</property>
Ctrl+O
Format the namenode:
hadoop namenode -format
(if the command is not found, re-check the PATH exports in ~/.bashrc)
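On success, the format step in Hadoop 1.x normally ends by reporting that the storage directory "has been successfully formatted" (exact wording may vary by version).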
Run SSH:
ssh localhost
Starting Hadoop:
cd /opt/hadoop/bin (in Hadoop 1.x the start scripts live under bin/, not sbin/)
start-all.sh
Type jps (to see the 6 running Java processes)
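For a single-node Hadoop 1.x setup, the six processes listed by jps are normally NameNode, DataNode, SecondaryNameNode, JobTracker, TaskTracker, and Jps itself; if one is missing, check the logs under /opt/hadoop/logs.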
Hadoop web interface:
http://localhost:50070
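As a quick smoke test, the examples jar that ships in the Hadoop 1.2.1 install root can run a small job (the pi job with 2 maps and 10 samples is just one convenient choice):

cd /opt/hadoop
hadoop jar hadoop-examples-1.2.1.jar pi 2 10

The JobTracker also serves its own web UI, typically at http://localhost:50030 in Hadoop 1.x.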

(References: Raspberry Pi Hadoop cluster; bogotobogo Hadoop tutorial)
