Directory containing the HDFS command-line tools:
~~~
/home/hadoop/hadoop-2.7.2/bin
~~~
When running a command from inside that directory, prefix it with ./ to refer to the current directory:
~~~
./hdfs dfs -ls /
~~~
To check the $PATH environment variable:
~~~
echo $PATH
~~~
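To run hdfs from any directory without the ./ prefix, the bin directory can be appended to $PATH; a minimal sketch for a Bash shell, using the installation path above (add the export line to ~/.bashrc to make it permanent):
~~~
export PATH=$PATH:/home/hadoop/hadoop-2.7.2/bin
hdfs dfs -ls /
~~~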
HDFS Shell Interface
List the files under a given directory in detail
~~~
hdfs dfs -ls /
hdfs dfs -ls /music
~~~
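-ls also accepts -R to list a directory tree recursively:
~~~
hdfs dfs -ls -R /
~~~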
Create a directory
~~~
hdfs dfs -mkdir /music
~~~
Create nested directories in one step (-p creates any missing parents)
~~~
hdfs dfs -mkdir -p /music/sub
~~~
Upload a file (the second form spells out the full HDFS URI explicitly)
~~~
hdfs dfs -put /home/hadoop/aa.jpg /music
hdfs dfs -put /home/hadoop/aa.jpg hdfs://master:9000/music
~~~
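-copyFromLocal does the same job as -put, except that the source is required to be on the local file system:
~~~
hdfs dfs -copyFromLocal /home/hadoop/aa.jpg /music
~~~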
Download a file to the local file system
~~~
hdfs dfs -copyToLocal /music/aa.jpg /home/hadoop/copy.jpg
~~~
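-get is the shorter equivalent of -copyToLocal:
~~~
hdfs dfs -get /music/aa.jpg /home/hadoop/copy.jpg
~~~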
Copy a file within HDFS
~~~
hdfs dfs -cp /music/aa.jpg /music/bb.jpg
~~~
Move and rename files (-mv operates within HDFS, so both paths are HDFS paths)
~~~
hdfs dfs -mv /music/aa.jpg /music/bb.jpg
hdfs dfs -mv /music/aa.jpg /music/sub/bb.jpg
~~~
Delete files (add -r to remove directories and their contents recursively)
~~~
hdfs dfs -rm /music/aa.jpg
hdfs dfs -rm -r /music/*
~~~
View file contents
~~~
hdfs dfs -cat /music/test.txt
~~~
Append the contents of a local file (here README.txt) to a file in HDFS
~~~
hdfs dfs -appendToFile README.txt /music/test.txt
~~~
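-appendToFile can also take - as the source to read from standard input:
~~~
echo "one more line" | hdfs dfs -appendToFile - /music/test.txt
~~~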
HDFS Programming
1. Copy the configured hadoop-2.7.2 directory from the master node to Windows.
At the sftp prompt (-r downloads the directory recursively):
~~~
lcd e:/tmp
get -r /home/hadoop/hadoop-2.7.2
~~~
2. Put hadoop-eclipse-plugin-2.7.2.jar into Eclipse's plugins directory.
Then open the plugin's view in Eclipse via Window → Show View → Other.


3. In Eclipse, choose Window → Preferences. In the dialog that opens, a new Hadoop Map/Reduce entry appears on the left; click it and set the Hadoop installation directory on the right.
4. Create a MapReduce project.
Choose File → New → Other, find Map/Reduce Project and select it, as shown below:

5. Create a test class (upload a file) and run it with Run As → Java Application.
~~~
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestHDFS {
    public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
        Configuration conf = new Configuration();
        /*conf.set("fs.defaultFS", "hdfs://192.168.231.4:9000");
        FileSystem hdfs = FileSystem.get(conf);*/
        // connect to the NameNode as user "hadoop"
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.231.4:9000"), conf, "hadoop");
        Path src = new Path("e:/xx.txt");        // local source file
        Path dst = new Path("/music/xx.txt");    // destination in HDFS
        hdfs.copyFromLocalFile(src, dst);
        System.out.println("Upload to: " + hdfs.getUri());
        // listStatus on a file path returns the status of that single file
        FileStatus files[] = hdfs.listStatus(dst);
        for (FileStatus file : files) {
            System.out.println(file.getPath());
        }
    }
}
~~~
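A note for running this from Eclipse on Windows: with some Hadoop builds the client looks for winutils.exe via an HADOOP_HOME setting; if you see such an error, point HADOOP_HOME at the hadoop-2.7.2 directory exported in step 1.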
6. Create a test class (create a file and write to it)
~~~
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestHDFS2 {
    public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
        Configuration conf = new Configuration();
        //conf.set("fs.defaultFS", "hdfs://192.168.1.109:9000");
        //FileSystem hdfs = FileSystem.get(conf);
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.231.4:9000"), conf, "hadoop");
        byte[] buff = "hello world".getBytes();
        Path dst = new Path("/music/hello.txt");
        // create() opens the file for writing and returns an output stream
        FSDataOutputStream outputStream = hdfs.create(dst);
        outputStream.write(buff, 0, buff.length);
        outputStream.close();
        // verify that the file now exists
        boolean isExists = hdfs.exists(dst);
        System.out.println(isExists);
    }
}
~~~
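Note that hdfs.create(dst) silently overwrites an existing file. To fail instead, FileSystem offers an overload with an explicit overwrite flag; a minimal fragment, reusing hdfs and dst from the class above:
~~~
// overwrite=false: throws an IOException if /music/hello.txt already exists
FSDataOutputStream outputStream = hdfs.create(dst, false);
~~~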
7. Create a test class (read a file)
~~~
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestHDFS3 {
    public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.231.4:9000"), conf, "hadoop");
        FSDataInputStream is = hdfs.open(new Path("/music/xx.txt"));
        // read one byte at a time; fine for ASCII text, -1 signals end of file
        int i = is.read();
        while (i != -1) {
            System.out.print((char) i);
            i = is.read();
        }
        is.close();
    }
}
~~~
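Reading one byte at a time is slow over the network and only safe for single-byte encodings. A buffered alternative is sketched below using Hadoop's own org.apache.hadoop.io.IOUtils, assuming the same cluster address and file (the class name here is illustrative):
~~~
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class TestHDFS3Buffered {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.231.4:9000"), conf, "hadoop");
        FSDataInputStream is = hdfs.open(new Path("/music/xx.txt"));
        try {
            // copy the stream to stdout in 4 KB chunks; false = leave the stream open
            IOUtils.copyBytes(is, System.out, 4096, false);
        } finally {
            IOUtils.closeStream(is);
        }
    }
}
~~~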
8. Create a test class (list all files under /music)
~~~
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestHDFS4 {
    public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = new Configuration();
        String uri = "hdfs://192.168.231.4:9000";
        FileSystem hdfs = FileSystem.get(URI.create(uri), conf, "hadoop");
        // listStatus on a directory returns one FileStatus per entry
        FileStatus[] status = hdfs.listStatus(new Path("/music"));
        for (FileStatus sta : status) {
            System.out.println(sta);
        }
    }
}
~~~
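Printing a FileStatus directly dumps its whole toString(). To pick out individual fields, the loop body can be rewritten as in this fragment (isDirectory(), getPath() and getLen() are standard FileStatus accessors):
~~~
for (FileStatus sta : status) {
    String kind = sta.isDirectory() ? "dir " : "file";
    // print type, name, and size in bytes for each entry
    System.out.println(kind + " " + sta.getPath().getName() + " " + sta.getLen());
}
~~~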
9. Create a test class (delete a file or directory)
~~~
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TestHDFS5 {
    public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = new Configuration();
        String uri = "hdfs://192.168.231.4:9000";
        FileSystem hdfs = FileSystem.get(URI.create(uri), conf, "hadoop");
        //Path dst = new Path("/music/aa.jpg");
        Path dst = new Path("/music");
        // the second argument enables recursive deletion of directories
        boolean isdeleted = hdfs.delete(dst, true);
        System.out.println(isdeleted);
    }
}
~~~
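The recursive flag only matters for directories: delete(dst, false) succeeds on a file or an empty directory but fails on a non-empty one, so pass true only when you really intend to remove everything underneath the path.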