eclipse - Creating a Directory on HDFS

Date: 2024-03-07 12:25:35

1. Configure Maven in Eclipse

(screenshot: eclipse-maven)

2. Create a Maven project

(screenshot: eclipse-project)

3. Add the required dependencies to pom.xml

<dependencies>
	<dependency>
		<groupId>junit</groupId>
		<artifactId>junit</artifactId>
		<version>RELEASE</version>
	</dependency>
	<dependency>
		<groupId>org.apache.logging.log4j</groupId>
		<artifactId>log4j-core</artifactId>
		<version>2.8.2</version>
	</dependency>
	<dependency>
		<groupId>org.apache.hadoop</groupId>
		<artifactId>hadoop-common</artifactId>
		<version>2.7.2</version>
	</dependency>
	<dependency>
		<groupId>org.apache.hadoop</groupId>
		<artifactId>hadoop-client</artifactId>
		<version>2.7.2</version>
	</dependency>
	<dependency>
		<groupId>org.apache.hadoop</groupId>
		<artifactId>hadoop-hdfs</artifactId>
		<version>2.7.2</version>
	</dependency>
	<dependency>
		<groupId>jdk.tools</groupId>
		<artifactId>jdk.tools</artifactId>
		<version>1.8</version>
		<scope>system</scope>
		<systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
	</dependency>
</dependencies>

4. Create a new file named log4j.properties under the project's src/main/resources directory

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n

5. Under the project's src/main/ directory, create a java source folder, then create the com.hadoop.demo package and an HDFSClient class in it

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSClient {

	public static void main(String[] args) throws IOException {
		// Operate on the cluster: get the file system
		Configuration conf = new Configuration();

		// Set the cluster address as a key-value pair.
		// Because the local hosts file is not configured, "hdfs://hadoop161:9000"
		// cannot be resolved, so the IP address is used instead.
		conf.set("fs.defaultFS", "hdfs://192.168.12.161:9000");

		// To replace the local user with the cluster's hadoop user:
		// Run As > Run Configurations > Java Application > HDFSClient >
		// (x)=Arguments > VM arguments, enter: -DHADOOP_USER_NAME=hadoop

		// Get the HDFS client object
		FileSystem fs = FileSystem.get(conf);
		// Create the path on HDFS
		fs.mkdirs(new Path("/0200/abc"));
		// Close (release) resources
		fs.close();
		// Confirm the program has finished
		System.out.println("over");
	}

}
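
If you would rather not rely on the VM argument, the Hadoop client can also take the user name directly through FileSystem.get(URI, Configuration, String). A minimal alternative sketch, assuming the same cluster address and hadoop user as above (the class name HDFSClientAlt is only illustrative):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HDFSClientAlt {

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		// Passing the user name here replaces the -DHADOOP_USER_NAME VM argument
		FileSystem fs = FileSystem.get(
				URI.create("hdfs://192.168.12.161:9000"), conf, "hadoop");
		fs.mkdirs(new Path("/0200/abc"));
		fs.close();
		System.out.println("over");
	}

}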

Done.
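
To confirm the directory was really created, you can query it from the same client before closing the file system; a minimal sketch (the line drops into the main method above, before fs.close()):

		// Prints true if /0200/abc now exists on the cluster
		System.out.println(fs.exists(new Path("/0200/abc")));

Equivalently, running hdfs dfs -ls /0200 on the cluster should list the abc directory.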
