For Spark concepts and Linux local-mode deployment, click here.
1. Configuration file (pom.xml)
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>org.example</groupId>
    <artifactId>zymTest</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <!-- https://mvnrepository.com/artifact/org.apache.spark/spark-sql -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.13</artifactId>
            <version>3.2.0</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>6.0.6</version>
        </dependency>
    </dependencies>
    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>
</project>
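A note on the dependencies: the _2.13 suffix on spark-sql is the Scala version the artifact was built against, so every Spark artifact in one project must carry the same suffix. To confirm the POM resolves before writing any real code, a minimal smoke test like the following (the class name is arbitrary) should start a local session and print the version 3.2.0:

import org.apache.spark.sql.SparkSession;

public class VersionCheck {
    public static void main(String[] args) {
        //If the dependencies above resolve, this prints the Spark version and exits
        SparkSession spark = SparkSession.builder()
                .appName("VersionCheck")
                .master("local[*]")
                .getOrCreate();
        System.out.println("Spark version: " + spark.version());
        spark.stop();
    }
}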
2. Test class
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
import java.util.Arrays;
import java.util.List;
public class SparkTest {
    public static void main(String[] args) {
        //testSparkRddTxt();
        //testSparkRddCsv();
        //testSparkRddMysql();
        testSparkRddJson();
    }
    //Spark test on an external text file
    //RDD: resilient distributed dataset
    public static void testSparkRddTxt(){
        //1. Set up the environment
        SparkConf sparkConf = new SparkConf();
        sparkConf.set("spark.driver.host", "localhost");
        //System.setProperty("SPARK_LOCAL_HOSTNAME", "localhost");
        sparkConf.setAppName("JavaSparkDemo").setMaster("local[*]");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        jsc.setLogLevel("WARN");
        //2. Process the data (the file name below is a placeholder)
        JavaRDD<String> fileRDD = jsc.textFile("D:\\TEMP\\words.txt");
        JavaRDD<String> wordsRDD = fileRDD.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
        JavaPairRDD<String, Integer> wordAndOneRDD = wordsRDD.mapToPair(word -> new Tuple2<>(word, 1));
        JavaPairRDD<String, Integer> wordAndCountRDD = wordAndOneRDD.reduceByKey((a, b) -> a + b);
        //3. Print the result
        List<Tuple2<String, Integer>> result = wordAndCountRDD.collect();
        result.forEach(System.out::println);
        //4. Release resources
        jsc.stop();
    }
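    //A hedged variation on testSparkRddTxt(): the same word count, sorted by
    //frequency before printing. The method name and file name are illustrative only.
    public static void testSparkRddTxtSorted(){
        SparkConf sparkConf = new SparkConf();
        sparkConf.set("spark.driver.host", "localhost");
        sparkConf.setAppName("JavaSparkDemo").setMaster("local[*]");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        jsc.setLogLevel("WARN");
        JavaPairRDD<String, Integer> wordAndCountRDD = jsc.textFile("D:\\TEMP\\words.txt")
                .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey((a, b) -> a + b);
        //Swap to (count, word) so sortByKey can order by frequency; false = descending
        wordAndCountRDD.mapToPair(t -> new Tuple2<>(t._2, t._1))
                .sortByKey(false)
                .take(10)
                .forEach(System.out::println);
        jsc.stop();
    }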
    //Spark test on an external CSV file
    public static void testSparkRddCsv(){
        //1. Set up the environment
        SparkConf sparkConf = new SparkConf();
        sparkConf.set("spark.driver.host", "localhost");
        sparkConf.setAppName("JavaSparkDemo").setMaster("local[*]");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        jsc.setLogLevel("WARN");
        //2. Read the external file into an RDD of strings (the file name is a placeholder)
        JavaRDD<String> fileRDD = jsc.textFile("D:\\TEMP\\data.csv");
        //3. Split each line on commas
        JavaRDD<String> wordsRDD = fileRDD.flatMap(line -> Arrays.asList(line.split(",")).iterator());
        JavaPairRDD<String, Integer> wordAndOneRDD = wordsRDD.mapToPair(word -> new Tuple2<>(word, 1));
        JavaPairRDD<String, Integer> wordAndCountRDD = wordAndOneRDD.reduceByKey((a, b) -> a + b);
        System.out.println(wordAndCountRDD.collect());
        jsc.stop();
    }
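    //Alternative sketch for the CSV case: let Spark SQL parse the file into a DataFrame
    //instead of splitting strings by hand. The header/inferSchema options and the
    //column layout of data.csv are assumptions about the input file.
    public static void testSparkSqlCsv(){
        SparkSession spark = SparkSession.builder()
                .appName("JavaSparkDemo")
                .config("spark.driver.host", "localhost")
                .master("local[*]")
                .getOrCreate();
        Dataset<Row> csvDF = spark.read()
                .option("header", "true")       //first line holds column names
                .option("inferSchema", "true")  //let Spark guess column types
                .csv("D:\\TEMP\\data.csv");
        csvDF.printSchema();
        csvDF.show();
        spark.stop();
    }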
    //Spark reading from a MySQL database
    //Requires the mysql-connector-java dependency
    public static void testSparkRddMysql(){
        SparkSession spark = SparkSession
                .builder()
                .appName("SparkSQLTest3")
                .config("spark.driver.host", "localhost")
                .config("spark.some.config.option", "some-value")
                .master("local[*]")
                .getOrCreate();
        //A Dataset is a strongly typed collection of data
        Dataset<Row> jdbcDF = spark.read()
                .format("jdbc")
                .option("url", "jdbc:mysql://10.0.173.220:3307/dtbk_rzt?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai")
                .option("dbtable", "(SELECT * FROM location_authorize) tmp")
                .option("user", "dtbk_dev_2")
                .option("password", "1qaz@WSX")
                .option("driver", "com.mysql.cj.jdbc.Driver")
                .load();
        jdbcDF.printSchema();
        jdbcDF.show();
        //Convert to an RDD
        JavaRDD<Row> rowJavaRDD = jdbcDF.javaRDD();
        System.out.println(rowJavaRDD.collect());
        spark.stop();
    }
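    //Sketch of the reverse direction: writing a DataFrame back to MySQL through the
    //same JDBC options. The target table name (location_authorize_copy) is an
    //assumption; SaveMode.Append appends rows instead of failing if the table exists.
    public static void testSparkRddMysqlWrite(){
        SparkSession spark = SparkSession.builder()
                .appName("SparkSQLTest3")
                .config("spark.driver.host", "localhost")
                .master("local[*]")
                .getOrCreate();
        Dataset<Row> jdbcDF = spark.read()
                .format("jdbc")
                .option("url", "jdbc:mysql://10.0.173.220:3307/dtbk_rzt?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai")
                .option("dbtable", "(SELECT * FROM location_authorize) tmp")
                .option("user", "dtbk_dev_2")
                .option("password", "1qaz@WSX")
                .option("driver", "com.mysql.cj.jdbc.Driver")
                .load();
        jdbcDF.write()
                .format("jdbc")
                .option("url", "jdbc:mysql://10.0.173.220:3307/dtbk_rzt?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai")
                .option("dbtable", "location_authorize_copy")
                .option("user", "dtbk_dev_2")
                .option("password", "1qaz@WSX")
                .option("driver", "com.mysql.cj.jdbc.Driver")
                .mode(SaveMode.Append)
                .save();
        spark.stop();
    }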
    //Spark test on JSON data
    public static void testSparkRddJson(){
        //1. Set up the environment
        SparkSession spark = SparkSession
                .builder()
                .appName("SparkSQLTest3")
                .config("spark.driver.host", "localhost")
                .config("spark.some.config.option", "some-value")
                .master("local[*]")
                .getOrCreate();
        //2. Read the JSON file (the file name is a placeholder)
        Dataset<Row> df = spark.read().json("D:\\TEMP\\person.json");
        df.printSchema();
        df.show();
        //3. Register a temp view and query it with SQL
        df.createOrReplaceTempView("t_person");
        spark.sql("select age,name from t_person where age > 3").show();
        spark.stop();
    }
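    //Sketch: reading the same JSON into a strongly typed Dataset via a Java bean.
    //The Person bean (name, age) mirrors the columns queried above and is an
    //assumption about the JSON file's shape.
    public static void testSparkDatasetJson(){
        SparkSession spark = SparkSession.builder()
                .appName("SparkSQLTest3")
                .config("spark.driver.host", "localhost")
                .master("local[*]")
                .getOrCreate();
        Dataset<Person> personDS = spark.read()
                .json("D:\\TEMP\\person.json")
                .as(Encoders.bean(Person.class));
        personDS.show();
        spark.stop();
    }
    public static class Person implements java.io.Serializable {
        private String name;
        private Long age;
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
        public Long getAge() { return age; }
        public void setAge(Long age) { this.age = age; }
    }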
}
3. Summary
The test class contains four test methods, covering txt file analysis, csv files, JSON processing, and a direct MySQL connection. All four have been run and print their results correctly. The JDBC path to MySQL is especially convenient, since a SQL statement can be passed straight through the dbtable option.