Sogou Log Analysis with Spark: Using a Map Join

Time: 2021-10-07 12:17:47

Compared with a reduce join, a map join avoids network transfer in the shuffle stage and is therefore more efficient. So when joining a large table with a small one, load the small table into memory as a broadcast variable first; every executor can then read it directly.
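The core pattern, as a minimal sketch (smallRdd and largeRdd here are illustrative placeholders, not part of the worked example that follows): collect the small table to the driver, broadcast it, and do the lookup inside a map over the large table, so no shuffle is required.

    // Assumed inputs: smallRdd: RDD[(String, String)] and largeRdd: RDD[(String, String)], both keyed by the same ID
    val smallTable = sc.broadcast(smallRdd.collectAsMap())      // the small table is shipped to every executor once
    val joined = largeRdd.flatMap { case (id, value) =>
      smallTable.value.get(id).map(name => (id, name, value))   // keep only rows whose ID exists in the small table
    }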

package sogolog

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

class RddFile {
  def readFileToRdd(path: String): RDD[String] = {
    val conf = new SparkConf().setMaster("local").setAppName("sougoDemo")
    val sc = new SparkContext(conf)
    readFileToRdd(path, sc)
  }

  def readFileToRdd(path: String, sc: SparkContext): RDD[String] = {
    // Read with hadoopFile and decode the raw bytes as GBK to avoid garbled Chinese characters
    sc.hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
      .map { pair => new String(pair._2.getBytes, 0, pair._2.getLength, "GBK") }
  }
}
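The reason readFileToRdd goes through sc.hadoopFile rather than sc.textFile is encoding: textFile decodes each line as UTF-8, while the Sogou log files used here are GBK-encoded, so decoding the raw Text bytes with GBK is what keeps the Chinese keywords readable.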
package sogolog

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

import scala.collection.mutable.ArrayBuffer

object MapSideJoin {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("sougoDemo")
    val sc = new SparkContext(conf)

    // Parse the user info file
    val userRdd = new RddFile().readFileToRdd("J:\\scala\\workspace\\first-spark-demo\\sougofile\\user", sc)
    val userMapRDD: RDD[(String, String)] = userRdd.map(line => (line.split("\t")(0), line.split("\t")(1)))

    // Turn the user info into a broadcast variable so every task can reference it directly
    val userMapBroadCast = sc.broadcast(userMapRDD.collectAsMap())

    val searchLogRdd = new RddFile().readFileToRdd("J:\\scala\\workspace\\first-spark-demo\\sougofile\\SogouQ.reduced", sc)

    val joinResult = searchLogRdd.mapPartitionsWithIndex((index, f) => {
      val userMap = userMapBroadCast.value
      val result = ArrayBuffer[String]()
      var count = 0
      // Join the search log with the user table
      // Original log columns: time  user ID  keyword  rank  URL
      // New log columns:      time  user ID  user name  keyword  rank  URL
      f.foreach(log => {
        count = count + 1
        val lineArrs = log.split("\t")
        val uid = lineArrs(1)
        val newLine: StringBuilder = new StringBuilder()
        if (userMap.contains(uid)) {
          newLine.append(lineArrs(0)).append("\t")
          newLine.append(lineArrs(1)).append("\t")
          // Look up the user name by user ID in the broadcast map
          newLine.append(userMap.get(uid).get).append("\t")
          for (i <- 2 to lineArrs.length - 1) {
            newLine.append(lineArrs(i)).append("\t")
          }
          result += newLine.toString()
        }
      })
      println("partition_" + index + " processed " + count + " lines")
      result.iterator
    })

    // Print the results
    joinResult.collect().foreach(println)
  }
}
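Two details of the join loop are worth noting. mapPartitionsWithIndex lets each task read userMapBroadCast.value once per partition instead of once per record, and the partition index is what makes the per-partition line count in the println possible. Rows whose user ID is missing from the broadcast map are simply dropped, so the result is effectively an inner join.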

Results:
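From the code above, the console first shows one "partition_<index> processed N lines" count per partition, followed by the joined log lines in the new tab-separated layout: time, user ID, user name, keyword, rank, URL.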
