Spark Streaming's window functions are not as rich as Flink's, but they are extremely useful. Consider the following example:
kafkaStream.transform { rdd =>
  offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  rdd
}.map(_._2)
  .map((_, 1))
  .reduceByKeyAndWindow((v1: Int, v2: Int) => v1 + v2, Seconds(8), Seconds(4))
This means that every 4 seconds (the second duration), the data of the most recent 8 seconds (the first duration) is computed.
The first duration is called the window length and the second the slide interval: every 4 seconds, the data of the most recent 8 seconds is computed. This fits a number of business scenarios. For website logs, for example, you might compute the page views (PV) of the last two hours once every hour. Another scenario is to accumulate in memory first and only then add the result into Redis, e.g. every 5 seconds sum the data of the last 5 seconds and flush that partial sum to Redis, because updating Redis on every single record would cause problems. There is one more time parameter involved, shown below:
val ssc = new StreamingContext(sparkConf, Seconds(4))

The batch interval set here is the basic unit of time in which Spark Streaming generates jobs. The window length and the slide interval must both be integer multiples of this batch interval. If all you need is a simple in-memory accumulation, just set the window length equal to the slide interval.
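For the Redis scenario above, a minimal sketch of the flush-per-window pattern could look like the following. The words DStream, the Jedis client, and the Redis address are assumptions for illustration; because the window length equals the slide interval, each record is counted exactly once:

import redis.clients.jedis.Jedis

// Window length == slide interval (5s, 5s): each window covers exactly one
// non-overlapping 5-second slice, so nothing is double-counted.
words.map((_, 1))
  .reduceByKeyAndWindow((v1: Int, v2: Int) => v1 + v2, Seconds(5), Seconds(5))
  .foreachRDD { rdd =>
    rdd.foreachPartition { partition =>
      // One connection per partition rather than per record,
      // to avoid hitting Redis too frequently.
      val jedis = new Jedis("localhost", 6379) // assumed Redis address
      partition.foreach { case (key, partialSum) =>
        jedis.incrBy(key, partialSum) // accumulate the in-memory partial sum into Redis
      }
      jedis.close()
    }
  }

The point of the pattern is that the window does the heavy per-record aggregation in memory, and Redis only sees one increment per key per window.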
Persistence:
Because window operations reuse RDDs from earlier batches, a checkpoint directory must be set here.
See the following example:
package com.jingde.sparkstreamlast

import kafka.serializer.StringDecoder
import org.apache.log4j.{ Level, Logger }
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{ Seconds, StreamingContext }
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka._
import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.exception.ZkMarshallingError
import org.I0Itec.zkclient.serialize.ZkSerializer
import kafka.utils.{ ZkUtils, ZKGroupTopicDirs }
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.api.{ OffsetRequest, PartitionOffsetRequestInfo, TopicMetadataRequest }
import kafka.consumer.SimpleConsumer

object StreamingFromKafka {

  val groupId = "logs"
  val topic = "streaming"

  // ZkClient with a plain-string serializer, since offsets are stored as UTF-8 strings
  val zkClient = new ZkClient("localhost:9999", 60000, 60000, new ZkSerializer {
    override def serialize(data: Object): Array[Byte] = {
      try { data.toString().getBytes("UTF-8") }
      catch { case e: ZkMarshallingError => null }
    }
    override def deserialize(bytes: Array[Byte]): Object = {
      try { new String(bytes, "UTF-8") }
      catch { case e: ZkMarshallingError => null }
    }
  })

  val topicDirs = new ZKGroupTopicDirs("spark_streaming_test", topic)
  val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    val sparkConf = new SparkConf().setAppName("DirectKafkaWordCount")
    sparkConf.setMaster("local[*]")
    sparkConf.set("spark.streaming.kafka.maxRatePerPartition", "2")
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val ssc = new StreamingContext(sparkConf, Seconds(5))
    val kafkaParams = Map(
      "metadata.broker.list" -> "localhost:9092",
      "group.id" -> groupId,
      "zookeeper.connect" -> "localhost:9999",
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString)
    val topics = Set(topic)

    val children = zkClient.countChildren(s"${topicDirs.consumerOffsetDir}")
    var kafkaStream: InputDStream[(String, String)] = null
    var fromOffsets: Map[TopicAndPartition, Long] = Map()

    // In production this should be an HDFS path; the window operation needs RDDs from
    // earlier batches, so a checkpoint directory is mandatory here.
    ssc.checkpoint("D:\\tmp\\storm-hdfs")

    if (children > 0) {
      // ---get partition leader begin----
      val topicList = List(topic)
      // Fetch metadata for the topic: brokers and partition distribution
      val req = new TopicMetadataRequest(topicList, 0)
      // Arguments: broker host, broker port, socket timeout, buffer size, client id
      val getLeaderConsumer = new SimpleConsumer("localhost", 9092, 10000, 10000, "OffsetLookup")
      val res = getLeaderConsumer.send(req)
      val topicMetaOption = res.topicsMetadata.headOption
      // Map each partition id to the host of its leader broker
      val partitions = topicMetaOption match {
        case Some(tm) => tm.partitionsMetadata.map(pm => (pm.partitionId, pm.leader.get.host)).toMap[Int, String]
        case None => Map[Int, String]()
      }

      for (i <- 0 until children) {
        val partitionOffset = zkClient.readData[String](s"${topicDirs.consumerOffsetDir}/${i}")
        val tp = TopicAndPartition(topic, i)

        // ---additional begin-----
        // Ask the partition leader for its earliest available offset (-2, 1)
        val requestMin = OffsetRequest(Map(tp -> PartitionOffsetRequestInfo(OffsetRequest.EarliestTime, 1)))
        val consumerMin = new SimpleConsumer(partitions(i), 9092, 10000, 10000, "getMinOffset")
        val curOffsets = consumerMin.getOffsetsBefore(requestMin).partitionErrorAndOffsets(tp).offsets
        var nextOffset = partitionOffset.toLong
        // If the offset saved in ZooKeeper has already been removed by Kafka log retention,
        // fall back to the earliest offset that still exists
        if (curOffsets.length > 0 && nextOffset < curOffsets.head) {
          nextOffset = curOffsets.head
        }
        // ---additional end-----

        // Record the starting offset of every partition in fromOffsets
        fromOffsets += (tp -> nextOffset)
      }

      // Transform each Kafka message into a (topic_name, message) tuple
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.topic, mmd.message())
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
        ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      // No offsets in ZooKeeper yet: start from the position given by auto.offset.reset
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
        ssc, kafkaParams, topics)
    }

    var offsetRanges = Array[OffsetRange]()
    kafkaStream.transform { rdd =>
      // Capture the offset ranges of this batch before the shuffle loses them
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }.map(_._2)
      .map((_, 1))
      // Every 5 seconds, aggregate the data of the last 5 seconds
      .reduceByKeyAndWindow((v1: Int, v2: Int) => v1 + v2, Seconds(5), Seconds(5))
      .foreachRDD { rdd =>
        rdd.foreachPartition { element => element.foreach(println) }
        // Save the end offset of the processed range so that a restart resumes
        // after the data that has already been handled
        for (o <- offsetRanges) {
          ZkUtils.updatePersistentPath(zkClient, s"${topicDirs.consumerOffsetDir}/${o.partition}", o.untilOffset.toString)
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }
}
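A side note on the checkpoint requirement: reduceByKeyAndWindow also has an overload that takes an inverse reduce function and updates the window incrementally, adding the batch that slides in and subtracting the one that slides out instead of re-reducing the whole window, and it is this incremental variant in particular for which Spark requires checkpointing to be enabled. A minimal sketch, assuming a counts DStream of (String, Int) pairs:

// Incremental window aggregation: add new values, subtract expired ones.
// Requires ssc.checkpoint(...) to be set, as in the example above.
val windowedCounts = counts.reduceByKeyAndWindow(
  (v1: Int, v2: Int) => v1 + v2, // values entering the window
  (v1: Int, v2: Int) => v1 - v2, // values leaving the window
  Seconds(8),                    // window length
  Seconds(4))                    // slide interval

For long windows with short slide intervals, this variant avoids recomputing the entire window on every slide.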