Kafka is message-oriented middleware used to pass messages between systems, with support for message persistence.
It can be viewed as a queue model, or as a producer/consumer model.
Simple producer and consumer client code follows:
package com.pt.util.kafka;

import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class MyProducer {
    public static void sendMsg(String msg) {
        Properties props = new Properties();
        // broker list
        props.put("metadata.broker.list", "192.168.91.231:9092,192.168.91.231:9093");
        /*
         * The serializer used to prepare the message for transmission to the broker.
         * Note that the encoder must accept the same type as the KeyedMessage
         * object created below.
         */
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        /*
         * Defines the class that decides which partition of the topic
         * each message is sent to.
         */
        props.put("partitioner.class", "example.producer.SimplePartitioner");
        /*
         * Requires an acknowledgement from the broker that the
         * message was received.
         */
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);

        /*
         * Producer is a Java generic with two type parameters:
         * the first is the type of the partition key, the second the type of the message.
         */
        Producer<String, String> producer = new Producer<String, String>(config);

        String ip = "192.168.91.231";
        /*
         * "panteng" is the topic to write to; the IP is passed as the partition key.
         * If no key is included, Kafka assigns the message to a random partition,
         * even if a partitioner class is configured.
         */
        KeyedMessage<String, String> data = new KeyedMessage<String, String>("panteng", ip, msg);
        producer.send(data);
        producer.close();
    }
}
MyProducer.java
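The producer config above points partitioner.class at example.producer.SimplePartitioner, which is not shown in this post. A minimal sketch of such a class, modeled on the partitioner from the Kafka 0.8 producer example in the official docs (the package name is assumed to match the property value):

package example.producer;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

// Sketch of the partitioner referenced by "partitioner.class" above.
// Routes messages by the last octet of the IP-address key, so messages
// keyed with the same host always land in the same partition.
public class SimplePartitioner implements Partitioner {

    // Kafka instantiates the partitioner reflectively and passes in the producer's properties.
    public SimplePartitioner(VerifiableProperties props) {
    }

    public int partition(Object key, int numPartitions) {
        String stringKey = (String) key;
        int offset = stringKey.lastIndexOf('.');
        if (offset > 0) {
            // e.g. key "192.168.91.231" -> 231 % numPartitions
            return Integer.parseInt(stringKey.substring(offset + 1)) % numPartitions;
        }
        return 0;
    }
}

Because the partition is derived from the key, all messages with the same IP go to the same partition, which preserves per-host ordering.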
package cn.outofmemory.kafka;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

public class KafkaConsumer {
    private final ConsumerConnector consumer;

    public KafkaConsumer() {
        Properties props = new Properties();
        // ZooKeeper connection
        props.put("zookeeper.connect", "192.168.91.231:2181");
        // group.id identifies a consumer group
        props.put("group.id", "jd-group");
        // ZooKeeper session timeout
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        // start from the earliest offset when the group has no committed offset
        props.put("auto.offset.reset", "smallest");
        ConsumerConfig config = new ConsumerConfig(props);
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    }

    public void consume() {
        // one stream (thread) for the "panteng" topic
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("panteng", 1);

        // decoders for key and value; these replace any serializer.class setting
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get("panteng").get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()) {
            System.out.println(it.next().message());
        }
    }

    public void stop() {
        try {
            consumer.shutdown();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        new KafkaConsumer().consume();
    }
}
KafkaConsumer.java
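To exercise both classes together, a hypothetical driver (the Demo class below is not part of the original code, and it assumes the topic "panteng" already exists on the brokers configured above) can start the consumer in a background thread, publish one message, and then shut the consumer down; shutdown() makes the blocked iterator's hasNext() return false, so the consume loop exits cleanly.

import cn.outofmemory.kafka.KafkaConsumer;
import com.pt.util.kafka.MyProducer;

// Hypothetical driver, not part of the original post: runs the consumer
// in a background thread, sends one message, then stops the consumer.
public class Demo {
    public static void main(String[] args) throws InterruptedException {
        final KafkaConsumer consumer = new KafkaConsumer();
        Thread t = new Thread(new Runnable() {
            public void run() {
                consumer.consume(); // blocks in the iterator loop
            }
        });
        t.start();

        MyProducer.sendMsg("hello kafka");

        Thread.sleep(5000); // give the consumer time to print the message
        consumer.stop();    // shutdown() unblocks the iterator and ends consume()
        t.join();
    }
}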