Sending and receiving Kafka data with the Java API

Posted: 2022-10-22 23:34:46

Sending data to the message queue

import java.io.IOException;
import java.util.Properties;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 * Kafka message producer (legacy kafka.javaapi client).
 */
public class SendDataToKafka {

    public static void main(String[] args) {
        SendDataToKafka s = new SendDataToKafka();
        try {
            s.send("test", "jack", "rose");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public void send(String topic, String key, String data) throws IOException {
        Properties props = new Properties();
        props.put("metadata.broker.list", "slave1:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // key.serializer.class defaults to serializer.class
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);

        Producer<String, String> producer = new Producer<String, String>(config);
        for (int i = 0; i < 1000; i++) {
            try {
                Thread.sleep(100); // throttle: send one message every 100 ms
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            producer.send(new KeyedMessage<String, String>(topic, key, data + i));
        }

        producer.close();
    }
}
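The kafka.javaapi.producer API used above belongs to the legacy Scala client, which has long been deprecated and was removed in recent Kafka releases. For comparison, here is a minimal sketch of the same send loop against the modern org.apache.kafka.clients producer (the class name SendDataToKafkaNew is invented for illustration; the broker address slave1:9092 is carried over from the example above):

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class SendDataToKafkaNew {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "slave1:9092"); // broker address from the example above
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        props.put("acks", "1"); // leader acknowledgment, like request.required.acks=1

        // try-with-resources calls close(), which flushes buffered records
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 1000; i++) {
                producer.send(new ProducerRecord<>("test", "jack", "rose" + i));
            }
        }
    }
}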

Receiving messages from the message queue


import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

/**
 * Kafka message consumer (legacy high-level consumer).
 */
public class GetDataFromKafka implements Runnable {

    private String topic;
    private String path;

    public GetDataFromKafka(String topic, String path) {
        this.path = path;
        this.topic = topic;
    }

    public static void main(String[] args) {
        GetDataFromKafka consumerTask = new GetDataFromKafka("test", "d:\\clusterMonitor.rrd");
        new Thread(consumerTask).start();
    }

    @Override
    public void run() {
        System.out.println("start running consumer");

        Properties properties = new Properties();
        properties.put("zookeeper.connect", "192.168.10.219:2181"); // ZooKeeper address
        // Consumer group id: consumers in the same group share the topic's
        // partitions, so each message is delivered to only one of them.
        properties.put("group.id", "CMMtest");
        properties.put("auto.offset.reset", "largest");
        ConsumerConnector consumer = Consumer
                .createJavaConsumerConnector(new ConsumerConfig(properties));

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, 1); // one stream (thread) for this topic
        Map<String, List<KafkaStream<byte[], byte[]>>> messageStreams = consumer
                .createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = messageStreams.get(topic).get(0); // the single stream
        ConsumerIterator<byte[], byte[]> iterator = stream.iterator();
        while (iterator.hasNext()) {
            String message = new String(iterator.next().message());
            // Message format: hostName + ";" + ip + ";" + commandName + ";" + res + ";" + System.currentTimeMillis();
            // Note: if offsets are never committed, the consumer is likely to
            // re-read messages from the beginning after a restart.
            consumer.commitOffsets();
            System.out.println(message);
        }
    }
}
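The high-level ConsumerConnector API above is likewise part of the removed legacy client. A rough equivalent using the modern KafkaConsumer (requires kafka-clients 2.0+ for poll(Duration); the class name GetDataFromKafkaNew is invented, and the broker address 192.168.10.219:9092 is an assumption, since the original only gives the ZooKeeper address):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class GetDataFromKafkaNew {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Connects to the broker directly; ZooKeeper is no longer needed.
        props.put("bootstrap.servers", "192.168.10.219:9092");
        props.put("group.id", "CMMtest");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());
        props.put("auto.offset.reset", "latest"); // equivalent of "largest" in the old API
        props.put("enable.auto.commit", "false"); // commit manually, as in the original

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("test"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                }
                // Persist offsets so messages are not re-read after a restart.
                consumer.commitSync();
            }
        }
    }
}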