In the previous post we implemented secondary sort by cookieId and time. Now a new requirement comes up: what if I need to analyze by cookieId and by the combination of cookieId and time? The cleanest way to do this is a custom InputFormat, so that MapReduce reads all the records under one cookieId in a single call, and the records are then cut into sessions by time. The logic, in pseudocode:
for OneSplit in MyInputFormat.getSplit()   // OneSplit is all the records under one cookieId
    for session in OneSplit                // a session is a slice of OneSplit, cut by time
        for line in session                // a line is one record in the session, i.e. one raw log line
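For reference, here is a hedged sketch (not part of the original code) of what the mapper-side session splitting could look like once each map() call receives a whole cookieId block. It assumes tab-separated fields, a numeric timestamp in the second column, and an arbitrary 30-minute session gap; class and key names are made up for illustration.

package MyInputFormat;   // hypothetical placement, alongside the classes below

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch only: split one cookieId block (the whole map value) into sessions by time gap.
public class SessionMapper extends Mapper<LongWritable, Text, Text, Text> {
    private static final long SESSION_GAP_MS = 30 * 60 * 1000L;  // assumed threshold

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        int sessionId = 0;
        long lastTime = Long.MIN_VALUE;
        for (String line : value.toString().split("\n")) {        // every line of this cookieId
            String[] fields = line.split("\t");
            long time = Long.parseLong(fields[1]);                 // assumes a numeric time column
            if (lastTime != Long.MIN_VALUE && time - lastTime > SESSION_GAP_MS) {
                sessionId++;                                       // gap too large: start a new session
            }
            lastTime = time;
            context.write(new Text(fields[0] + "_session_" + sessionId), new Text(line));
        }
    }
}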
1. Principle:

A custom input format has two parts: an InputFormat that decides how the input is split and which RecordReader to use, and a RecordReader that turns the bytes of a split into key/value pairs. The implementation below uses the new org.apache.hadoop.mapreduce API, whose abstract methods are:

public abstract class InputFormat<K, V> {
    public abstract List<InputSplit> getSplits(JobContext context)
            throws IOException, InterruptedException;
    public abstract RecordReader<K, V> createRecordReader(InputSplit split,
            TaskAttemptContext context) throws IOException, InterruptedException;
}

public abstract class RecordReader<KEYIN, VALUEIN> implements Closeable {
    public abstract void initialize(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException;
    public abstract boolean nextKeyValue() throws IOException, InterruptedException;
    public abstract KEYIN getCurrentKey() throws IOException, InterruptedException;
    public abstract VALUEIN getCurrentValue() throws IOException, InterruptedException;
    public abstract float getProgress() throws IOException, InterruptedException;
    public abstract void close() throws IOException;
}
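To make the control flow concrete, the following is a rough sketch of how the framework consumes these two classes. It is illustrative only (the contexts are objects the framework creates; here they are simply passed in), not the actual MapTask source:

// Illustrative sketch: roughly how the framework drives a custom InputFormat/RecordReader pair.
static <K, V> void dumpAllRecords(org.apache.hadoop.mapreduce.InputFormat<K, V> format,
        org.apache.hadoop.mapreduce.JobContext jobContext,
        org.apache.hadoop.mapreduce.TaskAttemptContext taskContext) throws Exception {
    for (org.apache.hadoop.mapreduce.InputSplit split : format.getSplits(jobContext)) {
        org.apache.hadoop.mapreduce.RecordReader<K, V> reader =
                format.createRecordReader(split, taskContext);
        reader.initialize(split, taskContext);          // open the stream, seek to the split
        while (reader.nextKeyValue()) {                 // one iteration per record (per cookieId block here)
            // the framework feeds these two values into Mapper.map(key, value, context)
            System.out.println(reader.getCurrentKey() + " => " + reader.getCurrentValue());
        }
        reader.close();
    }
}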
2. Code:
package MyInputFormat;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class TrackInputFormat extends FileInputFormat<LongWritable, Text> {

    @SuppressWarnings("deprecation")
    @Override
    public RecordReader<LongWritable, Text> createRecordReader(
            InputSplit split, TaskAttemptContext context) {
        return new TrackRecordReader();
    }

    @Override
    protected boolean isSplitable(JobContext context, Path file) {
        CompressionCodec codec = new CompressionCodecFactory(
                context.getConfiguration()).getCodec(file);
        return codec == null;
    }
}
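isSplitable returns false for compressed files because a compressed stream cannot be split at arbitrary byte offsets; in that case the whole file goes to one map task, and the reader below accordingly forces end to Long.MAX_VALUE. As an aside, not from the original post and assuming a Hadoop 2.x-era release: if all you need is a custom record delimiter, without the cookieId/session framing built here, the stock TextInputFormat's line reader already honors a configuration key, set on the driver's Configuration (the conf object created in the test driver further down):

// Aside, assumption: Hadoop 2.x-era TextInputFormat honors this key.
conf.set("textinputformat.record.delimiter", "END\n");
// job.setInputFormatClass(org.apache.hadoop.mapreduce.lib.input.TextInputFormat.class);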
package MyInputFormat;

import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

/**
 * Treats the key as the offset in the file and the value as one record,
 * i.e. one "END\n"-terminated cookieId block.
 */
public class TrackRecordReader extends RecordReader<LongWritable, Text> {
    private static final Log LOG = LogFactory.getLog(TrackRecordReader.class);

    private CompressionCodecFactory compressionCodecs = null;
    private long start;
    private long pos;
    private long end;
    private NewLineReader in;
    private int maxLineLength;
    private LongWritable key = null;
    private Text value = null;
    // Record separator, i.e. the delimiter that ends one record (one cookieId block)
    private byte[] separator = "END\n".getBytes();

    public void initialize(InputSplit genericSplit, TaskAttemptContext context)
            throws IOException {
        FileSplit split = (FileSplit) genericSplit;
        Configuration job = context.getConfiguration();
        this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
                Integer.MAX_VALUE);
        start = split.getStart();
        end = start + split.getLength();
        final Path file = split.getPath();
        compressionCodecs = new CompressionCodecFactory(job);
        final CompressionCodec codec = compressionCodecs.getCodec(file);

        FileSystem fs = file.getFileSystem(job);
        FSDataInputStream fileIn = fs.open(split.getPath());
        boolean skipFirstLine = false;
        if (codec != null) {
            in = new NewLineReader(codec.createInputStream(fileIn), job);
            end = Long.MAX_VALUE;
        } else {
            if (start != 0) {
                skipFirstLine = true;
                this.start -= separator.length;
                fileIn.seek(start);
            }
            in = new NewLineReader(fileIn, job);
        }
        if (skipFirstLine) {
            // skip first record and re-establish "start".
            start += in.readLine(new Text(), 0,
                    (int) Math.min((long) Integer.MAX_VALUE, end - start));
        }
        this.pos = start;
    }

    public boolean nextKeyValue() throws IOException {
        if (key == null) {
            key = new LongWritable();
        }
        key.set(pos);
        if (value == null) {
            value = new Text();
        }
        int newSize = 0;
        while (pos < end) {
            newSize = in.readLine(value, maxLineLength,
                    Math.max((int) Math.min(Integer.MAX_VALUE, end - pos),
                            maxLineLength));
            if (newSize == 0) {
                break;
            }
            pos += newSize;
            if (newSize < maxLineLength) {
                break;
            }
            LOG.info("Skipped line of size " + newSize + " at pos "
                    + (pos - newSize));
        }
        if (newSize == 0) {
            key = null;
            value = null;
            return false;
        } else {
            return true;
        }
    }

    @Override
    public LongWritable getCurrentKey() {
        return key;
    }

    @Override
    public Text getCurrentValue() {
        return value;
    }

    /**
     * Get the progress within the split
     */
    public float getProgress() {
        if (start == end) {
            return 0.0f;
        } else {
            return Math.min(1.0f, (pos - start) / (float) (end - start));
        }
    }

    public synchronized void close() throws IOException {
        if (in != null) {
            in.close();
        }
    }

    public class NewLineReader {
        private static final int DEFAULT_BUFFER_SIZE = 64 * 1024;
        private int bufferSize = DEFAULT_BUFFER_SIZE;
        private InputStream in;
        private byte[] buffer;
        private int bufferLength = 0;
        private int bufferPosn = 0;

        public NewLineReader(InputStream in) {
            this(in, DEFAULT_BUFFER_SIZE);
        }

        public NewLineReader(InputStream in, int bufferSize) {
            this.in = in;
            this.bufferSize = bufferSize;
            this.buffer = new byte[this.bufferSize];
        }

        public NewLineReader(InputStream in, Configuration conf)
                throws IOException {
            this(in, conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE));
        }

        public void close() throws IOException {
            in.close();
        }

        public int readLine(Text str, int maxLineLength, int maxBytesToConsume)
                throws IOException {
            str.clear();
            Text record = new Text();
            int txtLength = 0;
            long bytesConsumed = 0L;
            boolean newline = false;
            int sepPosn = 0;
            do {
                // the buffer has been consumed; read the next buffer
                if (this.bufferPosn >= this.bufferLength) {
                    bufferPosn = 0;
                    bufferLength = in.read(buffer);
                    // reached the end of the file, so break out
                    if (bufferLength <= 0) {
                        break;
                    }
                }
                int startPosn = this.bufferPosn;
                for (; bufferPosn < bufferLength; bufferPosn++) {
                    // handle a separator that was cut in half at the end of the previous
                    // buffer (this can misbehave if the separator has many repeated characters)
                    if (sepPosn > 0 && buffer[bufferPosn] != separator[sepPosn]) {
                        sepPosn = 0;
                    }
                    // hit the first character of the record separator
                    if (buffer[bufferPosn] == separator[sepPosn]) {
                        bufferPosn++;
                        int i = 0;
                        // check whether the following characters also match the separator
                        for (++sepPosn; sepPosn < separator.length; i++, sepPosn++) {
                            // the buffer ends exactly inside the separator, so the
                            // separator is cut in half across buffers
                            if (bufferPosn + i >= bufferLength) {
                                bufferPosn += i - 1;
                                break;
                            }
                            // any mismatching character means this is not the separator
                            if (this.buffer[this.bufferPosn + i] != separator[sepPosn]) {
                                sepPosn = 0;
                                break;
                            }
                        }
                        // a complete record separator was found
                        if (sepPosn == separator.length) {
                            bufferPosn += i;
                            newline = true;
                            sepPosn = 0;
                            break;
                        }
                    }
                }
                int readLength = this.bufferPosn - startPosn;
                bytesConsumed += readLength;
                // the record separator is not included in the emitted record
                if (readLength > maxLineLength - txtLength) {
                    readLength = maxLineLength - txtLength;
                }
                if (readLength > 0) {
                    record.append(this.buffer, startPosn, readLength);
                    txtLength += readLength;
                    // strip the separator off the record
                    if (newline) {
                        str.set(record.getBytes(), 0, record.getLength()
                                - separator.length);
                    }
                }
            } while (!newline && (bytesConsumed < maxBytesToConsume));
            if (bytesConsumed > (long) Integer.MAX_VALUE) {
                throw new IOException("Too many bytes before newline: "
                        + bytesConsumed);
            }
            return (int) bytesConsumed;
        }

        public int readLine(Text str, int maxLineLength) throws IOException {
            return readLine(str, maxLineLength, Integer.MAX_VALUE);
        }

        public int readLine(Text str) throws IOException {
            return readLine(str, Integer.MAX_VALUE, Integer.MAX_VALUE);
        }
    }
}
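Since NewLineReader carries all of the separator-matching logic, it can be sanity-checked outside of MapReduce. A minimal sketch follows (the class name and sample bytes are made up; it only assumes the classes above compile in package MyInputFormat):

package MyInputFormat;

import java.io.ByteArrayInputStream;
import org.apache.hadoop.io.Text;

// Sketch only: feed a few "END\n"-terminated blocks through NewLineReader and
// print each reconstructed record (the separator itself is stripped).
public class SeparatorSmokeTest {
    public static void main(String[] args) throws Exception {
        byte[] data = ("1 a 1_hao123\n1 b 1_google 2END\n"
                     + "2 c 2_google 1END\n").getBytes();
        // NewLineReader is a non-static inner class, so it needs an enclosing instance.
        TrackRecordReader.NewLineReader reader =
                new TrackRecordReader().new NewLineReader(new ByteArrayInputStream(data));
        Text record = new Text();
        while (reader.readLine(record) > 0) {
            System.out.println("[" + record + "]");
        }
        reader.close();
    }
}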
package MyInputFormat;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class TestMyInputFormat {

    public static class MapperClass extends Mapper<LongWritable, Text, Text, Text> {

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            System.out.println("key:\t " + key);
            System.out.println("value:\t " + value);
            System.out.println("-------------------------");
        }
    }

    public static void main(String[] args) throws IOException,
            InterruptedException, ClassNotFoundException {
        Configuration conf = new Configuration();
        Path outPath = new Path("/hive/11");
        FileSystem.get(conf).delete(outPath, true);
        Job job = new Job(conf, "TestMyInputFormat");
        job.setInputFormatClass(TrackInputFormat.class);
        job.setJarByClass(TestMyInputFormat.class);
        job.setMapperClass(TestMyInputFormat.MapperClass.class);
        job.setNumReduceTasks(0);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job, outPath);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
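One possible refinement, not in the original code: instead of hardcoding "END\n" inside TrackRecordReader, the separator could be taken from the job configuration. The key name "myinputformat.record.separator" below is made up for illustration; the two statements sketch where each side of the change would go.

// Driver side (e.g. in TestMyInputFormat.main, before the job is submitted) -- hypothetical key:
conf.set("myinputformat.record.separator", "END\n");

// Reader side (in TrackRecordReader.initialize, once the Configuration "job" is available):
this.separator = job.get("myinputformat.record.separator", "END\n").getBytes();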
3. Test data (columns: cookieId, time, url, cookieOverFlag; the last line of each cookieId block carries the END record separator):
1 a 1_hao123
1 a 1_baidu
1 b 1_google 2END
2 c 2_google
2 c 2_hao123
2 c 2_google 1END
3 a 3_baidu
3 a 3_sougou
3 b 3_soso 2END
4. Result: each map() call now receives one whole cookieId block; the key is the byte offset at which the block starts in the input file, and the value is the block with the END separator stripped:
key:	 0
value:	 1 a 1_hao123
1 a 1_baidu
1 b 1_google 2
-------------------------
key:	 47
value:	 2 c 2_google
2 c 2_hao123
2 c 2_google 1
-------------------------
key:	 96
value:	 3 a 3_baidu
3 a 3_sougou
3 b 3_soso 2
-------------------------
REF:
Custom Hadoop map/reduce InputFormat for splitting input files
http://hi.baidu.com/lzpsky/item/0d9d84c05afb43ba0c0a7b27
Advanced MapReduce programming: custom InputFormat
http://datamining.xmu.edu.cn/bbs/home.php?mod=space&uid=91&do=blog&id=190
http://irwenqiang.iteye.com/blog/1448164
http://my.oschina.net/leejun2005/blog/133424