话不多说,看代码和效果
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
|
/**
* 根据map中的某个key 去除list中重复的map
* @author shijing
* @param list
* @param mapkey
* @return
*/
/**
 * Removes duplicate maps from a list, using the value stored under the given
 * key as the identity. When several maps carry the same key value, the FIRST
 * occurrence in the list is kept (the original backwards scan made earlier
 * elements overwrite later ones — a forward scan with putIfAbsent preserves
 * exactly that "first wins" rule, and additionally keeps the original order).
 *
 * Unlike the original, this does NOT mutate the caller's maps: the original
 * removed the key from each map and re-inserted it later, which temporarily
 * corrupted the input data.
 *
 * @param list   the list of maps to deduplicate; may be null or empty
 * @param mapKey the key whose value identifies a duplicate
 * @return a new list with one map per distinct key value, in order of first
 *         occurrence; null when the input is null or empty (preserved from
 *         the original contract)
 */
public static List<Map<String, Object>> removeRepeatMapByKey(List<Map<String, Object>> list, String mapKey) {
    // Preserve the original contract: null/empty input -> null.
    if (list == null || list.isEmpty()) {
        return null;
    }
    // LinkedHashMap + putIfAbsent: the first map seen for each id wins, and
    // iteration order equals insertion order, so the result is deterministic.
    Map<Object, Map<String, Object>> firstById = new LinkedHashMap<>();
    for (Map<String, Object> map : list) {
        firstById.putIfAbsent(map.get(mapKey), map);
    }
    return new ArrayList<>(firstById.values());
}
|
测试:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
|
/**
 * Demo entry point: builds a list containing two maps that share the same
 * "id" value ("1123") and prints the list before and after deduplicating it
 * with removeRepeatMapByKey. (The original declared two extra locals, msp
 * and listmap, that were never used — removed.)
 *
 * @param args unused
 */
public static void main(String[] args) {
    List<Map<String, Object>> list = new ArrayList<>();
    Map<String, Object> map1 = new HashMap<>();
    map1.put("id", "1123");
    map1.put("name", "张三");
    Map<String, Object> map2 = new HashMap<>();
    map2.put("id", "2");
    map2.put("name", "李四");
    Map<String, Object> map3 = new HashMap<>();
    map3.put("id", "1123");
    map3.put("name", "王五");
    Map<String, Object> map4 = new HashMap<>();
    map4.put("id", "3");
    map4.put("name", "赵六");
    list.add(map1);
    list.add(map2);
    list.add(map3);
    list.add(map4);
    System.out.println("初始数据:" + list.toString());
    System.out.println("去重之后:" + removeRepeatMapByKey(list, "id"));
}
|
结果:
初始数据:[{name=张三, id=1123}, {name=李四, id=2}, {name=王五, id=1123}, {name=赵六, id=3}]
去重之后:[{name=李四, id=2}, {name=赵六, id=3}, {name=张三, id=1123}]
总结
以上就是这篇文章的全部内容了,希望本文的内容对大家的学习或者工作具有一定的参考学习价值,谢谢大家对服务器之家的支持。如果你想了解更多相关内容请查看下面相关链接
原文链接:https://blog.csdn.net/moneyshi/article/details/81220421