[Java 8 Source Code Analysis] Collections Framework - HashMap

Date: 2022-07-22 17:19:11

1. HashMap's Storage Structure

There are two node storage classes in total:

// 1. Node type used for chaining on hash collisions; a bin is converted to
//    TreeNodes once it holds more than 8 elements (TREEIFY_THRESHOLD)
static class Node<K,V> implements Map.Entry<K,V>
// 2. Node type used for red-black tree storage on hash collisions; a bin is
//    converted back to plain Nodes once it shrinks to 6 or fewer elements (UNTREEIFY_THRESHOLD)
static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V>

Let's look at the Node class in detail:

// The per-bin storage structure; equals and hashCode are overridden
static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;
    final K key;
    V value;
    Node<K,V> next;

    Node(int hash, K key, V value, Node<K,V> next) {
        this.hash = hash;
        this.key = key;
        this.value = value;
        this.next = next;
    }

    public final K getKey()        { return key; }
    public final V getValue()      { return value; }
    public final String toString() { return key + "=" + value; }

    public final int hashCode() {
        return Objects.hashCode(key) ^ Objects.hashCode(value);
    }

    public final V setValue(V newValue) {
        V oldValue = value;
        value = newValue;
        return oldValue;
    }

    public final boolean equals(Object o) {
        if (o == this)
            return true;
        if (o instanceof Map.Entry) {
            Map.Entry<?,?> e = (Map.Entry<?,?>)o;
            if (Objects.equals(key, e.getKey()) &&
                Objects.equals(value, e.getValue()))
                return true;
        }
        return false;
    }
}

2. Hash Computation and Bucket Mapping

Below is the hash computation method. It XORs the high 16 bits of the key's hashCode into the low 16 bits, so that the upper bits (which the bucket mask would otherwise discard for small tables) still influence the bucket index; one shift and one XOR keep it cheap while spreading hash codes reasonably uniformly.

// The hash ("spreading") function: efficient and reasonably uniform
static final int hash(Object key) {
    int h;
    return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
}

Mapping a hash value to a bucket index:

// n is the number of buckets (the table length, always a power of two),
// so this is equivalent to hash mod n
(n - 1) & hash
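
As a quick, hypothetical illustration of the two steps above (spreading the high bits, then masking with n - 1), the following standalone sketch reproduces them outside of HashMap; the class name HashDemo and the sample keys are made up for this post:

public class HashDemo {
    // Mirrors HashMap.hash(): XOR the high 16 bits into the low 16 bits, so
    // keys whose hashCodes differ only in the high bits can still land in
    // different buckets of a small table.
    static int hash(Object key) {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    public static void main(String[] args) {
        int n = 16;  // table length (always a power of two)
        for (String key : new String[] {"a", "b", "hello"}) {
            int h = hash(key);
            System.out.printf("%-5s hashCode=%d spread=%d bucket=%d%n",
                    key, key.hashCode(), h, (n - 1) & h);
        }
    }
}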

3. Source Code Analysis

package java.util;

import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;

public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;

    // Default initial table size: 16
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4;

    static final int MAXIMUM_CAPACITY = 1 << 30;

    // Default load factor: 0.75
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    // When resolving collisions, a bin with more than 8 elements is converted
    // from a linked list to a red-black tree
    static final int TREEIFY_THRESHOLD = 8;

    // When a treeified bin shrinks to 6 or fewer elements, it is converted
    // back to a linked list
    static final int UNTREEIFY_THRESHOLD = 6;

    // Treeifying a bin requires a table of at least 64 buckets; below that,
    // the table is resized instead
    static final int MIN_TREEIFY_CAPACITY = 64;

    // The hash ("spreading") function: efficient and reasonably uniform
    static final int hash(Object key) {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    // Returns the smallest power of two that is not less than cap
    static final int tableSizeFor(int cap) {
        int n = cap - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /* ---------------- Fields -------------- */

    // The hash table itself: an array of bins
    transient Node<K,V>[] table;

    // Cached entry-set view
    transient Set<Map.Entry<K,V>> entrySet;

    transient int size;

    // Already seen in ArrayList and LinkedList: used to implement fail-fast iteration
    transient int modCount;

    // The resize threshold: once size exceeds it, the table is resized
    // (threshold = capacity * load factor)
    int threshold;

    // The load factor
    final float loadFactor;

    // Constructor. If the expected size can be estimated, passing an initial
    // capacity avoids repeated resizing; the default load factor of 0.75 is
    // usually fine
    public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " +
                                               initialCapacity);
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        // The smallest power of two not less than initialCapacity
        this.threshold = tableSizeFor(initialCapacity);
    }

    public HashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }
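
    // Hypothetical sizing note (not part of the JDK source): to hold about
    // 1000 entries without any resize, new HashMap<>((int) (1000 / 0.75f) + 1)
    // yields tableSizeFor(1334) == 2048, and 2048 * 0.75 = 1536 >= 1000;
    // new HashMap<>(1000) only ends up with 1024 buckets, which triggers a
    // resize once the 769th entry is added (threshold 1024 * 0.75 = 768).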

    public HashMap() {
        this.loadFactor = DEFAULT_LOAD_FACTOR;
    }

    // This constructor first initializes an empty HashMap, then adds every
    // entry of the given map
    public HashMap(Map<? extends K, ? extends V> m) {
        this.loadFactor = DEFAULT_LOAD_FACTOR;
        // Copy all entries of m into this HashMap
        putMapEntries(m, false);
    }

    final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
        int s = m.size();
        if (s > 0) {
            if (table == null) { // pre-size
                float ft = ((float)s / loadFactor) + 1.0F;
                int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                         (int)ft : MAXIMUM_CAPACITY);
                if (t > threshold)
                    threshold = tableSizeFor(t);
            }
            else if (s > threshold)
                resize();
            for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
                K key = e.getKey();
                V value = e.getValue();
                putVal(hash(key), key, value, false, evict);
            }
        }
    }

    public int size() {
        return size;
    }

    public boolean isEmpty() {
        return size == 0;
    }

    // Look up the value for a key
    public V get(Object key) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? null : e.value;
    }

    final Node<K,V> getNode(int hash, Object key) {
        Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            // (n - 1) & hash selects the bin for this hash value
            (first = tab[(n - 1) & hash]) != null) {
            // Check the first node; for a bin without collisions this is the hit
            if (first.hash == hash &&
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
            if ((e = first.next) != null) {
                // If the bin has been treeified (converted to a red-black tree
                // after exceeding 8 elements), search the tree
                if (first instanceof TreeNode)
                    return ((TreeNode<K,V>)first).getTreeNode(hash, key);
                // Otherwise walk the linked list
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }

    public boolean containsKey(Object key) {
        return getNode(hash(key), key) != null;
    }

    // Insert a key-value mapping
    public V put(K key, V value) {
        return putVal(hash(key), key, value, false, true);
    }

    // onlyIfAbsent: if true, only insert when the key is not already present
    // evict: if false, the map is in creation mode (copy constructor / deserialization)
    // Both parameters exist to support the Java 8 API additions and the LinkedHashMap callback hooks
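    // A hypothetical illustration of onlyIfAbsent at the public API level (not in the JDK source):
    //   map.put("k", 1);          // putVal(..., onlyIfAbsent = false, ...) overwrites an existing value
    //   map.putIfAbsent("k", 2);  // putVal(..., onlyIfAbsent = true,  ...) keeps the existing 1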
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        if ((tab = table) == null || (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((p = tab[i = (n - 1) & hash]) == null)
            // The bin is empty: insert directly
            tab[i] = newNode(hash, key, value, null);
        else {
            // Here e will point to the existing node whose key equals the key
            // being inserted (or stay null if there is none); the code below locates it
            Node<K,V> e; K k;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;
            else if (p instanceof TreeNode)
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            else {
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            // The treeify threshold has been reached
                            treeifyBin(tab, hash);
                        break;
                    }
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                // Callback hook (overridden by LinkedHashMap)
                afterNodeAccess(e);
                return oldValue;
            }
        }
        ++modCount;
        if (++size > threshold)
            resize();
        // Callback hook (overridden by LinkedHashMap)
        afterNodeInsertion(evict);
        return null;
    }

    // The resize function: if the table is empty, allocate it at the default
    // (or requested) size; otherwise double its capacity.
    // Note: because the capacity is always doubled and the bucket index is
    // (n - 1) & hash, each element's index in the new table is either the same
    // as its old index or the old index plus oldCap.
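    // A hypothetical worked example of that split: with oldCap = 16, two keys
    // that both map to bucket 5 (hash bits ...x_0101) are separated by bit 4:
    //   hash = ...0_0101  ->  (hash & oldCap) == 0       -> stays at index 5
    //   hash = ...1_0101  ->  (hash & oldCap) == oldCap  -> moves to index 5 + 16 = 21
    // This is why the loop below only tests (e.hash & oldCap) and never
    // recomputes the full bucket index.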
    final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        // The code below computes the new capacity and the new threshold
        int newCap, newThr = 0;
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                // Double the threshold along with the capacity
                newThr = oldThr << 1;
        }
        else if (oldThr > 0)
            newCap = oldThr;
        else {
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        if (newThr == 0) {
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;

        // Allocate the new table; every element of the old table has to be
        // moved over, with its new bin computed from its hash value
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null;
                    if (e.next == null)
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }

    // Convert a bin's linked list into a red-black tree (or resize instead
    // if the table is still smaller than MIN_TREEIFY_CAPACITY)
    final void treeifyBin(Node<K,V>[] tab, int hash) {
        int n, index; Node<K,V> e;
        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
            resize();
        else if ((e = tab[index = (n - 1) & hash]) != null) {
            TreeNode<K,V> hd = null, tl = null;
            do {
                TreeNode<K,V> p = replacementTreeNode(e, null);
                if (tl == null)
                    hd = p;
                else {
                    p.prev = tl;
                    tl.next = p;
                }
                tl = p;
            } while ((e = e.next) != null);
            if ((tab[index] = hd) != null)
                hd.treeify(tab);
        }
    }
}
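
The listing stops at treeifyBin; the TreeNode internals are omitted. As a quick end-to-end check of the treeification behaviour governed by TREEIFY_THRESHOLD and MIN_TREEIFY_CAPACITY, the following standalone sketch (class names CollidingKeyDemo and BadKey are made up for this post) forces every key into one bucket and shows that the map still behaves correctly once the bin has been treeified:

import java.util.HashMap;
import java.util.Map;

public class CollidingKeyDemo {
    // A key type whose hashCode always collides, so all entries land in the
    // same bucket; implementing Comparable lets the tree order the keys.
    static final class BadKey implements Comparable<BadKey> {
        final int id;
        BadKey(int id) { this.id = id; }
        @Override public int hashCode() { return 42; }  // every key collides
        @Override public boolean equals(Object o) {
            return o instanceof BadKey && ((BadKey) o).id == id;
        }
        @Override public int compareTo(BadKey o) { return Integer.compare(id, o.id); }
    }

    public static void main(String[] args) {
        Map<BadKey, Integer> map = new HashMap<>();
        for (int i = 0; i < 100; i++) {
            map.put(new BadKey(i), i);
        }
        // All 100 entries share one bucket; once the chain exceeds 8 entries
        // and the table has grown to 64 buckets, the bin is treeified, yet
        // every value is still retrievable.
        System.out.println(map.get(new BadKey(57)));  // prints 57
        System.out.println(map.size());               // prints 100
    }
}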

4. Summary

  • HashMap's default initial capacity is 16 and its default load factor is 0.75.
  • When the linked list in a bin grows beyond 8 nodes, the bin is treeified into a red-black tree, whose lookups are faster for long chains (O(log n) rather than O(n)); when a treeified bin shrinks to 6 or fewer nodes, it is converted back into a linked list.
  • The larger the load factor, the more fully the table's space is used, but lookups slow down because the chains get longer. If the load factor is too small, the table stays sparse (it is resized while much of it is still unused), wasting space. When the constructor does not specify one, the default of 0.75 is used; it is a good trade-off and rarely needs changing.
  • The natural way to map a hash value onto a table is to take it modulo the table length (division hashing), which is what Hashtable does. That spreads elements fairly evenly, but the modulo involves a division, which is slow. HashMap instead uses h & (length - 1), which achieves the same uniform spread for power-of-two lengths at much lower cost; this is one of HashMap's improvements over Hashtable.
  • The table capacity must be a power of two. First, when length is a power of two, h & (length - 1) is equivalent to h mod length, so the distribution stays uniform while the computation stays fast. Second, length - 1 then has all of its low bits set to 1, so the lowest bit of h & (length - 1) can be either 0 or 1 depending on h, and both even and odd indexes are reachable. If length were odd, length - 1 would be even and end in a 0 bit, so h & (length - 1) would always be even; every entry would land on an even index and nearly half the table would be wasted. Keeping the capacity a power of two therefore keeps collisions low and spreads entries evenly across the whole table (see the sketch after this list).
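
The following small sketch (the class name IndexDemo is made up for this post) checks both of the last two points: for a power-of-two length, h & (length - 1) matches the floor modulo while avoiding a division, and for an odd length the mask can only ever produce even indexes:

public class IndexDemo {
    public static void main(String[] args) {
        int n = 16;  // a power of two, as HashMap guarantees
        for (int h : new int[] {0, 1, 15, 16, 17, 12345, -7}) {
            // For a power-of-two n, masking with (n - 1) equals the floor
            // modulo, but costs a single AND instead of a division.
            System.out.printf("h=%6d  h & (n-1)=%2d  floorMod(h, n)=%2d%n",
                    h, h & (n - 1), Math.floorMod(h, n));
        }
        // With an odd length, length - 1 is even (its lowest bit is 0), so the
        // mask only ever yields even indexes and half the table goes unused.
        int oddLength = 15;
        for (int h : new int[] {1, 2, 3, 4, 5}) {
            System.out.println("h=" + h + "  h & (oddLength-1)=" + (h & (oddLength - 1)));
        }
    }
}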

5. References

http://blog.csdn.net/ns_code/article/details/36034955