A Light Reading of java.util.Map and Its Implementation Classes (Part 3)

Date: 2021-08-27 16:21:45

ConcurrentHashMap   


Source Code Analysis (JDK 1.8)


We will walk through the internal source code by following these common operations:
public static void main(String[] args) {
    ConcurrentHashMap<String, Integer> ccMap = new ConcurrentHashMap<>();
    ccMap.put("A", 12306);
    ccMap.remove("B");
    ccMap.put("A", 456);
    System.out.printf("%s %d", ccMap.get("A"), ccMap.size());
    ccMap.clear();
}
new ConcurrentHashMap<K,V>
public ConcurrentHashMap() {
    // default constructor: nothing to compute, everything is lazy
}

// MAXIMUM_CAPACITY = 1 << 30, the largest possible table size
public ConcurrentHashMap(int initialCapacity) { // initialCapacity: the requested initial capacity
    if (initialCapacity < 0)
        throw new IllegalArgumentException();
    int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
               MAXIMUM_CAPACITY :
               tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
    // tableSizeFor rounds the size up to a power of two with a few bit operations, and the
    // result becomes sizeCtl; see section 3.2 of
    // http://download.csdn.net/detail/crazyzxljing0621/9881657 for the bit tricks
    this.sizeCtl = cap;
    // sizeCtl is a control flag:
    //   a negative value means the table is being initialized or resized: -1 for initialization,
    //   otherwise -N where N = 1 + the number of threads currently helping to resize;
    //   while table is still null it holds the initial table size,
    //   and after initialization it holds the threshold for the next resize
}

public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
    this.sizeCtl = DEFAULT_CAPACITY;
    putAll(m);
}

public ConcurrentHashMap(int initialCapacity, float loadFactor) {
    this(initialCapacity, loadFactor, 1);
}

public ConcurrentHashMap(int initialCapacity,
                         float loadFactor, int concurrencyLevel) { // capacity, load factor, estimated number of concurrently updating threads
    if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
        throw new IllegalArgumentException();
    if (initialCapacity < concurrencyLevel)
        initialCapacity = concurrencyLevel; // the capacity must be at least the concurrency level
    long size = (long)(1.0 + (long)initialCapacity / loadFactor);
    // the capacity and the load factor together determine the table size: a larger factor
    // gives a smaller, more tightly packed table with more potential collisions, while an
    // oversized table simply wastes memory
    int cap = (size >= (long)MAXIMUM_CAPACITY) ?
        MAXIMUM_CAPACITY : tableSizeFor((int)size);
    this.sizeCtl = cap;
}
// Notice that none of the constructors actually allocates the table; they only compute and
// record parameters. So where is the table really initialized? Keep reading.
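As a side note before moving on: the bit trick inside tableSizeFor simply rounds the requested size up to the next power of two (capped at MAXIMUM_CAPACITY), which is what makes the (n - 1) & hash bin indexing used below work. Here is a small sketch of that rounding, with a throwaway main to show the numbers:

public class TableSizeForSketch { // throwaway demo class
    static final int MAXIMUM_CAPACITY = 1 << 30;

    // round c up to the next power of two, mirroring ConcurrentHashMap.tableSizeFor
    static int tableSizeFor(int c) {
        int n = c - 1;          // so that an exact power of two is not doubled
        n |= n >>> 1;           // smear the highest set bit downwards...
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;          // ...until every bit below it is also set
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        System.out.println(tableSizeFor(17));                  // 32
        System.out.println(tableSizeFor(32));                  // 32
        // per the one-arg constructor above, new ConcurrentHashMap<>(16) stores
        // sizeCtl = tableSizeFor(16 + (16 >>> 1) + 1) = 32
        System.out.println(tableSizeFor(16 + (16 >>> 1) + 1)); // 32
    }
}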
ConcurrentHashMap.put(K, V)
public V put(K key, V value) {
    return putVal(key, value, false);
}

final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException(); // ConcurrentHashMap allows neither null keys nor null values
    int hash = spread(key.hashCode()); // spread the hash bits to reduce the chance of collisions
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) { // retry loop; on the very first put the table is still null and is created by initTable() below
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable(); // lazy initialization: the table is only allocated on the first insert
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { // locate the target bin from the hash; f is its head Node<K,V> if the bin is non-empty
            if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value, null))) // empty bin: CAS the new node in directly, no lock needed
                break;
            // compare-and-swap is a mechanism for avoiding the cost of locks under concurrent updates.
            // A CAS takes three operands: a memory location (V), the expected old value (A) and the new
            // value (B). If the location still holds the expected value, the processor atomically updates
            // it to the new value; otherwise it does nothing. Either way it reports the value that was at
            // the location. In effect CAS says: "I think location V should hold A; if it does, put B
            // there, otherwise leave it alone and just tell me what it currently holds."
        }
        else if ((fh = f.hash) == MOVED) // the table is being resized: help with the transfer
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            synchronized (f) { // lock the head node of this bin
                if (tabAt(tab, i) == f) { // re-check that the head node has not changed
                    if (fh >= 0) { // fh >= 0: the bin is a linked list (MOVED = -1 is the hash of forwarding nodes)
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            // compare the hash and the key
                            if (e.hash == hash &&
                                ((ek = e.key) == key || (ek != null && key.equals(ek)))) {
                                // remember the old value
                                oldVal = e.val;
                                if (!onlyIfAbsent) // overwrite the value on the existing node
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                // no matching key: append a new Node at the tail of the list,
                                // i.e. resolve the hash collision by chaining
                                pred.next = new Node<K,V>(hash, key, value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) { // a tree bin keeps its entries in a red-black tree
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) { // insert the value into the tree
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD) // a list longer than TREEIFY_THRESHOLD = 8 is converted to a tree
                    treeifyBin(tab, i);            // (treeifyBin grows the table instead while it is smaller than MIN_TREEIFY_CAPACITY = 64)
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    addCount(1L, binCount); // bump the element count
    return null;
}
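The long comment inside putVal describes compare-and-swap in the abstract. Below is a minimal standalone sketch of the same idea using AtomicReference.compareAndSet; the map itself goes through Unsafe-based intrinsics (casTabAt), so this is only an analogy, not the actual implementation:

import java.util.concurrent.atomic.AtomicReference;

public class CasSketch { // throwaway demo class
    public static void main(String[] args) {
        AtomicReference<String> slot = new AtomicReference<>(null); // stands in for an empty bin
        // "I expect the slot to hold null; if it does, put FIRST there" -- succeeds
        boolean first = slot.compareAndSet(null, "FIRST");
        // the same CAS attempted again fails, because the slot no longer holds null;
        // putVal reacts to such a failure by retrying its outer loop
        boolean second = slot.compareAndSet(null, "SECOND");
        System.out.println(first + " " + second + " " + slot.get()); // true false FIRST
    }
}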
public void putAll(Map<? extends K, ? extends V> m) {
    tryPresize(m.size()); // grow the table to a suitable size up front, then just iterate and insert
    for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
        putVal(e.getKey(), e.getValue(), false);
}
// To sum up, put uses synchronized to insert new values: it first checks whether the target bin already
// holds anything; an empty bin gets the new node via a single CAS, otherwise the head node is locked and
// the entry is inserted into the bin's linked list or red-black tree. Once a list grows past the default
// threshold of 8, the bin is converted to a tree.
// And whenever ((fh = f.hash) == MOVED) shows that the table is being resized, the thread first helps
// with the transfer instead of inserting.
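The onlyIfAbsent flag threaded through putVal is what backs putIfAbsent, and the null check at its top is why ConcurrentHashMap rejects null keys and values (unlike HashMap). A short throwaway demo of both behaviours:

import java.util.concurrent.ConcurrentHashMap;

public class PutDemo { // throwaway demo class
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("A", 1);                  // putVal(key, value, false): overwrites existing values
        map.putIfAbsent("A", 2);          // putVal(key, value, true): "A" is present, so 1 is kept
        System.out.println(map.get("A")); // 1
        try {
            map.put(null, 3);             // null keys (and null values) throw NullPointerException
        } catch (NullPointerException expected) {
            System.out.println("null key rejected");
        }
    }
}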
ConcurrentHashMap.remove(K, V)
public boolean remove(Object key, Object value) {
    if (key == null)
        throw new NullPointerException();
    return value != null && replaceNode(key, null, value) != null;
}

final V replaceNode(Object key, V value, Object cv) {
    int hash = spread(key.hashCode()); // same hash spreading as put
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0 ||
            (f = tabAt(tab, i = (n - 1) & hash)) == null) // the table does not exist yet, or the bin is empty: nothing to remove
            break;
        else if ((fh = f.hash) == MOVED) // same as put: the table is being resized, help with the transfer
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            boolean validated = false;
            synchronized (f) { // lock the head node of the bin being modified
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) { // linked-list bin
                        validated = true;
                        for (Node<K,V> e = f, pred = null;;) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) { // compare the parameter against the node's hash and key
                                V ev = e.val; // the node's current value
                                if (cv == null || cv == ev ||
                                    (ev != null && cv.equals(ev))) {
                                    // the expected value cv matches the current value:
                                    // remove(K,V) and replace(K,oldV,newV) compare with equals,
                                    // while remove(K) calls replaceNode(K, null, null), so cv is null and always matches
                                    oldVal = ev; // remember the old value
                                    if (value != null) // replace: overwrite the value on the existing node
                                        e.val = value;
                                    else if (pred != null) // remove a node in the middle or at the tail of the list
                                        pred.next = e.next;
                                    else
                                        setTabAt(tab, i, e.next); // remove the head: make e.next the new head of bin i, which erases the current node
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                    else if (f instanceof TreeBin) { // tree bin
                        validated = true;
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> r, p;
                        if ((r = t.root) != null &&
                            (p = r.findTreeNode(hash, key, null)) != null) {
                            V pv = p.val;
                            if (cv == null || cv == pv ||
                                (pv != null && cv.equals(pv))) {
                                oldVal = pv;
                                if (value != null)
                                    p.val = value;
                                else if (t.removeTreeNode(p))            // remove the node from the red-black tree; if the tree
                                    setTabAt(tab, i, untreeify(t.first)); // has become small enough, turn the bin back into a list
                            }
                        }
                    }
                }
            }
            if (validated) {
                if (oldVal != null) {
                    if (value == null)
                        addCount(-1L, -1); // a node was actually removed: decrement the count
                    return oldVal;
                }
                break;
            }
        }
    }
    return null;
}
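As the comments above note, remove(key) delegates to replaceNode(key, null, null), while remove(key, value) and replace(key, oldValue, newValue) pass the expected value as cv and compare it with equals before touching the node. A short throwaway demo of the resulting behaviour:

import java.util.concurrent.ConcurrentHashMap;

public class RemoveDemo { // throwaway demo class
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("A", 456);
        System.out.println(map.remove("A", 999));       // false: cv = 999 does not equal 456, nothing removed
        System.out.println(map.replace("A", 456, 789)); // true: cv matches, the node's value is replaced in place
        System.out.println(map.remove("A"));            // 789: unconditional remove returns the old value
    }
}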
ConcurrentHashMap.clear()
public void clear() {
    long delta = 0L; // negative number of deletions
    int i = 0;
    Node<K,V>[] tab = table;
    while (tab != null && i < tab.length) {
        int fh;
        Node<K,V> f = tabAt(tab, i); // walk the table bin by bin
        if (f == null)
            ++i;
        else if ((fh = f.hash) == MOVED) { // a resize is in progress: help with the transfer and restart from bin 0
            tab = helpTransfer(tab, f);
            i = 0; // restart
        }
        else {
            synchronized (f) { // lock the head node of the bin
                if (tabAt(tab, i) == f) {
                    Node<K,V> p = (fh >= 0 ? f :
                                   (f instanceof TreeBin) ?
                                   ((TreeBin<K,V>)f).first : null);
                    // fetch the first entry whether the bin is a linked list or a tree;
                    // TreeBin is the bin-level container whose "first" points at the chain of TreeNodes
                    while (p != null) {
                        --delta; // count every entry that is about to be dropped
                        p = p.next;
                    }
                    setTabAt(tab, i++, null); // null out the whole bin in one step
                }
            }
        }
    }
    if (delta != 0L)
        addCount(delta, -1); // adjust the element count
}
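Because clear() locks and empties one bin at a time, it is not atomic with respect to concurrent writers: bins cleared early can be re-filled before the later bins are reached. A small, intentionally nondeterministic demo (the sleep and the loop bound are arbitrary):

import java.util.concurrent.ConcurrentHashMap;

public class ClearDemo { // throwaway demo class
    public static void main(String[] args) throws InterruptedException {
        ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<>();
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 1_000_000; i++)
                map.put(i & 1023, i);          // keeps re-populating 1024 keys
        });
        writer.start();
        Thread.sleep(10);                      // let the writer get going
        map.clear();                           // empties the table bin by bin
        // while the writer is still running, the map is not guaranteed to be empty here
        System.out.println("size right after clear(): " + map.size());
        writer.join();
    }
}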
Counting and Resizing
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // CounterCell is a striped counting cell adapted from LongAdder and Striped64
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        // the fast path CASes s = b + x into baseCount; if that fails, or cells already exist,
        // the update is spread across the CounterCells instead
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            fullAddCount(x, uncontended); // fall back to the full LongAdder-style slow path
            return;
        }
        if (check <= 1)
            return;
        s = sumCount();
    }
    if (check >= 0) { // remove and clear pass check = -1; every other caller must check whether the table needs to grow
        Node<K,V>[] tab, nt; int n, sc;
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) { // the element count has reached the resize threshold
            int rs = resizeStamp(n);
            if (sc < 0) { // a negative sizeCtl means a resize is already in progress
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt); // join the resize as a helper; nt is the next table being populated
            }
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null); // nobody is resizing yet: start the resize ourselves
            s = sumCount(); // re-check the count against the (possibly new) threshold
        }
    }
}
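CounterCell is, as noted above, adapted from LongAdder/Striped64, so the counting strategy is the one LongAdder exposes directly: uncontended updates CAS a base field, contended updates are striped across cells, and reading the count sums the base plus all cells, which is what sumCount() does. A minimal sketch using LongAdder itself:

import java.util.concurrent.atomic.LongAdder;
import java.util.stream.IntStream;

public class CountSketch { // throwaway demo class
    public static void main(String[] args) {
        LongAdder count = new LongAdder();
        // many threads incrementing at once: contended increments land in separate cells
        IntStream.range(0, 100_000).parallel().forEach(i -> count.increment());
        // sum() folds the base and every cell together, like sumCount() in the map
        System.out.println(count.sum()); // 100000
    }
}

ConcurrentHashMap.size() relies on the same sumCount(), which is also why the JDK recommends mappingCount() when a map might hold more than Integer.MAX_VALUE entries.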

Summary

1. CAS, lock-free updates
2. synchronized blocks on bin head nodes
3. The resize stamp (sizeCtl)
4. Cooperative resizing (helpTransfer)
5. TreeBin / TreeNode / Node

