源码 锁 自旋 Spin go程调度顺序 死锁 读写锁 go程 均分时间片

 

github.com\google\gopacket@v1.1.19\pcap\pcap_windows.go

switch goroutines
// waitForPacket waits for a packet or for the timeout to expire.
//
// On this platform there is no selectable file descriptor to block on,
// so the method simply yields the processor with runtime.Gosched(),
// letting other goroutines run before the caller polls again.
func (p *Handle) waitForPacket() {
    // can't use select() so instead just switch goroutines
    runtime.Gosched()
}
 
 
 
// labLock demonstrates bounding a counter with a sync.RWMutex.
//
// The original version deadlocked: it called Lock() while the same
// goroutine still held RLock() on the same RWMutex. RWMutex.Lock blocks
// until every reader releases, so the goroutine waited on itself
// ("fatal error: all goroutines are asleep - deadlock!"). The fix is to
// release the read lock before acquiring the write lock, then re-check
// the condition under the write lock (the state may have changed in the
// unlocked window).
func labLock() {
	log.Println("in-0")
	var Num int
	var NumMax int = 4

	var concurrenceRWLockNum sync.RWMutex

	// Read-locked fast path: nothing to do if the counter is full.
	concurrenceRWLockNum.RLock()
	if Num == NumMax {
		concurrenceRWLockNum.RUnlock()
		return
	}
	// Must drop the read lock BEFORE taking the write lock — holding
	// both on one goroutine is a self-deadlock with sync.RWMutex.
	concurrenceRWLockNum.RUnlock()

	concurrenceRWLockNum.Lock()
	// Re-check under the write lock: another goroutine may have bumped
	// Num between RUnlock and Lock.
	if Num < NumMax {
		Num++
		// Undo the increment when the function returns.
		defer func() {
			concurrenceRWLockNum.Lock()
			Num--
			concurrenceRWLockNum.Unlock()
		}()
	}
	concurrenceRWLockNum.Unlock()

	log.Println("in-1")
}

  

 

 

fatal error: all goroutines are asleep - deadlock!

goroutine 1 [semacquire]:
sync.runtime_SemacquireMutex(0x56c807?, 0x18?, 0x61dda0?)
C:/Program Files/Go/src/runtime/sema.go:77 +0x25
sync.(*RWMutex).Lock(0x0?)
C:/Program Files/Go/src/sync/rwmutex.go:152 +0x71
main.labLock()

/main.go:29 +0x98  

    } else {


main.main()

 修正：调整加锁顺序——先 RUnlock() 释放读锁，再调用 Lock() 获取写锁。同一 goroutine 持有读锁时再申请写锁会自我死锁（写锁要等所有读者释放）。

        concurrenceRWLockNum.RUnlock()
        concurrenceRWLockNum.Lock()

 

Go\src\sync\runtime.go

// runtime_doSpin does active spinning.
func runtime_doSpin()

 

Go\src\sync\mutex.go

// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
    // Fast path: grab unlocked mutex.
    // The CAS from 0 to mutexLocked succeeds only when the state word is
    // completely zero — unlocked, no waiters, no woken/starving flags.
    if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
        if race.Enabled {
            // Report the acquisition to the race detector.
            race.Acquire(unsafe.Pointer(m))
        }
        return
    }
    // Slow path (outlined so that the fast path can be inlined)
    m.lockSlow()
}

// TryLock tries to lock m and reports whether it succeeded.
//
// Note that while correct uses of TryLock do exist, they are rare,
// and use of TryLock is often a sign of a deeper problem
// in a particular use of mutexes.
func (m *Mutex) TryLock() bool {
    // Snapshot the state; if the mutex is held or in starvation mode,
    // give up immediately without spinning or queuing.
    old := m.state
    if old&(mutexLocked|mutexStarving) != 0 {
        return false
    }

    // There may be a goroutine waiting for the mutex, but we are
    // running now and can try to grab the mutex before that
    // goroutine wakes up.
    // The CAS fails if the state changed since the snapshot above,
    // in which case TryLock reports failure rather than retrying.
    if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
        return false
    }

    if race.Enabled {
        // Report the acquisition to the race detector.
        race.Acquire(unsafe.Pointer(m))
    }
    return true
}

// lockSlow is the contended path of Mutex.Lock. It alternates between
// active spinning (runtime_doSpin) while the lock looks likely to free
// up soon, and parking on the semaphore (runtime_SemacquireMutex). A
// goroutine that has waited longer than starvationThresholdNs switches
// the mutex to starvation mode, where ownership is handed directly to
// the head of the wait queue instead of being fought over by new
// arrivals.
func (m *Mutex) lockSlow() {
    var waitStartTime int64 // 0 until this goroutine first parks
    starving := false       // true once we've waited past the threshold
    awoke := false          // true when woken from the semaphore queue
    iter := 0               // consecutive spin iterations
    old := m.state
    for {
        // Don't spin in starvation mode, ownership is handed off to waiters
        // so we won't be able to acquire the mutex anyway.
        if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
            // Active spinning makes sense.
            // Try to set mutexWoken flag to inform Unlock
            // to not wake other blocked goroutines.
            if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
                atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
                awoke = true
            }
            runtime_doSpin()
            iter++
            old = m.state
            continue
        }
        // Compute the desired new state from the snapshot in old.
        new := old
        // Don't try to acquire starving mutex, new arriving goroutines must queue.
        if old&mutexStarving == 0 {
            new |= mutexLocked
        }
        // If we can't take the lock outright, register as a waiter.
        if old&(mutexLocked|mutexStarving) != 0 {
            new += 1 << mutexWaiterShift
        }
        // The current goroutine switches mutex to starvation mode.
        // But if the mutex is currently unlocked, don't do the switch.
        // Unlock expects that starving mutex has waiters, which will not
        // be true in this case.
        if starving && old&mutexLocked != 0 {
            new |= mutexStarving
        }
        if awoke {
            // The goroutine has been woken from sleep,
            // so we need to reset the flag in either case.
            if new&mutexWoken == 0 {
                throw("sync: inconsistent mutex state")
            }
            new &^= mutexWoken
        }
        if atomic.CompareAndSwapInt32(&m.state, old, new) {
            if old&(mutexLocked|mutexStarving) == 0 {
                break // locked the mutex with CAS
            }
            // If we were already waiting before, queue at the front of the queue.
            queueLifo := waitStartTime != 0
            if waitStartTime == 0 {
                waitStartTime = runtime_nanotime()
            }
            // Park until Unlock hands us the semaphore.
            runtime_SemacquireMutex(&m.sema, queueLifo, 1)
            // Promote to starving once total wait exceeds the threshold.
            starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
            old = m.state
            if old&mutexStarving != 0 {
                // If this goroutine was woken and mutex is in starvation mode,
                // ownership was handed off to us but mutex is in somewhat
                // inconsistent state: mutexLocked is not set and we are still
                // accounted as waiter. Fix that.
                if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
                    throw("sync: inconsistent mutex state")
                }
                delta := int32(mutexLocked - 1<<mutexWaiterShift)
                if !starving || old>>mutexWaiterShift == 1 {
                    // Exit starvation mode.
                    // Critical to do it here and consider wait time.
                    // Starvation mode is so inefficient, that two goroutines
                    // can go lock-step infinitely once they switch mutex
                    // to starvation mode.
                    delta -= mutexStarving
                }
                atomic.AddInt32(&m.state, delta)
                break
            }
            awoke = true
            iter = 0
        } else {
            // CAS lost a race with another state change; reload and retry.
            old = m.state
        }
    }

    if race.Enabled {
        race.Acquire(unsafe.Pointer(m))
    }
}

 

// labLock launches two batches of goroutines that all compete for the
// same write lock, logging the order in which each one wins it. The
// interleaved output demonstrates that sync.RWMutex does not hand the
// lock to waiters in strict arrival (FIFO) order across the batches.
func labLock() {
	log.Println("in-0")

	const workers = 16
	var (
		wg sync.WaitGroup
		lk sync.RWMutex
	)
	wg.Add(2 * workers)

	// run is the shared critical section: take the write lock, log
	// which worker won it, hold the lock briefly, then release.
	run := func(tag string, id int) {
		defer wg.Done()
		lk.Lock()
		log.Println(tag, id)
		time.Sleep(2 * time.Second)
		lk.Unlock()
	}

	for i := 0; i < workers; i++ {
		go run("1=", i)
	}
	for i := 0; i < workers; i++ {
		go run("2=", i)
	}

	wg.Wait()
	log.Println("in-1")
}

  

2022/09/02 09:23:29 in-0
2022/09/02 09:23:29 1= 8
2022/09/02 09:23:31 2= 2
2022/09/02 09:23:33 1= 9
2022/09/02 09:23:35 1= 10
2022/09/02 09:23:37 1= 11
2022/09/02 09:23:39 1= 12
2022/09/02 09:23:41 1= 13
2022/09/02 09:23:43 1= 14
2022/09/02 09:23:45 1= 15
2022/09/02 09:23:47 2= 0
2022/09/02 09:23:49 2= 1
2022/09/02 09:23:51 2= 15
2022/09/02 09:23:53 2= 6
2022/09/02 09:23:55 2= 7
2022/09/02 09:23:57 2= 8
2022/09/02 09:23:59 2= 9
2022/09/02 09:24:01 2= 10
2022/09/02 09:24:03 2= 11
2022/09/02 09:24:05 2= 12
2022/09/02 09:24:07 2= 13
2022/09/02 09:24:09 2= 14
2022/09/02 09:24:11 1= 5
2022/09/02 09:24:13 2= 5
2022/09/02 09:24:15 2= 3
2022/09/02 09:24:17 1= 4
2022/09/02 09:24:19 2= 4
2022/09/02 09:24:21 1= 6
2022/09/02 09:24:23 1= 1
2022/09/02 09:24:25 1= 7
2022/09/02 09:24:27 1= 0
2022/09/02 09:24:29 1= 2
2022/09/02 09:24:31 1= 3
2022/09/02 09:24:33 in-1

  

 

posted @ 2022-09-01 13:39  papering  阅读(58)  评论(0编辑  收藏  举报