Diving into Golang High Concurrency from the Computer's Low-Level Internals

1. Source Code Flow Architecture Diagram

[Figure: source code flow architecture diagram]

2. Source Code Walkthrough

We start with newproc() in runtime/proc.go. The compiler lowers every `go` statement into a call to this function:


func newproc(fn *funcval) {
    // Get the current g and the caller's PC
    gp := getg()
    pc := getcallerpc()
    // S1: call newproc1 on the system stack
    systemstack(func() {
        newg := newproc1(fn, gp, pc)

        _p_ := getg().m.p.ptr()
        // S1: put the new g onto the run queue
        runqput(_p_, newg, true)

        // S1: if the main goroutine has already started, wake a P/M if needed
        if mainStarted {
            wakep()
        }
    })
}
func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
    // getg compiles down to a read of the FS register (TLS) and returns the current g
    _g_ := getg()

    if fn == nil {
        _g_.m.throwing = -1 // do not dump full stacks
        throw("go of nil func value")
    }
    // Disable preemption because it can be holding p in a local var;
    // acquirem increments the locks count on this g's m
    acquirem()

    // Get the p owned by the current m
    _p_ := _g_.m.p.ptr()
    // Reuse a g from the free list, or allocate a new one
    newg := gfget(_p_)
    if newg == nil {
        newg = malg(_StackMin)
        casgstatus(newg, _Gidle, _Gdead)
        allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
    }
    if newg.stack.hi == 0 {
        throw("newproc1: newg missing stack")
    }

    if readgstatus(newg) != _Gdead {
        throw("newproc1: new g is not Gdead")
    }

    totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
    totalSize = alignUp(totalSize, sys.StackAlign)
    sp := newg.stack.hi - totalSize
    spArg := sp
    if usesLR {
        // caller's LR
        *(*uintptr)(unsafe.Pointer(sp)) = 0
        prepGoExitFrame(sp)
        spArg += sys.MinFrameSize
    }

    // Set up the new g's scheduling context
    memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
    newg.sched.sp = sp
    newg.stktopsp = sp
    newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
    newg.sched.g = guintptr(unsafe.Pointer(newg))
    gostartcallfn(&newg.sched, fn)
    newg.gopc = callerpc
    newg.ancestors = saveAncestors(callergp)
    newg.startpc = fn.fn
    if isSystemGoroutine(newg, false) {
        atomic.Xadd(&sched.ngsys, +1)
    } else {
        // Only user goroutines inherit pprof labels.
        if _g_.m.curg != nil {
            newg.labels = _g_.m.curg.labels
        }
    }
    // Track initial transition?
    newg.trackingSeq = uint8(fastrand())
    if newg.trackingSeq%gTrackingPeriod == 0 {
        newg.tracking = true
    }
    // Mark the new g runnable
    casgstatus(newg, _Gdead, _Grunnable)
    gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))

    if _p_.goidcache == _p_.goidcacheend {
        // Sched.goidgen is the last allocated id,
        // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
        // At startup sched.goidgen=0, so main goroutine receives goid=1.
        _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
        _p_.goidcache -= _GoidCacheBatch - 1
        _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
    }
    newg.goid = int64(_p_.goidcache)
    _p_.goidcache++
    if raceenabled {
        newg.racectx = racegostart(callerpc)
    }
    if trace.enabled {
        traceGoCreate(newg, newg.startpc)
    }
    releasem(_g_.m)

    return newg
}
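
newproc() is thin: it captures the current g and caller PC, then switches to the system stack for newproc1(), which reuses or allocates a g with a _StackMin stack, wires its sched context so the g "returns" into goexit when fn finishes, marks it _Grunnable, assigns a goid from the P's cache, and hands it to runqput(). As a minimal demo of the user-visible side (ordinary user code, not runtime source):

package main

import (
    "fmt"
    "runtime"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        // Each `go` statement is lowered by the compiler into a call to
        // runtime.newproc, which runs newproc1 and runqput as shown above.
        go func(id int) {
            defer wg.Done()
            fmt.Printf("goroutine %d running\n", id)
        }(i)
    }
    // Every live g created via newproc1 is counted here (plus main's g).
    fmt.Println("goroutines:", runtime.NumGoroutine())
    wg.Wait()
}

The next function, runtime.main, is itself the first goroutine the bootstrap sequence creates through newproc: it is the main goroutine.
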
func main() {
    g := getg() // get the current g

    // Racectx of m0->g0 is used only as the parent of the main goroutine.
    // It must not be used for anything else.
    g.m.g0.racectx = 0

    // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
    // Using decimal instead of binary GB and MB because
    // they look nicer in the stack overflow failure message.
    if goarch.PtrSize == 8 {
        maxstacksize = 1000000000
    } else {
        maxstacksize = 250000000
    }

    // An upper limit for max stack size. Used to avoid random crashes
    // after calling SetMaxStack and trying to allocate a stack that is too big,
    // since stackalloc works with 32-bit sizes.
    maxstackceiling = 2 * maxstacksize

    // Mark that the main function has been called.
    mainStarted = true

    // Start the sysmon monitor thread (no threads on wasm yet, so no sysmon there)
    if GOARCH != "wasm" {
        systemstack(func() {
            newm(sysmon, nil, -1)
        })
    }

    // Lock the main goroutine onto this, the main OS thread,
    // during initialization. Most programs won't care, but a few
    // do require certain calls to be made by the main thread.
    // Those can arrange for main.main to run in the main thread
    // by calling runtime.LockOSThread during initialization
    // to preserve the lock.
    lockOSThread()

    if g.m != &m0 {
        throw("runtime.main not on m0")
    }

    // Record when the world started.
    // Must be before doInit for tracing init.
    runtimeInitTime = nanotime()
    if runtimeInitTime == 0 {
        throw("nanotime returning zero")
    }

    if debug.inittrace != 0 {
        inittrace.id = getg().goid
        inittrace.active = true
    }

    // Run the runtime package's init tasks
    doInit(&runtime_inittask) // Must be before defer.

    // Defer unlock so that runtime.Goexit during init does the unlock too.
    needUnlock := true
    defer func() {
        if needUnlock {
            unlockOSThread()
        }
    }()

    gcenable()

    main_init_done = make(chan bool)
    if iscgo {
        if _cgo_thread_start == nil {
            throw("_cgo_thread_start missing")
        }
        if GOOS != "windows" {
            if _cgo_setenv == nil {
                throw("_cgo_setenv missing")
            }
            if _cgo_unsetenv == nil {
                throw("_cgo_unsetenv missing")
            }
        }
        if _cgo_notify_runtime_init_done == nil {
            throw("_cgo_notify_runtime_init_done missing")
        }
        // Start the template thread in case we enter Go from
        // a C-created thread and need to create a new thread.
        startTemplateThread()
        cgocall(_cgo_notify_runtime_init_done, nil)
    }

    // Run the main package's init tasks
    doInit(&main_inittask)

    // Disable init tracing after main init done to avoid overhead
    // of collecting statistics in malloc and newproc
    inittrace.active = false

    close(main_init_done)

    needUnlock = false
    unlockOSThread()

    if isarchive || islibrary {
        // A program compiled with -buildmode=c-archive or c-shared
        // has a main, but it is not executed.
        return
    }
    // Make an indirect call, as the linker doesn't know the address
    // of the main package when laying down the runtime
    fn := main_main
    fn()
    if raceenabled {
        racefini()
    }

    // Make racy client program work: if panicking on
    // another goroutine at the same time as main returns,
    // let the other goroutine finish printing the panic trace.
    // Once it does, it will exit. See issues 3934 and 20018.
    if atomic.Load(&runningPanicDefers) != 0 {
        // Running deferred functions should not take long.
        for c := 0; c < 1000; c++ {
            if atomic.Load(&runningPanicDefers) == 0 {
                break
            }
            Gosched()
        }
    }
    if atomic.Load(&panicking) != 0 {
        gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
    }

    exit(0)
    for {
        var x *int32
        *x = 0
    }
}
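
Two parts of runtime.main are directly observable from user code: doInit(&runtime_inittask) and doInit(&main_inittask) run all package initializers before main.main, and main.main itself is reached through the indirect fn() call because the linker cannot see the main package's address while laying down the runtime. A small ordering sketch (plain user code; nothing below is runtime API):

package main

import "fmt"

// Package-level variable initializers run first, during doInit.
var x = initX()

func initX() int {
    fmt.Println("1: package variable initializer")
    return 42
}

// init functions run next, still inside doInit(&main_inittask).
func init() {
    fmt.Println("2: init()")
}

// main.main runs last, invoked by runtime.main as fn().
func main() {
    fmt.Println("3: main.main")
}

Before any of that, runtime.main has already started the monitor thread: newm(sysmon, nil, -1) creates an M that runs sysmon, shown next, on its own thread without a P.
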
func sysmon() {
    lock(&sched.lock)
    sched.nmsys++
    checkdead()
    unlock(&sched.lock)

    lasttrace := int64(0)
    idle := 0 // how many cycles in succession we had not wokeup somebody
    delay := uint32(0)

    for {
        if idle == 0 { // start with 20us sleep...
            delay = 20
        } else if idle > 50 { // start doubling the sleep after 1ms...
            delay *= 2
        }
        if delay > 10*1000 { // up to 10ms
            delay = 10 * 1000
        }
        usleep(delay)

        // sysmon should not enter deep sleep if schedtrace is enabled so that
        // it can print that information at the right time.
        //
        // It should also not enter deep sleep if there are any active P's so
        // that it can retake P's from syscalls, preempt long running G's, and
        // poll the network if all P's are busy for long stretches.
        //
        // It should wakeup from deep sleep if any P's become active either due
        // to exiting a syscall or waking up due to a timer expiring so that it
        // can resume performing those duties. If it wakes from a syscall it
        // resets idle and delay as a bet that since it had retaken a P from a
        // syscall before, it may need to do it again shortly after the
        // application starts work again. It does not reset idle when waking
        // from a timer to avoid adding system load to applications that spend
        // most of their time sleeping.
        now := nanotime()
        if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
            lock(&sched.lock)
            if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
                syscallWake := false
                next, _ := timeSleepUntil()
                if next > now {
                    atomic.Store(&sched.sysmonwait, 1)
                    unlock(&sched.lock)
                    // Make wake-up period small enough
                    // for the sampling to be correct.
                    sleep := forcegcperiod / 2
                    if next-now < sleep {
                        sleep = next - now
                    }
                    shouldRelax := sleep >= osRelaxMinNS
                    if shouldRelax {
                        osRelax(true)
                    }
                    syscallWake = notetsleep(&sched.sysmonnote, sleep)
                    if shouldRelax {
                        osRelax(false)
                    }
                    lock(&sched.lock)
                    atomic.Store(&sched.sysmonwait, 0)
                    noteclear(&sched.sysmonnote)
                }
                if syscallWake {
                    idle = 0
                    delay = 20
                }
            }
            unlock(&sched.lock)
        }

        lock(&sched.sysmonlock)
        // Update now in case we blocked on sysmonnote or spent a long time
        // blocked on schedlock or sysmonlock above.
        now = nanotime()

        // trigger libc interceptors if needed
        if *cgo_yield != nil {
            asmcgocall(*cgo_yield, nil)
        }

        // poll network if not polled for more than 10ms
        lastpoll := int64(atomic.Load64(&sched.lastpoll))
        if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
            atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
            list := netpoll(0) // non-blocking - returns list of goroutines
            if !list.empty() {
                // Need to decrement number of idle locked M's
                // (pretending that one more is running) before injectglist.
                // Otherwise it can lead to the following situation:
                // injectglist grabs all P's but before it starts M's to run the P's,
                // another M returns from syscall, finishes running its G,
                // observes that there is no work to do and no other running M's
                // and reports deadlock.
                incidlelocked(-1)
                injectglist(&list)
                incidlelocked(1)
            }
        }

        if GOOS == "netbsd" && needSysmonWorkaround {
            // netpoll is responsible for waiting for timer
            // expiration, so we typically don't have to worry
            // about starting an M to service timers. (Note that
            // sleep for timeSleepUntil above simply ensures sysmon
            // starts running again when that timer expiration may
            // cause Go code to run again).
            //
            // However, netbsd has a kernel bug that sometimes
            // misses netpollBreak wake-ups, which can lead to
            // unbounded delays servicing timers. If we detect this
            // overrun, then startm to get something to handle the
            // timer.
            //
            // See issue 42515 and
            // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
            if next, _ := timeSleepUntil(); next < now {
                startm(nil, false)
            }
        }

        if atomic.Load(&scavenge.sysmonWake) != 0 {
            // Kick the scavenger awake if someone requested it.
            wakeScavenger()
        }

        // S1: retake Ps blocked in system calls, preempt long-running Gs
        if retake(now) != 0 {
            idle = 0
        } else {
            idle++
        }

        // check if we need to force a GC
        if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
            lock(&forcegc.lock)
            forcegc.idle = 0
            var list gList
            list.push(forcegc.g)
            injectglist(&list)
            unlock(&forcegc.lock)
        }

        if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
            lasttrace = now
            schedtrace(debug.scheddetail > 0)
        }
        unlock(&sched.sysmonlock)
    }
}
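
sysmon loops forever with an adaptive sleep (20us, doubling up to 10ms): it polls the network if no poll has happened for 10ms and injects any ready gs, calls retake() to reclaim Ps, and forces a GC when one is overdue. Its schedtrace branch is the easiest piece to observe: run any program with GODEBUG=schedtrace=<ms> and the runtime prints a scheduler summary at that interval. A throwaway program to watch it with (the exact output fields vary across Go versions):

package main

import "time"

func main() {
    // Keep a few Ps busy so the trace shows non-idle state.
    for i := 0; i < 4; i++ {
        go func() {
            for {
            }
        }()
    }
    time.Sleep(3 * time.Second)
}

// Run: GODEBUG=schedtrace=1000 go run main.go
// Add scheddetail=1 to also dump per-P, per-M, and per-G state; both are
// printed by the schedtrace(debug.scheddetail > 0) call seen above.
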
func retake(now int64) uint32 {
    n := 0
    // Prevent allp slice changes. This lock will be completely
    // uncontended unless we're already stopping the world.
    lock(&allpLock)
    // We can't use a range loop over allp because we may
    // temporarily drop the allpLock. Hence, we need to re-fetch
    // allp each time around the loop.
    for i := 0; i < len(allp); i++ {
        _p_ := allp[i]
        if _p_ == nil {
            // This can happen if procresize has grown
            // allp but not yet created new Ps.
            continue
        }
        pd := &_p_.sysmontick
        s := _p_.status
        sysretake := false
        if s == _Prunning || s == _Psyscall {
            // Preempt G if it's running for too long.
            t := int64(_p_.schedtick)
            if int64(pd.schedtick) != t {
                pd.schedtick = uint32(t)
                pd.schedwhen = now
            } else if pd.schedwhen+forcePreemptNS <= now {
                preemptone(_p_)
                // In case of syscall, preemptone() doesn't
                // work, because there is no M wired to P.
                sysretake = true
            }
        }
        // If the P is in a system call (_Psyscall) and a full sysmon
        // cycle (20us-10ms) has passed, consider retaking this P
        if s == _Psyscall {
            // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
            t := int64(_p_.syscalltick)
            if !sysretake && int64(pd.syscalltick) != t {
                pd.syscalltick = uint32(t)
                pd.syscallwhen = now
                continue
            }
            // On the one hand we don't want to retake Ps if there is no other work to do,
            // but on the other hand we want to retake them eventually
            // because they can prevent the sysmon thread from deep sleep.
            // If this P's local run queue is empty, there are spinning Ms or
            // idle Ps elsewhere, and the syscall has lasted less than 10ms,
            // leave the P bound to its M.
            if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
                continue
            }
            // Drop allpLock so we can take sched.lock.
            unlock(&allpLock)
            // Need to decrement number of idle locked M's
            // (pretending that one more is running) before the CAS.
            // Otherwise the M from which we retake can exit the syscall,
            // increment nmidle and report deadlock.
            incidlelocked(-1)
            if atomic.Cas(&_p_.status, s, _Pidle) {
                if trace.enabled {
                    traceGoSysBlock(_p_)
                    traceProcStop(_p_)
                }
                n++
                _p_.syscalltick++
                handoffp(_p_)
            }
            incidlelocked(1)
            lock(&allpLock)
        }
    }
    unlock(&allpLock)
    return uint32(n)
}
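
retake() handles two situations. For a P in _Prunning whose schedtick has not advanced within forcePreemptNS (10ms), it calls preemptone(); for a P parked in _Psyscall for more than one sysmon tick, it CASes the P to _Pidle and hands it off via handoffp(), unless the P's run queue is empty, other spinning/idle Ps exist, and the syscall is still under 10ms old. A hedged sketch of the preemption side: since Go 1.14 preemption is signal-based, so even a loop with no call sites gets interrupted, which is exactly what keeps the program below responsive:

package main

import (
    "fmt"
    "runtime"
    "time"
)

func main() {
    // With a single P, this program would hang on pre-1.14 runtimes:
    // the tight loop contains no preemption points.
    runtime.GOMAXPROCS(1)
    go func() {
        for {
            // no function calls, no yield points
        }
    }()
    time.Sleep(100 * time.Millisecond)
    fmt.Println("main was scheduled again: sysmon/retake preempted the loop")
}
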
func startm(_p_ *p, spinning bool) {
    // Disable preemption.
    //
    // Every owned P must have an owner that will eventually stop it in the
    // event of a GC stop request. startm takes transient ownership of a P
    // (either from argument or pidleget below) and transfers ownership to
    // a started M, which will be responsible for performing the stop.
    //
    // Preemption must be disabled during this transient ownership,
    // otherwise the P this is running on may enter GC stop while still
    // holding the transient P, leaving that P in limbo and deadlocking the
    // STW.
    //
    // Callers passing a non-nil P must already be in non-preemptible
    // context, otherwise such preemption could occur on function entry to
    // startm. Callers passing a nil P may be preemptible, so we must
    // disable preemption before acquiring a P from pidleget below.
    mp := acquirem()
    lock(&sched.lock)
    if _p_ == nil {
        // Get an idle P from the idle P list
        _p_ = pidleget()
        if _p_ == nil {
            unlock(&sched.lock)
            if spinning {
                // The caller incremented nmspinning, but there are no idle Ps,
                // so it's okay to just undo the increment and give up.
                if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
                    throw("startm: negative nmspinning")
                }
            }
            releasem(mp)
            return
        }
    }
    // Get an idle m from the idle m list; if there is none, create one
    nmp := mget()
    if nmp == nil {
        // No M is available, we must drop sched.lock and call newm.
        // However, we already own a P to assign to the M.
        //
        // Once sched.lock is released, another G (e.g., in a syscall),
        // could find no idle P while checkdead finds a runnable G but
        // no running M's because this new M hasn't started yet, thus
        // throwing in an apparent deadlock.
        //
        // Avoid this situation by pre-allocating the ID for the new M,
        // thus marking it as 'running' before we drop sched.lock. This
        // new M will eventually run the scheduler to execute any
        // queued G's.
        id := mReserveID()
        unlock(&sched.lock)

        var fn func()
        if spinning {
            // The caller incremented nmspinning, so set m.spinning in the new M.
            fn = mspinning
        }
        // newm builds a new m instance (including its g0) and then starts
        // an OS thread for it via newosproc
        newm(fn, _p_, id)
        // Ownership transfer of _p_ committed by start in newm.
        // Preemption is now safe.
        releasem(mp)
        return
    }
    unlock(&sched.lock)
    if nmp.spinning {
        throw("startm: m is spinning")
    }
    if nmp.nextp != 0 {
        throw("startm: m has p")
    }
    if spinning && !runqempty(_p_) {
        throw("startm: p has runnable gs")
    }
    // The caller incremented nmspinning, so set m.spinning in the new M.
    nmp.spinning = spinning
    nmp.nextp.set(_p_)
    notewakeup(&nmp.park)
    // Ownership transfer of _p_ committed by wakeup. Preemption is now
    // safe.
    releasem(mp)
}
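
startm() is where a released P gets a worker: it takes an idle M from mget(), or reserves an ID and calls newm(), which builds an m (with its g0) and starts a real OS thread through newosproc. One observable consequence, sketched under the assumption of a Unix-like system with a `sleep` binary on PATH: goroutines blocked in syscalls keep their Ms pinned, so after retake()/handoffp()/startm() the process accumulates more OS threads than GOMAXPROCS:

package main

import (
    "fmt"
    "os/exec"
    "runtime/pprof"
    "time"
)

func main() {
    for i := 0; i < 8; i++ {
        go func() {
            // Run blocks this goroutine's M in syscalls long enough
            // for sysmon's retake() to detach the P and start another M.
            _ = exec.Command("sleep", "1").Run()
        }()
    }
    time.Sleep(200 * time.Millisecond)
    // The threadcreate profile counts OS threads ever created by the process.
    fmt.Println("threads created:", pprof.Lookup("threadcreate").Count())
}
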
