                # 7.4 大對象分配 大對象(large object)(>32kb)直接從 Go 堆上進行分配,不涉及 mcache/mcentral/mheap 之間的三級過程,也就相對簡單。 ## 從堆上分配 ``` // 大對象分配 var s *mspan (...) systemstack(func() { s = largeAlloc(size, needzero, noscan) }) s.freeindex = 1 s.allocCount = 1 x = unsafe.Pointer(s.base()) size = s.elemsize ``` 可以看到,大對象所分配的 mspan 是直接通過`largeAlloc`進行分配的。 ``` func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan { // 對象太大,溢出 if size+_PageSize &lt; size { throw("out of memory") } // 根據分配的大小計算需要分配的頁數 npages := size &gt;&gt; _PageShift if size&amp;_PageMask != 0 { npages++ } (...) // 從堆上分配 s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero) if s == nil { throw("out of memory") } s.limit = s.base() + size (...) return s } ``` 從堆上分配調用了`alloc`方法,這個方法需要指明要分配的頁數、span 的大小等級、是否為大對象、是否清零: ``` func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan { var s *mspan systemstack(func() { s = h.alloc_m(npage, spanclass, large) }) if s != nil { // 需要清零時,對分配的 span 進行清零 if needzero &amp;&amp; s.needzero != 0 { memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages&lt;&lt;_PageShift) } // 標記已經清零 s.needzero = 0 } return s } ``` `alloc_m`是實際實現,在系統棧上執行: ``` //go:systemstack func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan { _g_ := getg() (...) lock(&amp;h.lock) (...) _g_.m.mcache.local_scan = 0 (...) _g_.m.mcache.local_tinyallocs = 0 s := h.allocSpanLocked(npage, &amp;memstats.heap_inuse) if s != nil { (...) s.state = mSpanInUse s.allocCount = 0 s.spanclass = spanclass if sizeclass := spanclass.sizeclass(); sizeclass == 0 { s.elemsize = s.npages &lt;&lt; _PageShift s.divShift = 0 s.divMul = 0 s.divShift2 = 0 s.baseMask = 0 } else { s.elemsize = uintptr(class_to_size[sizeclass]) m := &amp;class_to_divmagic[sizeclass] s.divShift = m.shift s.divMul = m.mul s.divShift2 = m.shift2 s.baseMask = m.baseMask } // Mark in-use span in arena page bitmap. 
arena, pageIdx, pageMask := pageIndexOf(s.base()) arena.pageInUse[pageIdx] |= pageMask // update stats, sweep lists h.pagesInUse += uint64(npage) if large { (...) mheap_.largealloc += uint64(s.elemsize) mheap_.nlargealloc++ (...) } } (...) unlock(&amp;h.lock) return s } ``` `allocSpanlocked`用來從堆上根據頁數來進行實際的分配工作: ``` func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan { var s *mspan // 從堆中獲取 span s = h.pickFreeSpan(npage) if s != nil { goto HaveSpan } // 堆中沒無法獲取到 span,這時需要對堆進行增長 if !h.grow(npage) { return nil } // 再獲取一次 s = h.pickFreeSpan(npage) if s != nil { goto HaveSpan } throw("grew heap, but no adequate free span found") HaveSpan: (...) if s.npages &gt; npage { t := (*mspan)(h.spanalloc.alloc()) t.init(s.base()+npage&lt;&lt;_PageShift, s.npages-npage) s.npages = npage h.setSpan(t.base()-1, s) h.setSpan(t.base(), t) h.setSpan(t.base()+t.npages*pageSize-1, t) t.needzero = s.needzero start, end := t.physPageBounds() if s.scavenged &amp;&amp; start &lt; end { memstats.heap_released += uint64(end - start) t.scavenged = true } s.state = mSpanManual t.state = mSpanManual h.freeSpanLocked(t, false, false, s.unusedsince) s.state = mSpanFree } if s.scavenged { sysUsed(unsafe.Pointer(s.base()), s.npages&lt;&lt;_PageShift) s.scavenged = false s.state = mSpanManual h.scavengeLargest(s.npages * pageSize) s.state = mSpanFree } s.unusedsince = 0 h.setSpans(s.base(), npage, s) (...) 
if s.inList() { throw("still in list") } return s } ``` 從堆上獲取 span 會同時檢查`free`和`scav`樹堆: ``` func (h *mheap) pickFreeSpan(npage uintptr) *mspan { tf := h.free.find(npage) ts := h.scav.find(npage) var s *mspan // 選擇更小的 span,然后返回 if tf != nil &amp;&amp; (ts == nil || tf.spanKey.npages &lt;= ts.spanKey.npages) { s = tf.spanKey h.free.removeNode(tf) } else if ts != nil &amp;&amp; (tf == nil || tf.spanKey.npages &gt; ts.spanKey.npages) { s = ts.spanKey h.scav.removeNode(ts) } return s } ``` free 和 scav 均為樹堆,其數據結構的性質我們已經很熟悉了。 ## 從操作系統申請 而對棧進行增長則需要向操作系統申請: ``` func (h *mheap) grow(npage uintptr) bool { ask := npage &lt;&lt; _PageShift nBase := round(h.curArena.base+ask, physPageSize) if nBase &gt; h.curArena.end { // Not enough room in the current arena. Allocate more // arena space. This may not be contiguous with the // current arena, so we have to request the full ask. av, asize := h.sysAlloc(ask) if av == nil { print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n") return false } if uintptr(av) == h.curArena.end { // The new space is contiguous with the old // space, so just extend the current space. h.curArena.end = uintptr(av) + asize } else { // The new space is discontiguous. Track what // remains of the current space and switch to // the new space. This should be rare. if size := h.curArena.end - h.curArena.base; size != 0 { h.growAddSpan(unsafe.Pointer(h.curArena.base), size) } // Switch to the new space. h.curArena.base = uintptr(av) h.curArena.end = uintptr(av) + asize } // The memory just allocated counts as both released // and idle, even though it's not yet backed by spans. // // The allocation is always aligned to the heap arena // size which is always &gt; physPageSize, so its safe to // just add directly to heap_released. Coalescing, if // possible, will also always be correct in terms of // accounting, because s.base() must be a physical // page boundary. 
memstats.heap_released += uint64(asize) memstats.heap_idle += uint64(asize) // Recalculate nBase nBase = round(h.curArena.base+ask, physPageSize) } // Grow into the current arena. v := h.curArena.base h.curArena.base = nBase h.growAddSpan(unsafe.Pointer(v), nBase-v) return true } func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) { // Scavenge some pages to make up for the virtual memory space // we just allocated, but only if we need to. h.scavengeIfNeededLocked(size) s := (*mspan)(h.spanalloc.alloc()) s.init(uintptr(v), size/pageSize) h.setSpans(s.base(), s.npages, s) s.state = mSpanFree // [v, v+size) is always in the Prepared state. The new span // must be marked scavenged so the allocator transitions it to // Ready when allocating from it. s.scavenged = true // This span is both released and idle, but grow already // updated both memstats. h.coalesce(s) h.free.insert(s) } ``` 通過`h.sysAlloc`獲取從操作系統申請而來的內存,首先嘗試 從已經保留的 arena 中獲得內存,無法獲取到合適的內存后,才會正式向操作系統申請,而后對其進行初始化: ``` func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) { n = round(n, heapArenaBytes) // 優先從已經保留的 arena 中獲取 v = h.arena.alloc(n, heapArenaBytes, &amp;memstats.heap_sys) if v != nil { size = n goto mapped } // 如果獲取不到,再嘗試增長 arena hint for h.arenaHints != nil { hint := h.arenaHints p := hint.addr if hint.down { p -= n } if p+n &lt; p { // 溢出 v = nil } else if arenaIndex(p+n-1) &gt;= 1&lt;&lt;arenaBits { // 溢出 v = nil } else { v = sysReserve(unsafe.Pointer(p), n) } if p == uintptr(v) { // 獲取成功,更新 arena hint if !hint.down { p += n } hint.addr = p size = n break } // 失敗,丟棄并重新嘗試 if v != nil { sysFree(v, n, nil) } h.arenaHints = hint.next h.arenaHintAlloc.free(unsafe.Pointer(hint)) } if size == 0 { (...) 
v, size = sysReserveAligned(nil, n, heapArenaBytes) if v == nil { return nil, 0 } // 創建新的 hint 來增長此區域 hint := (*arenaHint)(h.arenaHintAlloc.alloc()) hint.addr, hint.down = uintptr(v), true hint.next, mheap_.arenaHints = mheap_.arenaHints, hint hint = (*arenaHint)(h.arenaHintAlloc.alloc()) hint.addr = uintptr(v) + size hint.next, mheap_.arenaHints = mheap_.arenaHints, hint } // 檢查不能使用的指針 (...) // 正式開始使用保留的內存 sysMap(v, size, &amp;memstats.heap_sys) mapped: // 創建 arena 的 metadata for ri := arenaIndex(uintptr(v)); ri &lt;= arenaIndex(uintptr(v)+size-1); ri++ { l2 := h.arenas[ri.l1()] if l2 == nil { // 分配 L2 arena map l2 = (*[1 &lt;&lt; arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil)) if l2 == nil { throw("out of memory allocating heap arena map") } (...) } if l2[ri.l2()] != nil { throw("arena already initialized") } var r *heapArena r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &amp;memstats.gc_sys)) if r == nil { r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &amp;memstats.gc_sys)) if r == nil { throw("out of memory allocating heap arena metadata") } } // 將 arena 添加到 arena 列表中 if len(h.allArenas) == cap(h.allArenas) { size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize if size == 0 { size = physPageSize } newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &amp;memstats.gc_sys)) if newArray == nil { throw("out of memory allocating allArenas") } oldSlice := h.allArenas *(*notInHeapSlice)(unsafe.Pointer(&amp;h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)} copy(h.allArenas, oldSlice) } h.allArenas = h.allArenas[:len(h.allArenas)+1] h.allArenas[len(h.allArenas)-1] = ri (...) } (...) return } ``` 這個過程略顯復雜: 1. 首先會通過現有的 arena 中獲得已經保留的內存區域,如果能獲取到,則直接對 arena 進行初始化; 2. 如果沒有,則會通過`sysReserve`為 arena 保留新的內存區域,并通過`sysReserveAligned`對操作系統對齊的區域進行重排,而后使用`sysMap`正式使用所在區塊的內存。 3. 
在 arena 初始化階段,本質上是為 arena 創建 metadata,這部分內存屬于堆外內存,即不會被 GC 所追蹤的內存,因而通過 persistentalloc 進行分配。 `persistentalloc`是`sysAlloc`之上的一層封裝,它分配到的內存用于不能被釋放。 ``` func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { var p *notInHeap systemstack(func() { p = persistentalloc1(size, align, sysStat) }) return unsafe.Pointer(p) } //go:systemstack func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { const ( maxBlock = 64 &lt;&lt; 10 // VM reservation granularity is 64K on windows ) // 不允許分配大小為 0 的空間 if size == 0 { throw("persistentalloc: size == 0") } // 對齊數必須為 2 的指數、且不大于 PageSize if align != 0 { if align&amp;(align-1) != 0 { throw("persistentalloc: align is not a power of 2") } if align &gt; _PageSize { throw("persistentalloc: align is too large") } } else { // 若未指定則默認為 8 align = 8 } // 分配大內存:分配的大小如果超過最大的 block 大小,則直接調用 sysAlloc 進行分配 if size &gt;= maxBlock { return (*notInHeap)(sysAlloc(size, sysStat)) } // 分配小內存:在 m 上進行 // 先獲取 m mp := acquirem() var persistent *persistentAlloc if mp != nil &amp;&amp; mp.p != 0 { // 如果能夠獲取到 m 且同時持有 p,則直接分配到 p 的 palloc 上 persistent = &amp;mp.p.ptr().palloc } else { // 否則就分配到全局的 globalAlloc.persistentAlloc 上 lock(&amp;globalAlloc.mutex) persistent = &amp;globalAlloc.persistentAlloc } // 四舍五入 off 到 align 的倍數 persistent.off = round(persistent.off, align) if persistent.off+size &gt; persistentChunkSize || persistent.base == nil { persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &amp;memstats.other_sys)) if persistent.base == nil { if persistent == &amp;globalAlloc.persistentAlloc { unlock(&amp;globalAlloc.mutex) } throw("runtime: cannot allocate memory") } for { chunks := uintptr(unsafe.Pointer(persistentChunks)) *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&amp;persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) { break } } persistent.off = sys.PtrSize } p := persistent.base.add(persistent.off) persistent.off += size 
releasem(mp) if persistent == &amp;globalAlloc.persistentAlloc { unlock(&amp;globalAlloc.mutex) } (...) return p } ``` 可以看到,這里申請到的內存會被記錄到`globalAlloc`中: ``` var globalAlloc struct { mutex persistentAlloc } type persistentAlloc struct { base *notInHeap // 空結構,內存首地址 off uintptr // 偏移量 } ```