| // Copyright 2009 The Go Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style |
| // license that can be found in the LICENSE file. |
| |
| // See malloc.h for overview. |
| // |
| // TODO(rsc): double-check stats. |
| |
| package runtime |
| #include <stddef.h> |
| #include <errno.h> |
| #include <stdlib.h> |
| #include "go-alloc.h" |
| #include "runtime.h" |
| #include "arch.h" |
| #include "malloc.h" |
| #include "go-string.h" |
| #include "interface.h" |
| #include "go-type.h" |
| |
| MHeap runtime_mheap; |
| |
| extern MStats mstats; // defined in extern.go |
| |
| extern volatile int32 runtime_MemProfileRate |
| __asm__ ("runtime.MemProfileRate"); |
| |
| // Allocate an object of at least size bytes. |
| // Small objects are allocated from the per-thread cache's free lists. |
| // Large objects (> 32 kB) are allocated straight from the heap. |
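| // flag is a combination of the FlagNo* bits used below (FlagNoPointers, |
| // FlagNoGC, FlagNoProfiling); dogc != 0 allows this call to trigger a |
| // collection once heap_alloc reaches next_gc; zeroed reports whether the |
| // caller needs the returned memory to be zeroed. |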
| void* |
| runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) |
| { |
| M *m; |
| G *g; |
| int32 sizeclass, rate; |
| MCache *c; |
| uintptr npages; |
| MSpan *s; |
| void *v; |
| |
| m = runtime_m(); |
| g = runtime_g(); |
| if(g->status == Gsyscall) |
| dogc = 0; |
| if(runtime_gcwaiting && g != m->g0 && m->locks == 0 && g->status != Gsyscall) { |
| runtime_gosched(); |
| m = runtime_m(); |
| } |
| if(m->mallocing) |
| runtime_throw("malloc/free - deadlock"); |
| m->mallocing = 1; |
| if(size == 0) |
| size = 1; |
| |
| c = m->mcache; |
| c->local_nmalloc++; |
| if(size <= MaxSmallSize) { |
| // Allocate from mcache free lists. |
| sizeclass = runtime_SizeToClass(size); |
| size = runtime_class_to_size[sizeclass]; |
| v = runtime_MCache_Alloc(c, sizeclass, size, zeroed); |
| if(v == nil) |
| runtime_throw("out of memory"); |
| c->local_alloc += size; |
| c->local_total_alloc += size; |
| c->local_by_size[sizeclass].nmalloc++; |
| } else { |
| // TODO(rsc): Report tracebacks for very large allocations. |
| |
| // Allocate directly from heap. |
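| // Round the size up to a whole number of pages; for example, with |
| // 4 kB pages a 33 kB request rounds up to 9 pages. |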
| npages = size >> PageShift; |
| if((size & PageMask) != 0) |
| npages++; |
| s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1); |
| if(s == nil) |
| runtime_throw("out of memory"); |
| size = npages<<PageShift; |
| c->local_alloc += size; |
| c->local_total_alloc += size; |
| v = (void*)(s->start << PageShift); |
| |
| // setup for mark sweep |
| runtime_markspan(v, 0, 0, true); |
| } |
| if(!(flag & FlagNoGC)) |
| runtime_markallocated(v, size, (flag&FlagNoPointers) != 0); |
| |
| m->mallocing = 0; |
| |
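| // Heap profiling: sample roughly one allocation per MemProfileRate bytes. |
| // next_sample counts down the bytes left until the next sample; |
| // allocations of at least rate bytes are always recorded. |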
| if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) { |
| if(size >= (uint32) rate) |
| goto profile; |
| if((uint32) m->mcache->next_sample > size) |
| m->mcache->next_sample -= size; |
| else { |
| // pick next profile time |
| // If you change this, also change allocmcache. |
| if(rate > 0x3fffffff) // make 2*rate not overflow |
| rate = 0x3fffffff; |
| m->mcache->next_sample = runtime_fastrand1() % (2*rate); |
| profile: |
| runtime_setblockspecial(v, true); |
| runtime_MProf_Malloc(v, size); |
| } |
| } |
| |
| if(dogc && mstats.heap_alloc >= mstats.next_gc) |
| runtime_gc(0); |
| return v; |
| } |
| |
| void* |
| __go_alloc(uintptr size) |
| { |
| return runtime_mallocgc(size, 0, 0, 1); |
| } |
| |
| // Free the object whose base pointer is v. |
| void |
| __go_free(void *v) |
| { |
| M *m; |
| int32 sizeclass; |
| MSpan *s; |
| MCache *c; |
| uint32 prof; |
| uintptr size; |
| |
| if(v == nil) |
| return; |
| |
| // If you change this also change mgc0.c:/^sweep, |
| // which has a copy of the guts of free. |
| |
| m = runtime_m(); |
| if(m->mallocing) |
| runtime_throw("malloc/free - deadlock"); |
| m->mallocing = 1; |
| |
| if(!runtime_mlookup(v, nil, nil, &s)) { |
| runtime_printf("free %p: not an allocated block\n", v); |
| runtime_throw("free runtime_mlookup"); |
| } |
| prof = runtime_blockspecial(v); |
| |
| // Find size class for v. |
| sizeclass = s->sizeclass; |
| c = m->mcache; |
| if(sizeclass == 0) { |
| // Large object. |
| size = s->npages<<PageShift; |
| *(uintptr*)(s->start<<PageShift) = 1; // mark as "needs to be zeroed" |
| // Must mark v freed before calling unmarkspan and MHeap_Free: |
| // they might coalesce v into other spans and change the bitmap further. |
| runtime_markfreed(v, size); |
| runtime_unmarkspan(v, 1<<PageShift); |
| runtime_MHeap_Free(&runtime_mheap, s, 1); |
| } else { |
| // Small object. |
| size = runtime_class_to_size[sizeclass]; |
| if(size > sizeof(uintptr)) |
| ((uintptr*)v)[1] = 1; // mark as "needs to be zeroed" |
| // Must mark v freed before calling MCache_Free: |
| // it might coalesce v and other blocks into a bigger span |
| // and change the bitmap further. |
| runtime_markfreed(v, size); |
| c->local_by_size[sizeclass].nfree++; |
| runtime_MCache_Free(c, v, sizeclass, size); |
| } |
| c->local_nfree++; |
| c->local_alloc -= size; |
| if(prof) |
| runtime_MProf_Free(v, size); |
| m->mallocing = 0; |
| } |
| |
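| // runtime_mlookup reports whether v points into an allocated block. |
| // When they are non-nil, *base and *size receive the block's base address |
| // and size, and *sp receives the span containing v (or nil). |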
| int32 |
| runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp) |
| { |
| uintptr n, i; |
| byte *p; |
| MSpan *s; |
| |
| runtime_m()->mcache->local_nlookup++; |
| s = runtime_MHeap_LookupMaybe(&runtime_mheap, v); |
| if(sp) |
| *sp = s; |
| if(s == nil) { |
| runtime_checkfreed(v, 1); |
| if(base) |
| *base = nil; |
| if(size) |
| *size = 0; |
| return 0; |
| } |
| |
| p = (byte*)((uintptr)s->start<<PageShift); |
| if(s->sizeclass == 0) { |
| // Large object. |
| if(base) |
| *base = p; |
| if(size) |
| *size = s->npages<<PageShift; |
| return 1; |
| } |
| |
| if((byte*)v >= (byte*)s->limit) { |
| // pointers past the last block do not count as pointers. |
| return 0; |
| } |
| |
| n = runtime_class_to_size[s->sizeclass]; |
| if(base) { |
| i = ((byte*)v - p)/n; |
| *base = p + i*n; |
| } |
| if(size) |
| *size = n; |
| |
| return 1; |
| } |
| |
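| // runtime_allocmcache allocates a per-M MCache from the heap's cachealloc |
| // and sets its first memory-profiling sample point. |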
| MCache* |
| runtime_allocmcache(void) |
| { |
| int32 rate; |
| MCache *c; |
| |
| runtime_lock(&runtime_mheap); |
| c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc); |
| mstats.mcache_inuse = runtime_mheap.cachealloc.inuse; |
| mstats.mcache_sys = runtime_mheap.cachealloc.sys; |
| runtime_unlock(&runtime_mheap); |
| |
| // Set first allocation sample size. |
| rate = runtime_MemProfileRate; |
| if(rate > 0x3fffffff) // make 2*rate not overflow |
| rate = 0x3fffffff; |
| if(rate != 0) |
| c->next_sample = runtime_fastrand1() % (2*rate); |
| |
| return c; |
| } |
| |
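| // runtime_purgecachedstats flushes an M's cached allocation counters into |
| // the global mstats.  The caller must hold the heap or GC lock. |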
| void |
| runtime_purgecachedstats(M* m) |
| { |
| MCache *c; |
| |
| // Protected by either heap or GC lock. |
| c = m->mcache; |
| mstats.heap_alloc += c->local_cachealloc; |
| c->local_cachealloc = 0; |
| mstats.heap_objects += c->local_objects; |
| c->local_objects = 0; |
| mstats.nmalloc += c->local_nmalloc; |
| c->local_nmalloc = 0; |
| mstats.nfree += c->local_nfree; |
| c->local_nfree = 0; |
| mstats.nlookup += c->local_nlookup; |
| c->local_nlookup = 0; |
| mstats.alloc += c->local_alloc; |
| c->local_alloc = 0; |
| mstats.total_alloc += c->local_total_alloc; |
| c->local_total_alloc = 0; |
| } |
| |
| extern uintptr runtime_sizeof_C_MStats |
| __asm__ ("runtime.Sizeof_C_MStats"); |
| |
| #define MaxArena32 (2U<<30) |
| |
| void |
| runtime_mallocinit(void) |
| { |
| byte *p; |
| uintptr arena_size, bitmap_size; |
| extern byte end[]; |
| byte *want; |
| uintptr limit; |
| |
| runtime_sizeof_C_MStats = sizeof(MStats); |
| |
| p = nil; |
| arena_size = 0; |
| bitmap_size = 0; |
| |
| // for 64-bit build |
| USED(p); |
| USED(arena_size); |
| USED(bitmap_size); |
| |
| runtime_InitSizes(); |
| |
| limit = runtime_memlimit(); |
| |
| // Set up the allocation arena, a contiguous area of memory where |
| // allocated data will be found. The arena begins with a bitmap large |
| // enough to hold 4 bits per allocated word. |
| if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) { |
| // On a 64-bit machine, allocate from a single contiguous reservation. |
| // 16 GB should be big enough for now. |
| // |
| // The code will work with the reservation at any address, but ask |
| // SysReserve to use 0x000000f800000000 if possible. |
| // Allocating a 16 GB region takes away 36 bits, and the amd64 |
| // doesn't let us choose the top 17 bits, so that leaves the 11 bits |
| // in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means |
| // that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb. |
| // None of the bytes f8 f9 fa fb can appear in valid UTF-8, and |
| // they are otherwise as far from ff (likely a common byte) as possible. |
| // Choosing 0x00 for the leading 6 bits was more arbitrary, but it |
| // is not a common ASCII code point either. Using 0x11f8 instead |
| // caused out of memory errors on OS X during thread allocations. |
| // These choices are both for debuggability and to reduce the |
| // odds of the conservative garbage collector not collecting memory |
| // because some non-pointer block of memory had a bit pattern |
| // that matched a memory address. |
| // |
| // Actually we reserve 17 GB (because the bitmap ends up being 1 GB) |
| // but it hardly matters: fc is not valid UTF-8 either, and we have to |
| // allocate 15 GB before we get that far. |
| // |
| // If this fails we fall back to the 32-bit memory mechanism. |
| arena_size = (uintptr)(16LL<<30); |
| bitmap_size = arena_size / (sizeof(void*)*8/4); |
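| // With 8-byte words and 4 bitmap bits per word, one bitmap byte |
| // describes 16 bytes of arena, so the 16 GB arena needs a 1 GB bitmap. |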
| p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size); |
| } |
| if (p == nil) { |
| // On a 32-bit machine, we can't typically get away |
| // with a giant virtual address space reservation. |
| // Instead we map the memory information bitmap |
| // immediately after the data segment, large enough |
| // to handle another 2GB of mappings (a 256 MB bitmap), |
| // along with a reservation for another 512 MB of memory. |
| // When that gets used up, we'll start asking the kernel |
| // for any memory anywhere and hope it's in the 2GB |
| // following the bitmap (presumably the executable begins |
| // near the bottom of memory, so we'll have to use up |
| // most of memory before the kernel resorts to giving out |
| // memory before the beginning of the text segment). |
| // |
| // Alternatively we could reserve a 512 MB bitmap, enough |
| // for 4GB of mappings, and then accept any memory the |
| // kernel threw at us, but normally that's a waste of 512 MB |
| // of address space, which is probably too much in a 32-bit world. |
| bitmap_size = MaxArena32 / (sizeof(void*)*8/4); |
| arena_size = 512<<20; |
| if(limit > 0 && arena_size+bitmap_size > limit) { |
| bitmap_size = (limit / 9) & ~((1<<PageShift) - 1); |
| arena_size = bitmap_size * 8; |
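| // The bitmap gets 1/9 of the limit and the arena 8/9, so together |
| // they stay within the memory limit. |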
| } |
| |
| // SysReserve treats the address we ask for, end, as a hint, |
| // not as an absolute requirement. If we ask for the end |
| // of the data segment but the operating system requires |
| // a little more space before we can start allocating, it will |
| // give out a slightly higher pointer. Except QEMU, which |
| // is buggy, as usual: it won't adjust the pointer upward. |
| // So adjust it upward a little bit ourselves: 1/4 MB to get |
| // away from the running binary image and then round up |
| // to a MB boundary. |
| want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1)); |
| if(0xffffffff - (uintptr)want <= bitmap_size + arena_size) |
| want = 0; |
| p = runtime_SysReserve(want, bitmap_size + arena_size); |
| if(p == nil) |
| runtime_throw("runtime: cannot reserve arena virtual address space"); |
| if((uintptr)p & (((uintptr)1<<PageShift)-1)) |
| runtime_printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size); |
| } |
| if((uintptr)p & (((uintptr)1<<PageShift)-1)) |
| runtime_throw("runtime: SysReserve returned unaligned address"); |
| |
| runtime_mheap.bitmap = p; |
| runtime_mheap.arena_start = p + bitmap_size; |
| runtime_mheap.arena_used = runtime_mheap.arena_start; |
| runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size; |
| |
| // Initialize the rest of the allocator. |
| runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc); |
| runtime_m()->mcache = runtime_allocmcache(); |
| |
| // See if it works. |
| runtime_free(runtime_malloc(1)); |
| } |
| |
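| // runtime_MHeap_SysAlloc obtains n bytes of mapped arena memory for the |
| // heap, taking them from the existing reservation when possible; on 32-bit |
| // systems it first tries to grow the reservation and otherwise accepts |
| // OS-chosen memory that falls within the arena's address range. |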
| void* |
| runtime_MHeap_SysAlloc(MHeap *h, uintptr n) |
| { |
| byte *p; |
| |
| if(n > (uintptr)(h->arena_end - h->arena_used)) { |
| // We are in 32-bit mode, maybe we didn't use all possible address space yet. |
| // Reserve some more space. |
| byte *new_end; |
| uintptr needed; |
| |
| needed = (uintptr)h->arena_used + n - (uintptr)h->arena_end; |
| // Round wanted arena size to a multiple of 256MB. |
| needed = (needed + (256<<20) - 1) & ~((256<<20)-1); |
| new_end = h->arena_end + needed; |
| if(new_end <= h->arena_start + MaxArena32) { |
| p = runtime_SysReserve(h->arena_end, new_end - h->arena_end); |
| if(p == h->arena_end) |
| h->arena_end = new_end; |
| } |
| } |
| if(n <= (uintptr)(h->arena_end - h->arena_used)) { |
| // Keep taking from our reservation. |
| p = h->arena_used; |
| runtime_SysMap(p, n); |
| h->arena_used += n; |
| runtime_MHeap_MapBits(h); |
| return p; |
| } |
| |
| // If using 64-bit, our reservation is all we have. |
| if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU) |
| return nil; |
| |
| // On 32-bit, once the reservation is gone we can |
| // try to get memory at a location chosen by the OS |
| // and hope that it is in the range we allocated bitmap for. |
| p = runtime_SysAlloc(n); |
| if(p == nil) |
| return nil; |
| |
| if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) { |
| runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n", |
| p, h->arena_start, h->arena_start+MaxArena32); |
| runtime_SysFree(p, n); |
| return nil; |
| } |
| |
| if(p+n > h->arena_used) { |
| h->arena_used = p+n; |
| if(h->arena_used > h->arena_end) |
| h->arena_end = h->arena_used; |
| runtime_MHeap_MapBits(h); |
| } |
| |
| return p; |
| } |
| |
| // Runtime stubs. |
| |
| void* |
| runtime_mal(uintptr n) |
| { |
| return runtime_mallocgc(n, 0, 1, 1); |
| } |
| |
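| // Implementation of the language new operation: allocate a zeroed value of |
| // the given type, marking the block pointer-free when the type contains no |
| // pointers so the collector need not scan it. |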
| func new(typ *Type) (ret *uint8) { |
| uint32 flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0; |
| ret = runtime_mallocgc(typ->__size, flag, 1, 1); |
| } |
| |
| func GC() { |
| runtime_gc(1); |
| } |
| |
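| // SetFinalizer(obj, finalizer) associates finalizer with obj.  obj must be |
| // a pointer to the start of an allocated block, and finalizer must be nil |
| // or a function of exactly one argument whose type is obj's pointer type. |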
| func SetFinalizer(obj Eface, finalizer Eface) { |
| byte *base; |
| uintptr size; |
| const FuncType *ft; |
| |
| if(obj.__type_descriptor == nil) { |
| runtime_printf("runtime.SetFinalizer: first argument is nil interface\n"); |
| goto throw; |
| } |
| if(obj.__type_descriptor->__code != GO_PTR) { |
| runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection); |
| goto throw; |
| } |
| if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) { |
| runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n"); |
| goto throw; |
| } |
| ft = nil; |
| if(finalizer.__type_descriptor != nil) { |
| if(finalizer.__type_descriptor->__code != GO_FUNC) |
| goto badfunc; |
| ft = (const FuncType*)finalizer.__type_descriptor; |
| if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor)) |
| goto badfunc; |
| } |
| |
| if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft)) { |
| runtime_printf("runtime.SetFinalizer: finalizer already set\n"); |
| goto throw; |
| } |
| return; |
| |
| badfunc: |
| runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.__type_descriptor->__reflection, *obj.__type_descriptor->__reflection); |
| throw: |
| runtime_throw("runtime.SetFinalizer"); |
| } |