Source file src/runtime/type.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/atomic"
	"unsafe"
)

type nameOff = abi.NameOff
type typeOff = abi.TypeOff
type textOff = abi.TextOff

type _type = abi.Type

// rtype is a wrapper that allows us to define additional methods.
type rtype struct {
	*abi.Type // embedding is okay here (unlike reflect) because none of this is public
}

func (t rtype) string() string {
	s := t.nameOff(t.Str).Name()
	if t.TFlag&abi.TFlagExtraStar != 0 {
		return s[1:]
	}
	return s
}
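
// For example (illustrative): a type whose name data records "*runtime.g"
// and whose TFlagExtraStar bit is set describes runtime.g itself, so
// string() drops the leading '*' and returns "runtime.g". The flag lets
// one name record serve both T and *T.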

func (t rtype) uncommon() *uncommontype {
	return t.Uncommon()
}

func (t rtype) name() string {
	if t.TFlag&abi.TFlagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	sqBrackets := 0
	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
		switch s[i] {
		case ']':
			sqBrackets++
		case '[':
			sqBrackets--
		}
		i--
	}
	return s[i+1:]
}
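
// For example (illustrative), name() maps "runtime.bitCursor" to
// "bitCursor". The bracket counting keeps dots inside type arguments
// from being mistaken for the package qualifier: for an instantiated
// generic named "main.S[main.Pair[int,string]]", the scan cuts at the
// dot before "S" and returns "S[main.Pair[int,string]]".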

// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
func (t rtype) pkgpath() string {
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.PkgPath).Name()
	}
	switch t.Kind_ & abi.KindMask {
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t.Type))
		return st.PkgPath.Name()
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t.Type))
		return it.PkgPath.Name()
	}
	return ""
}

// getGCMask returns the pointer/nonpointer bitmask for type t.
//
// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMask(t *_type) *byte {
	if t.TFlag&abi.TFlagGCMaskOnDemand != 0 {
		// Split the rest into getGCMaskOnDemand so getGCMask itself is inlineable.
		return getGCMaskOnDemand(t)
	}
	return t.GCData
}
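
// The mask covers the type's pointer-containing prefix (its first
// PtrBytes bytes), one bit per pointer-sized word, where a set bit
// means the word may hold a pointer. For example (illustrative), a
// struct laid out as pointer word, scalar word, pointer word is
// described by the mask bits 1, 0, 1.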

// inProgress is a byte whose address is a sentinel indicating that
// some thread is currently building the GC bitmask for a type.
var inProgress byte

// nosplit because it is used during write barriers and must not be preempted.
//
//go:nosplit
func getGCMaskOnDemand(t *_type) *byte {
	// For large types, GCData doesn't point directly to a bitmask.
	// Instead it points to a pointer to a bitmask, and the runtime
	// is responsible for (on first use) creating the bitmask and
	// storing a pointer to it in that slot.
	// TODO: we could use &t.GCData as the slot, but types are
	// in read-only memory currently.
	addr := unsafe.Pointer(t.GCData)

	if GOOS == "aix" {
		addr = add(addr, firstmoduledata.data-aixStaticDataBase)
	}

	for {
		p := (*byte)(atomic.Loadp(addr))
		switch p {
		default: // Already built.
			return p
		case &inProgress: // Someone else is currently building it.
			// Just wait until the builder is done.
			// We can't block here, so spinning while having
			// the OS thread yield is about the best we can do.
			osyield()
			continue
		case nil: // Not built yet.
			// Attempt to get exclusive access to build it.
			if !atomic.Casp1((*unsafe.Pointer)(addr), nil, unsafe.Pointer(&inProgress)) {
				continue
			}

			// Build gcmask for this type.
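			// We need one mask bit per pointer-sized word of the
			// type's pointer-containing prefix, rounded up to a
			// whole number of words so the allocation below stays
			// word-aligned.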
			bytes := goarch.PtrSize * divRoundUp(t.PtrBytes/goarch.PtrSize, 8*goarch.PtrSize)
			p = (*byte)(persistentalloc(bytes, goarch.PtrSize, &memstats.other_sys))
			systemstack(func() {
				buildGCMask(t, bitCursor{ptr: p, n: 0})
			})

			// Store the newly-built gcmask for future callers.
			atomic.StorepNoWB(addr, unsafe.Pointer(p))
			return p
		}
	}
}

// A bitCursor is a simple cursor to memory to which we
// can write a set of bits.
type bitCursor struct {
	ptr *byte   // base of region
	n   uintptr // cursor points to bit n of region
}
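
// For example (illustrative), bitCursor{ptr: p, n: 13} addresses bit 5
// (13 % 8) of byte 1 (13 / 8) of the region starting at p.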

// write copies cnt bits from data (starting at data's bit 0) into the
// region at b. Requires cnt > 0.
func (b bitCursor) write(data *byte, cnt uintptr) {
	// Starting byte for writing.
	p := addb(b.ptr, b.n/8)

	// Note: if we're starting halfway through a byte, we load the
	// existing lower bits so we don't clobber them.
	n := b.n % 8                    // # of valid bits in buf
	buf := uintptr(*p) & (1<<n - 1) // buffered bits to start

	// Work 8 bits at a time.
	for cnt > 8 {
		// Read 8 more bits, now buf has 8-15 valid bits in it.
		buf |= uintptr(*data) << n
		n += 8
		data = addb(data, 1)
		cnt -= 8
		// Write 8 of the buffered bits out.
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	// Read remaining bits.
	buf |= (uintptr(*data) & (1<<cnt - 1)) << n
	n += cnt

	// Flush remaining bits.
	if n > 8 {
		*p = byte(buf)
		buf >>= 8
		n -= 8
		p = addb(p, 1)
	}
	*p &^= 1<<n - 1
	*p |= byte(buf)
}
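
// For example (illustrative), writing the 3 bits 1, 0, 1 (low bit of
// data first) at cursor bit 6 touches two bytes: bits 6 and 7 of byte 0
// receive 1 and 0 while its low six bits are preserved, and bit 0 of
// byte 1 receives the final 1.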

func (b bitCursor) offset(cnt uintptr) bitCursor {
	return bitCursor{ptr: b.ptr, n: b.n + cnt}
}

// buildGCMask writes the ptr/nonptr bitmap for t to dst.
// t must contain at least one pointer.
func buildGCMask(t *_type, dst bitCursor) {
	// Note: we want to avoid a situation where buildGCMask gets into a
	// very deep recursion, because M stacks are fixed size and pretty small
	// (16KB). We do that by ensuring that any recursive
	// call operates on a type at most half the size of its parent.
	// Thus, the recursive chain can be at most 64 calls deep (on a
	// 64-bit machine).
	// Recursion is avoided by using a "tail call" (jumping to the
	// "top" label) for any recursive call with a large subtype.
top:
	if t.PtrBytes == 0 {
		throw("pointerless type")
	}
	if t.TFlag&abi.TFlagGCMaskOnDemand == 0 {
		// copy t.GCData to dst
		dst.write(t.GCData, t.PtrBytes/goarch.PtrSize)
		return
	}
	// The above case should handle all kinds except
	// possibly arrays and structs.
	switch t.Kind() {
	case abi.Array:
		a := t.ArrayType()
		if a.Len == 1 {
			// Avoid recursive call for element type that
			// isn't smaller than the parent type.
			t = a.Elem
			goto top
		}
		e := a.Elem
		for i := uintptr(0); i < a.Len; i++ {
			buildGCMask(e, dst)
			dst = dst.offset(e.Size_ / goarch.PtrSize)
		}
	case abi.Struct:
		s := t.StructType()
		var bigField abi.StructField
		for _, f := range s.Fields {
			ft := f.Typ
			if !ft.Pointers() {
				continue
			}
			if ft.Size_ > t.Size_/2 {
				// Avoid recursive call for field type that
				// is larger than half of the parent type.
				// There can be only one.
				bigField = f
				continue
			}
			buildGCMask(ft, dst.offset(f.Offset/goarch.PtrSize))
		}
		if bigField.Typ != nil {
			// Note: this case causes bits to be written out of order.
			t = bigField.Typ
			dst = dst.offset(bigField.Offset / goarch.PtrSize)
			goto top
		}
	default:
		throw("unexpected kind")
	}
}
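
// For example (illustrative), given
//
//	type big struct {
//		a [100]*int // larger than half the struct
//		b *int
//	}
//
// buildGCMask emits b's single mask bit first and then handles a via
// the tail call, so bits can be produced out of order, but each bit is
// still written exactly once.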

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There is a wide range of possible addresses the heap may use, which
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
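//
// The m field maps each assigned offset to its pinned pointer; minv is
// the inverse map, used so a pointer is never assigned two different
// offsets. (Descriptive note; offsets are assigned in
// reflect.addReflectOff.)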
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}

func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{Bytes: (*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run-time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{Bytes: (*byte)(res)}
}

func (t rtype) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t.Type), off)
}

func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

func (t rtype) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t.Type), off)
}
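
// Both resolvers above follow the same strategy (descriptive note):
// the base pointer selects the module whose types section contains it,
// and the offset is then taken relative to that module's types section.
// A base pointer outside every module means the offset was minted at
// run time by the reflect package and is looked up in reflectOffs.m.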

func (t rtype) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
	}
	base := uintptr(unsafe.Pointer(t.Type))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := md.textAddr(uint32(off))
	return unsafe.Pointer(res)
}

type uncommontype = abi.UncommonType

type interfacetype = abi.InterfaceType

type arraytype = abi.ArrayType

type chantype = abi.ChanType

type slicetype = abi.SliceType

type functype = abi.FuncType

type ptrtype = abi.PtrType

type name = abi.Name

type structtype = abi.StructType

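// A name's first byte is a flag byte: bit 1<<0 reports whether the
// name is exported, bit 1<<1 whether tag data follows the name, and
// bit 1<<2 whether a pkgPath nameOff trails the name (and tag, if
// present). pkgPath skips past the varint-prefixed name and tag data
// to read that nameOff.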
func pkgPath(n name) string {
	if n.Bytes == nil || *n.Data(0)&(1<<2) == 0 {
		return ""
	}
	i, l := n.ReadVarint(1)
	off := 1 + i + l
	if *n.Data(0)&(1<<1) != 0 {
		i2, l2 := n.ReadVarint(off)
		off += i2 + l2
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.Data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.Bytes), nameOff)
	return pkgPathName.Name()
}

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
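//
// For example (illustrative), with buildmode=shared the program and a
// shared library may each carry their own descriptor for []string; the
// typemap built here redirects the later module's copy to the earlier
// module's, so pointer comparison of *_type values keeps working.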
func typelinksinit() {
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.Hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.Hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.Hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}

type _typePair struct {
	t1 *_type
	t2 *_type
}

func toRType(t *abi.Type) rtype {
	return rtype{t}
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// Mark these types as seen (and thus equivalent), which prevents
	// an infinite loop if the two types are identical but recursively
	// defined and loaded from different modules.
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.Kind_ & abi.KindMask
	if kind != v.Kind_&abi.KindMask {
		return false
	}
	rt, rv := toRType(t), toRType(v)
	if rt.string() != rv.string() {
		return false
	}
	ut := t.Uncommon()
	uv := v.Uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := rt.nameOff(ut.PkgPath).Name()
		pkgpathv := rv.nameOff(uv.PkgPath).Name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if abi.Bool <= kind && kind <= abi.Complex128 {
		return true
	}
	switch kind {
	case abi.String, abi.UnsafePointer:
		return true
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.Elem, av.Elem, seen) && at.Len == av.Len
	case abi.Chan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.Dir == cv.Dir && typesEqual(ct.Elem, cv.Elem, seen)
	case abi.Func:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.OutCount != fv.OutCount || ft.InCount != fv.InCount {
			return false
		}
		tin, vin := ft.InSlice(), fv.InSlice()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.OutSlice(), fv.OutSlice()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case abi.Interface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.PkgPath.Name() != iv.PkgPath.Name() {
			return false
		}
		if len(it.Methods) != len(iv.Methods) {
			return false
		}
		for i := range it.Methods {
			tm := &it.Methods[i]
			vm := &iv.Methods[i]
			// Note the Methods array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.Name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.Name)
			if tname.Name() != vname.Name() {
				return false
			}
			if pkgPath(tname) != pkgPath(vname) {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.Typ)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.Typ)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case abi.Map:
		if goexperiment.SwissMap {
			mt := (*abi.SwissMapType)(unsafe.Pointer(t))
			mv := (*abi.SwissMapType)(unsafe.Pointer(v))
			return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
		}
		mt := (*abi.OldMapType)(unsafe.Pointer(t))
		mv := (*abi.OldMapType)(unsafe.Pointer(v))
		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
	case abi.Pointer:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.Elem, pv.Elem, seen)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.Elem, sv.Elem, seen)
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.Fields) != len(sv.Fields) {
			return false
		}
		if st.PkgPath.Name() != sv.PkgPath.Name() {
			return false
		}
		for i := range st.Fields {
			tf := &st.Fields[i]
			vf := &sv.Fields[i]
			if tf.Name.Name() != vf.Name.Name() {
				return false
			}
			if !typesEqual(tf.Typ, vf.Typ, seen) {
				return false
			}
			if tf.Name.Tag() != vf.Name.Tag() {
				return false
			}
			if tf.Offset != vf.Offset {
				return false
			}
			if tf.Name.IsEmbedded() != vf.Name.IsEmbedded() {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}
