util/deephash: move typeIsRecursive and canMemHash to types.go (#5386)

Also, rename canMemHash to typeIsMemHashable to be consistent with typeIsRecursive.
There are zero changes to the semantics.

Signed-off-by: Joe Tsai <joetsai@digital-static.net>
Author: Joe Tsai (committed by GitHub)
Date:   2022-08-16 09:31:19 -07:00
Parent: d53eb6fa11
Commit: 44d62b65d0

4 changed files with 206 additions and 188 deletions

@@ -364,7 +364,7 @@ func genHashStructFields(t reflect.Type) typeHasherFunc {
         fields = append(fields, fieldInfo{
             index:      i,
             typeInfo:   getTypeInfo(sf.Type),
-            canMemHash: canMemHash(sf.Type),
+            canMemHash: typeIsMemHashable(sf.Type),
             offset:     sf.Offset,
             size:       sf.Type.Size(),
         })
@@ -445,7 +445,7 @@ func genTypeHasher(t reflect.Type) typeHasherFunc {
         return (*hasher).hashString
     case reflect.Slice:
         et := t.Elem()
-        if canMemHash(et) {
+        if typeIsMemHashable(et) {
             return (*hasher).hashSliceMem
         }
         eti := getTypeInfo(et)
@@ -464,7 +464,7 @@ func genTypeHasher(t reflect.Type) typeHasherFunc {
         return genHashStructFields(t)
     case reflect.Pointer:
         et := t.Elem()
-        if canMemHash(et) {
+        if typeIsMemHashable(et) {
            return genHashPtrToMemoryRange(et)
         }
         if t.Implements(appenderToType) {
@@ -574,7 +574,7 @@ func genHashArray(t reflect.Type, eti *typeInfo) typeHasherFunc {
         return noopHasherFunc
     }
     et := t.Elem()
-    if canMemHash(et) {
+    if typeIsMemHashable(et) {
         return genHashArrayMem(t.Len(), t.Size(), eti)
     }
     n := t.Len()
@@ -625,7 +625,7 @@ func getTypeInfoLocked(t reflect.Type, incomplete map[reflect.Type]*typeInfo) *typeInfo {
     ti := &typeInfo{
         rtype:       t,
         isRecursive: typeIsRecursive(t),
-        canMemHash:  canMemHash(t),
+        canMemHash:  typeIsMemHashable(t),
     }
     incomplete[t] = ti
@@ -640,89 +640,6 @@ func getTypeInfoLocked(t reflect.Type, incomplete map[reflect.Type]*typeInfo) *typeInfo {
     return ti
 }
 
-// typeIsRecursive reports whether t has a path back to itself.
-//
-// For interfaces, it currently always reports true.
-func typeIsRecursive(t reflect.Type) bool {
-    inStack := map[reflect.Type]bool{}
-
-    var visitType func(t reflect.Type) (isRecursiveSoFar bool)
-    visitType = func(t reflect.Type) (isRecursiveSoFar bool) {
-        // Check whether we have seen this type before.
-        if inStack[t] {
-            return true
-        }
-        inStack[t] = true
-        defer func() {
-            delete(inStack, t)
-        }()
-
-        // Any type that is memory hashable must not be recursive since
-        // cycles can only occur if pointers are involved.
-        if canMemHash(t) {
-            return false
-        }
-
-        // Recursively check types that may contain pointers.
-        switch t.Kind() {
-        default:
-            panic("unhandled kind " + t.Kind().String())
-        case reflect.String, reflect.UnsafePointer, reflect.Func:
-            return false
-        case reflect.Interface:
-            // Assume the worst for now. TODO(bradfitz): in some cases
-            // we should be able to prove that it's not recursive. Not worth
-            // it for now.
-            return true
-        case reflect.Array, reflect.Chan, reflect.Pointer, reflect.Slice:
-            return visitType(t.Elem())
-        case reflect.Map:
-            return visitType(t.Key()) || visitType(t.Elem())
-        case reflect.Struct:
-            if t.String() == "intern.Value" {
-                // Otherwise its interface{} makes this return true.
-                return false
-            }
-            for i, numField := 0, t.NumField(); i < numField; i++ {
-                if visitType(t.Field(i).Type) {
-                    return true
-                }
-            }
-            return false
-        }
-    }
-    return visitType(t)
-}
-
-// canMemHash reports whether a slice of t can be hashed by looking at its
-// contiguous bytes in memory alone. (e.g. structs with gaps aren't memhashable)
-func canMemHash(t reflect.Type) bool {
-    if t.Size() == 0 {
-        return true
-    }
-    switch t.Kind() {
-    case reflect.Bool,
-        reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
-        reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
-        reflect.Float32, reflect.Float64,
-        reflect.Complex64, reflect.Complex128:
-        return true
-    case reflect.Array:
-        return canMemHash(t.Elem())
-    case reflect.Struct:
-        var sumFieldSize uintptr
-        for i, numField := 0, t.NumField(); i < numField; i++ {
-            sf := t.Field(i)
-            if !canMemHash(sf.Type) {
-                return false
-            }
-            sumFieldSize += sf.Type.Size()
-        }
-        return sumFieldSize == t.Size() // ensure no gaps
-    }
-    return false
-}
-
 func (h *hasher) hashValue(v addressableValue, forceCycleChecking bool) {
     if !v.IsValid() {
         return
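
A note on the first helper moved by this commit: typeIsRecursive is a depth-first walk that records the types currently on the visiting stack in inStack and reports a cycle the moment it revisits one of them. Below is a minimal runnable sketch of that idea, trimmed to just the pointer and struct kinds; isRecursive, list, and pair are illustrative names, not part of the commit.

package main

import (
    "fmt"
    "reflect"
)

// isRecursive is a stripped-down version of the typeIsRecursive walk
// shown in the diff, handling only pointer and struct kinds.
func isRecursive(t reflect.Type) bool {
    inStack := map[reflect.Type]bool{}
    var visit func(t reflect.Type) bool
    visit = func(t reflect.Type) bool {
        if inStack[t] {
            return true // found a path back to a type still being visited
        }
        inStack[t] = true
        defer delete(inStack, t)
        switch t.Kind() {
        case reflect.Pointer:
            return visit(t.Elem())
        case reflect.Struct:
            for i := 0; i < t.NumField(); i++ {
                if visit(t.Field(i).Type) {
                    return true
                }
            }
        }
        return false
    }
    return visit(t)
}

// list reaches itself via next (list -> *list -> list).
type list struct {
    v    int
    next *list
}

// pair contains no pointers, so no cycle is possible; the real
// implementation returns false for it even earlier, via the
// mem-hashable fast path shown in the diff.
type pair struct{ a, b int }

func main() {
    fmt.Println(isRecursive(reflect.TypeOf(list{}))) // true
    fmt.Println(isRecursive(reflect.TypeOf(pair{}))) // false
}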
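
Likewise for the second helper: canMemHash (typeIsMemHashable after the rename) only lets a struct be hashed from its raw bytes when its field sizes sum to the struct size, since alignment padding has unspecified contents. The sketch below isolates that gap check, assuming a typical 64-bit layout; fieldsFillStruct, packed, and padded are illustrative names, and the real predicate also recurses into each field's type.

package main

import (
    "fmt"
    "reflect"
)

// packed's fields tile its memory exactly: 4 + 4 == 8 == Size().
type packed struct{ a, b uint32 }

// padded has 7 bytes of alignment padding after a on 64-bit layouts:
// its field sizes sum to 1 + 8 = 9, but Size() reports 16.
type padded struct {
    a uint8
    b uint64
}

// fieldsFillStruct reproduces just the "ensure no gaps" check from the
// diff above: true only when no padding bytes hide between fields.
func fieldsFillStruct(t reflect.Type) bool {
    var sum uintptr
    for i := 0; i < t.NumField(); i++ {
        sum += t.Field(i).Type.Size()
    }
    return sum == t.Size()
}

func main() {
    fmt.Println(fieldsFillStruct(reflect.TypeOf(packed{}))) // true
    fmt.Println(fieldsFillStruct(reflect.TypeOf(padded{}))) // false
}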