| /* |
| * Copyright 2019 Dgraph Labs, Inc. and Contributors |
| * |
| * Licensed under the Apache License, Version 2.0 (the "License"); |
| * you may not use this file except in compliance with the License. |
| * You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| */ |
| |
| package z |
| |
| import ( |
| "context" |
| "sync" |
| |
| "github.com/cespare/xxhash" |
| ) |
| |
| // TODO: Figure out a way to re-use memhash for the second uint64 hash, we |
| // already know that appending bytes isn't reliable for generating a |
| // second hash (see Ristretto PR #88). |
| // |
| // We also know that while the Go runtime has a runtime memhash128 |
| // function, it's not possible to use it to generate [2]uint64 or |
| // anything resembling a 128bit hash, even though that's exactly what |
| // we need in this situation. |
| func KeyToHash(key interface{}) (uint64, uint64) { |
| if key == nil { |
| return 0, 0 |
| } |
| switch k := key.(type) { |
| case uint64: |
| return k, 0 |
| case string: |
| return MemHashString(k), xxhash.Sum64String(k) |
| case []byte: |
| return MemHash(k), xxhash.Sum64(k) |
| case byte: |
| return uint64(k), 0 |
| case int: |
| return uint64(k), 0 |
| case int32: |
| return uint64(k), 0 |
| case uint32: |
| return uint64(k), 0 |
| case int64: |
| return uint64(k), 0 |
| default: |
| panic("Key type not supported") |
| } |
| } |
| |
var (
	// dummyCloserChan is the nil channel handed out by HasBeenClosed when it
	// is called on a nil *Closer; receiving from a nil channel blocks forever,
	// so a nil Closer is never observed as "closed".
	dummyCloserChan <-chan struct{}
)
| |
// Closer holds the two things we need to close a goroutine and wait for it to
// finish: a chan to tell the goroutine to shut down, and a WaitGroup with
// which to wait for it to finish shutting down.
type Closer struct {
	// waiting balances NewCloser's initial count plus AddRunning against Done.
	waiting sync.WaitGroup

	// ctx is cancelled by Signal; its Done channel is what HasBeenClosed returns.
	ctx    context.Context
	cancel context.CancelFunc
}
| |
| // NewCloser constructs a new Closer, with an initial count on the WaitGroup. |
| func NewCloser(initial int) *Closer { |
| ret := &Closer{} |
| ret.ctx, ret.cancel = context.WithCancel(context.Background()) |
| ret.waiting.Add(initial) |
| return ret |
| } |
| |
| // AddRunning Add()'s delta to the WaitGroup. |
| func (lc *Closer) AddRunning(delta int) { |
| lc.waiting.Add(delta) |
| } |
| |
| // Ctx can be used to get a context, which would automatically get cancelled when Signal is called. |
| func (lc *Closer) Ctx() context.Context { |
| if lc == nil { |
| return context.Background() |
| } |
| return lc.ctx |
| } |
| |
| // Signal signals the HasBeenClosed signal. |
| func (lc *Closer) Signal() { |
| // Todo(ibrahim): Change Signal to return error on next badger breaking change. |
| lc.cancel() |
| } |
| |
| // HasBeenClosed gets signaled when Signal() is called. |
| func (lc *Closer) HasBeenClosed() <-chan struct{} { |
| if lc == nil { |
| return dummyCloserChan |
| } |
| return lc.ctx.Done() |
| } |
| |
| // Done calls Done() on the WaitGroup. |
| func (lc *Closer) Done() { |
| if lc == nil { |
| return |
| } |
| lc.waiting.Done() |
| } |
| |
| // Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done |
| // calls to balance out.) |
| func (lc *Closer) Wait() { |
| lc.waiting.Wait() |
| } |
| |
| // SignalAndWait calls Signal(), then Wait(). |
| func (lc *Closer) SignalAndWait() { |
| lc.Signal() |
| lc.Wait() |
| } |
| |
// ZeroOut zeroes out all the bytes in the range [start, end).
// Out-of-range arguments are clamped rather than panicking: a start outside
// [0, len(dst)) or an empty/inverted range is a no-op, and end is capped at
// len(dst).
func ZeroOut(dst []byte, start, end int) {
	if start < 0 || start >= len(dst) {
		return
	}
	if end > len(dst) {
		end = len(dst)
	}
	if end <= start {
		return
	}
	// The compiler recognizes this canonical loop and lowers it to a memclr.
	b := dst[start:end]
	for i := range b {
		b[i] = 0
	}
}