Refactor specification of supported digests
To make the definition of supported digests clearer, we have refactored the digest package around a dedicated Algorithm type. This type represents the digest's prefix, and the supported hash implementations are associated with it through method calls. Signed-off-by: Stephen J Day <stephen.day@docker.com>
parent eee6cad2cf
commit 44da954565
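For orientation, a minimal sketch of how the refactored API is used follows. It is not part of the commit; the import path and the sample program are assumptions based on the diff below.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest" // assumed import path
)

func main() {
	// Confirm an implementation is registered before using the algorithm.
	if !digest.SHA256.Available() {
		panic("sha256 implementation not available")
	}

	// Canonical is an alias for SHA256; Algorithm.New returns a Digester.
	digester := digest.Canonical.New()

	// Writes go directly to the hash returned by Digester.Hash.
	if _, err := io.Copy(digester.Hash(), strings.NewReader("hello world")); err != nil {
		panic(err)
	}

	// Digest renders the "<algorithm>:<hex>" form.
	fmt.Println(digester.Digest())
}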

@@ -86,7 +86,7 @@
 		},
 		{
 			"ImportPath": "github.com/stevvooe/resumable",
-			"Rev": "cf61dd331ceba0ab845444fdb626b9a465704e49"
+			"Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4"
 		},
 		{
 			"ImportPath": "github.com/yvasiyarov/go-metrics",

@@ -3,6 +3,9 @@ package sha256
 import (
 	"bytes"
 	"encoding/gob"
+
+	// import to ensure that our init function runs after the standard package
+	_ "crypto/sha256"
 )
 
 // Len returns the number of bytes which have been written to the digest.

@@ -3,6 +3,9 @@ package sha512
 import (
 	"bytes"
 	"encoding/gob"
+
+	// import to ensure that our init function runs after the standard package
+	_ "crypto/sha512"
 )
 
 // Len returns the number of bytes which have been written to the digest.

@@ -2,7 +2,6 @@ package digest
 
 import (
 	"bytes"
-	"crypto"
 	"fmt"
 	"hash"
 	"io"
@@ -19,9 +18,6 @@ const (
 
 	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
 	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-
-	CanonicalAlgorithm = "sha256"
-	CanonicalHash      = crypto.SHA256 // main digest algorithm used through distribution
 )
 
 // Digest allows simple protection of hex formatted digest strings, prefixed
@@ -43,7 +39,7 @@ const (
 type Digest string
 
 // NewDigest returns a Digest from alg and a hash.Hash object.
-func NewDigest(alg string, h hash.Hash) Digest {
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
 	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
 }
 
@@ -76,7 +72,7 @@ func ParseDigest(s string) (Digest, error) {
 
 // FromReader returns the most valid digest for the underlying content.
 func FromReader(rd io.Reader) (Digest, error) {
-	digester := NewCanonicalDigester()
+	digester := Canonical.New()
 
 	if _, err := io.Copy(digester.Hash(), rd); err != nil {
 		return "", err
@@ -135,8 +131,8 @@ func (d Digest) Validate() error {
 		return ErrDigestInvalidFormat
 	}
 
-	switch s[:i] {
-	case "sha256", "sha384", "sha512":
+	switch Algorithm(s[:i]) {
+	case SHA256, SHA384, SHA512:
 		break
 	default:
 		return ErrDigestUnsupported
@@ -147,8 +143,8 @@ func (d Digest) Validate() error {
 
 // Algorithm returns the algorithm portion of the digest. This will panic if
 // the underlying digest is not in a valid format.
-func (d Digest) Algorithm() string {
-	return string(d[:d.sepIndex()])
+func (d Digest) Algorithm() Algorithm {
+	return Algorithm(d[:d.sepIndex()])
 }
 
 // Hex returns the hex digest portion of the digest. This will panic if the
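
Parsed digests now carry a typed algorithm. A short illustrative sketch, not part of the commit (import path assumed):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest" // assumed import path
)

func main() {
	// The sha256 of empty data, per the DigestSha256EmptyTar constant above.
	d, err := digest.ParseDigest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}

	// Algorithm now returns the typed digest.Algorithm instead of a string,
	// so it compares directly against the package constants.
	if d.Algorithm() == digest.Canonical {
		fmt.Println("stored with the canonical algorithm:", d.Algorithm())
	}
}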

@@ -10,7 +10,7 @@ func TestParseDigest(t *testing.T) {
 	for _, testcase := range []struct {
 		input     string
 		err       error
-		algorithm string
+		algorithm Algorithm
 		hex       string
 	}{
 		{

@@ -1,6 +1,76 @@
 package digest
 
-import "hash"
+import (
+	"crypto"
+	"hash"
+)
 
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256         Algorithm = "sha256"           // sha256 with hex encoding
+	SHA384         Algorithm = "sha384"           // sha384 with hex encoding
+	SHA512         Algorithm = "sha512"           // sha512 with hex encoding
+	TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+// New returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling New.
+func (a Algorithm) New() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, nil is
+// returned. Make sure to check Available before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		return nil
+	}
+
+	return algorithms[a].New()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
 
 // Digester calculates the digest of written data. Writes should go directly
 // to the return value of Hash, while calling Digest will return the current
@@ -10,24 +80,9 @@ type Digester interface {
 	Digest() Digest
 }
 
-// NewDigester create a new Digester with the given hashing algorithm and
-// instance of that algo's hasher.
-func NewDigester(alg string, h hash.Hash) Digester {
-	return &digester{
-		alg:  alg,
-		hash: h,
-	}
-}
-
-// NewCanonicalDigester is a convenience function to create a new Digester with
-// our default settings.
-func NewCanonicalDigester() Digester {
-	return NewDigester(CanonicalAlgorithm, CanonicalHash.New())
-}
-
 // digester provides a simple digester definition that embeds a hasher.
 type digester struct {
-	alg  string
+	alg  Algorithm
 	hash hash.Hash
 }
 
@@ -36,5 +91,5 @@ func (d *digester) Hash() hash.Hash {
 }
 
 func (d *digester) Digest() Digest {
-	return NewDigest(d.alg, d.Hash())
+	return NewDigest(d.alg, d.hash)
 }
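
As the doc comments above note, New and Hash return nil values for algorithms without a registered hash, such as TarsumV1SHA256, which is declared for verification only. An illustrative sketch of the guard callers need (not part of the commit; import path assumed):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest" // assumed import path
)

func main() {
	for _, a := range []digest.Algorithm{digest.SHA256, digest.TarsumV1SHA256} {
		// TarsumV1SHA256 has no entry in the algorithms map, so Available
		// reports false and New/Hash would hand back a nil hash.
		fmt.Printf("%s available: %v\n", a, a.Available())
	}
}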

@@ -13,9 +13,9 @@ import (
 // is exposed through the digester type, which is just a hash plus a Digest
 // method.
 func TestResumableDetection(t *testing.T) {
-	d := NewCanonicalDigester()
+	d := Canonical.New()
 
 	if _, ok := d.Hash().(resumable.Hash); !ok {
-		t.Fatalf("expected digester to implement resumable: %#v, %v", d, d.Hash())
+		t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash())
 	}
 }

@@ -42,17 +42,17 @@ func NewSet() *Set {
 // values or short values. This function does not test equality,
 // rather whether the second value could match against the first
 // value.
-func checkShortMatch(alg, hex, shortAlg, shortHex string) bool {
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
 	if len(hex) == len(shortHex) {
 		if hex != shortHex {
 			return false
 		}
-		if len(shortAlg) > 0 && alg != shortAlg {
+		if len(shortAlg) > 0 && string(alg) != shortAlg {
 			return false
 		}
 	} else if !strings.HasPrefix(hex, shortHex) {
 		return false
-	} else if len(shortAlg) > 0 && alg != shortAlg {
+	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
 		return false
 	}
 	return true
@@ -68,7 +68,7 @@ func (dst *Set) Lookup(d string) (Digest, error) {
 	}
 	var (
 		searchFunc func(int) bool
-		alg        string
+		alg        Algorithm
 		hex        string
 	)
 	dgst, err := ParseDigest(d)
@@ -88,13 +88,13 @@ func (dst *Set) Lookup(d string) (Digest, error) {
 		}
 	}
 	idx := sort.Search(len(dst.entries), searchFunc)
-	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, alg, hex) {
+	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
 		return "", ErrDigestNotFound
 	}
 	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
 		return dst.entries[idx].digest, nil
 	}
-	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, alg, hex) {
+	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
 		return "", ErrDigestAmbiguous
 	}
 
@@ -172,7 +172,7 @@ func ShortCodeTable(dst *Set, length int) map[Digest]string {
 }
 
 type digestEntry struct {
-	alg    string
+	alg    Algorithm
 	val    string
 	digest Digest
 }

@@ -1,8 +1,6 @@
 package digest
 
 import (
-	"crypto/sha256"
-	"crypto/sha512"
 	"hash"
 	"io"
 	"io/ioutil"
@@ -33,7 +31,7 @@ func NewDigestVerifier(d Digest) (Verifier, error) {
 	switch alg {
 	case "sha256", "sha384", "sha512":
 		return hashVerifier{
-			hash:   newHash(alg),
+			hash:   alg.Hash(),
 			digest: d,
 		}, nil
 	default:
@@ -95,19 +93,6 @@ func (lv *lengthVerifier) Verified() bool {
 	return lv.expected == lv.len
 }
 
-func newHash(name string) hash.Hash {
-	switch name {
-	case "sha256":
-		return sha256.New()
-	case "sha384":
-		return sha512.New384()
-	case "sha512":
-		return sha512.New()
-	default:
-		panic("unsupport algorithm: " + name)
-	}
-}
-
 type hashVerifier struct {
 	digest Digest
 	hash   hash.Hash
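
With newHash gone, verifier construction flows through Algorithm.Hash. A usage sketch, assuming the package's Verifier is an io.Writer with a Verified method, as hashVerifier above suggests (not part of the commit):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/docker/distribution/digest" // assumed import path
)

func main() {
	content := "some blob content"

	// Compute a digest over the content with the canonical algorithm.
	dgst, err := digest.FromReader(strings.NewReader(content))
	if err != nil {
		panic(err)
	}

	// NewDigestVerifier selects the hash via Algorithm.Hash internally.
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		panic(err)
	}

	// Stream the content through the verifier, then check the result.
	if _, err := io.Copy(verifier, strings.NewReader(content)); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified())
}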

@@ -321,7 +321,7 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut
 	if err != nil {
 		return distribution.Descriptor{}, err
 	}
-	dgstr := digest.NewCanonicalDigester()
+	dgstr := digest.Canonical.New()
 	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
 	if err != nil {
 		return distribution.Descriptor{}, err

@@ -213,7 +213,7 @@ func TestBlobAPI(t *testing.T) {
 	// Now, push just a chunk
 	layerFile.Seek(0, 0)
 
-	canonicalDigester := digest.NewCanonicalDigester()
+	canonicalDigester := digest.Canonical.New()
 	if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
 		t.Fatalf("error copying to digest: %v", err)
 	}
@@ -637,7 +637,7 @@ func doPushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Diges
 
 // pushLayer pushes the layer content returning the url on success.
 func pushLayer(t *testing.T, ub *v2.URLBuilder, name string, dgst digest.Digest, uploadURLBase string, body io.Reader) string {
-	digester := digest.NewCanonicalDigester()
+	digester := digest.Canonical.New()
 
 	resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash()))
 	if err != nil {
@@ -702,7 +702,7 @@ func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Resp
 
 	uploadURL := u.String()
 
-	digester := digest.NewCanonicalDigester()
+	digester := digest.Canonical.New()
 
 	req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash()))
 	if err != nil {

@@ -1,6 +1,7 @@
 package storage
 
 import (
+	"errors"
 	"fmt"
 	"io"
 	"time"
@@ -13,7 +14,7 @@ import (
 )
 
 var (
-	errResumableDigestNotAvailable = fmt.Errorf("resumable digest not available")
+	errResumableDigestNotAvailable = errors.New("resumable digest not available")
 )
 
 // layerWriter is used to control the various aspects of resumable
@@ -197,7 +198,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 		// the same, we don't need to read the data from the backend. This is
 		// because we've written the entire file in the lifecycle of the
 		// current instance.
-		if bw.written == bw.size && digest.CanonicalAlgorithm == desc.Digest.Algorithm() {
+		if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
 			canonical = bw.digester.Digest()
 			verified = desc.Digest == canonical
 		}
@@ -206,7 +207,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri
 		// paths. We may be able to make the size-based check a stronger
 		// guarantee, so this may be defensive.
 		if !verified {
-			digester := digest.NewCanonicalDigester()
+			digester := digest.Canonical.New()
 
 			digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
 			if err != nil {

@@ -164,7 +164,7 @@ func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string
 		blobStore:          lbs,
 		id:                 uuid,
 		startedAt:          startedAt,
-		digester:           digest.NewCanonicalDigester(),
+		digester:           digest.Canonical.New(),
 		bufferedFileWriter: *fw,
 	}
 

@@ -262,7 +262,7 @@ func (pm *pathMapper) path(spec pathSpec) (string, error) {
 		if v.list {
 			offset = "" // Limit to the prefix for listing offsets.
 		}
-		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", v.alg, offset)...), nil
+		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
 	case repositoriesRootPathSpec:
 		return path.Join(repoPrefix...), nil
 	default:
@@ -447,7 +447,7 @@ func (uploadStartedAtPathSpec) pathSpec() {}
 type uploadHashStatePathSpec struct {
 	name   string
 	id     string
-	alg    string
+	alg    digest.Algorithm
 	offset int64
 	list   bool
 }
@@ -479,7 +479,7 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error)
 		return nil, err
 	}
 
-	algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
+	algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
 	hex := dgst.Hex()
 	prefix := []string{algorithm}