Update to use blob interfaces
Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
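For orientation, this change swaps the repository's layer service for a blob service on the client side. Below is a minimal sketch of the new flow, loosely following the updated tests in this commit; it assumes, as the client code here does, that distribution.Repository exposes Blobs(ctx), and the package and function names are illustrative:

package example

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// fetchBlob stats and then fetches a blob through the new BlobService,
// replacing the old Layers().Exists/Fetch calls.
func fetchBlob(repo distribution.Repository, dgst digest.Digest) ([]byte, error) {
	ctx := context.Background()
	blobs := repo.Blobs(ctx) // was repo.Layers()

	desc, err := blobs.Stat(ctx, dgst) // was Exists/fetchLayer
	if err != nil {
		return nil, err
	}
	fmt.Printf("blob %s: %d bytes (%s)\n", desc.Digest, desc.Length, desc.MediaType)

	return blobs.Get(ctx, dgst) // was Fetch followed by ReadAll
}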
				|  | @ -8,18 +8,15 @@ import ( | |||
| 	"io/ioutil" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/docker/distribution" | ||||
| 	"github.com/docker/distribution/context" | ||||
| 	"github.com/docker/distribution/digest" | ||||
| ) | ||||
| 
 | ||||
| type httpLayer struct { | ||||
| 	*layers | ||||
| type httpBlob struct { | ||||
| 	*repository | ||||
| 
 | ||||
| 	size      int64 | ||||
| 	digest    digest.Digest | ||||
| 	createdAt time.Time | ||||
| 	desc distribution.Descriptor | ||||
| 
 | ||||
| 	rc     io.ReadCloser // remote read closer
 | ||||
| 	brd    *bufio.Reader // internal buffered io
 | ||||
|  | @ -27,48 +24,40 @@ type httpLayer struct { | |||
| 	err    error | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) CreatedAt() time.Time { | ||||
| 	return hl.createdAt | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) Digest() digest.Digest { | ||||
| 	return hl.digest | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) Read(p []byte) (n int, err error) { | ||||
| 	if hl.err != nil { | ||||
| 		return 0, hl.err | ||||
| func (hb *httpBlob) Read(p []byte) (n int, err error) { | ||||
| 	if hb.err != nil { | ||||
| 		return 0, hb.err | ||||
| 	} | ||||
| 
 | ||||
| 	rd, err := hl.reader() | ||||
| 	rd, err := hb.reader() | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	n, err = rd.Read(p) | ||||
| 	hl.offset += int64(n) | ||||
| 	hb.offset += int64(n) | ||||
| 
 | ||||
| 	// Simulate io.EOF error if we reach filesize.
 | ||||
| 	if err == nil && hl.offset >= hl.size { | ||||
| 	if err == nil && hb.offset >= hb.desc.Length { | ||||
| 		err = io.EOF | ||||
| 	} | ||||
| 
 | ||||
| 	return n, err | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) Seek(offset int64, whence int) (int64, error) { | ||||
| 	if hl.err != nil { | ||||
| 		return 0, hl.err | ||||
| func (hb *httpBlob) Seek(offset int64, whence int) (int64, error) { | ||||
| 	if hb.err != nil { | ||||
| 		return 0, hb.err | ||||
| 	} | ||||
| 
 | ||||
| 	var err error | ||||
| 	newOffset := hl.offset | ||||
| 	newOffset := hb.offset | ||||
| 
 | ||||
| 	switch whence { | ||||
| 	case os.SEEK_CUR: | ||||
| 		newOffset += int64(offset) | ||||
| 	case os.SEEK_END: | ||||
| 		newOffset = hl.size + int64(offset) | ||||
| 		newOffset = hb.desc.Length + int64(offset) | ||||
| 	case os.SEEK_SET: | ||||
| 		newOffset = int64(offset) | ||||
| 	} | ||||
|  | @ -76,60 +65,60 @@ func (hl *httpLayer) Seek(offset int64, whence int) (int64, error) { | |||
| 	if newOffset < 0 { | ||||
| 		err = fmt.Errorf("cannot seek to negative position") | ||||
| 	} else { | ||||
| 		if hl.offset != newOffset { | ||||
| 			hl.reset() | ||||
| 		if hb.offset != newOffset { | ||||
| 			hb.reset() | ||||
| 		} | ||||
| 
 | ||||
| 		// No problems, set the offset.
 | ||||
| 		hl.offset = newOffset | ||||
| 		hb.offset = newOffset | ||||
| 	} | ||||
| 
 | ||||
| 	return hl.offset, err | ||||
| 	return hb.offset, err | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) Close() error { | ||||
| 	if hl.err != nil { | ||||
| 		return hl.err | ||||
| func (hb *httpBlob) Close() error { | ||||
| 	if hb.err != nil { | ||||
| 		return hb.err | ||||
| 	} | ||||
| 
 | ||||
| 	// close and release reader chain
 | ||||
| 	if hl.rc != nil { | ||||
| 		hl.rc.Close() | ||||
| 	if hb.rc != nil { | ||||
| 		hb.rc.Close() | ||||
| 	} | ||||
| 
 | ||||
| 	hl.rc = nil | ||||
| 	hl.brd = nil | ||||
| 	hb.rc = nil | ||||
| 	hb.brd = nil | ||||
| 
 | ||||
| 	hl.err = fmt.Errorf("httpLayer: closed") | ||||
| 	hb.err = fmt.Errorf("httpBlob: closed") | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) reset() { | ||||
| 	if hl.err != nil { | ||||
| func (hb *httpBlob) reset() { | ||||
| 	if hb.err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	if hl.rc != nil { | ||||
| 		hl.rc.Close() | ||||
| 		hl.rc = nil | ||||
| 	if hb.rc != nil { | ||||
| 		hb.rc.Close() | ||||
| 		hb.rc = nil | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) reader() (io.Reader, error) { | ||||
| 	if hl.err != nil { | ||||
| 		return nil, hl.err | ||||
| func (hb *httpBlob) reader() (io.Reader, error) { | ||||
| 	if hb.err != nil { | ||||
| 		return nil, hb.err | ||||
| 	} | ||||
| 
 | ||||
| 	if hl.rc != nil { | ||||
| 		return hl.brd, nil | ||||
| 	if hb.rc != nil { | ||||
| 		return hb.brd, nil | ||||
| 	} | ||||
| 
 | ||||
| 	// If the offset is greater than or equal to size, return an empty, noop reader.
 | ||||
| 	if hl.offset >= hl.size { | ||||
| 	if hb.offset >= hb.desc.Length { | ||||
| 		return ioutil.NopCloser(bytes.NewReader([]byte{})), nil | ||||
| 	} | ||||
| 
 | ||||
| 	blobURL, err := hl.ub.BuildBlobURL(hl.name, hl.digest) | ||||
| 	blobURL, err := hb.ub.BuildBlobURL(hb.name, hb.desc.Digest) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -139,40 +128,32 @@ func (hl *httpLayer) reader() (io.Reader, error) { | |||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if hl.offset > 0 { | ||||
| 	if hb.offset > 0 { | ||||
| 		// TODO(stevvooe): Get this working correctly.
 | ||||
| 
 | ||||
| 		// If we are at a different offset, issue a range request from there.
 | ||||
| 		req.Header.Add("Range", fmt.Sprintf("1-")) | ||||
| 		context.GetLogger(hl.context).Infof("Range: %s", req.Header.Get("Range")) | ||||
| 		context.GetLogger(hb.context).Infof("Range: %s", req.Header.Get("Range")) | ||||
| 	} | ||||
| 
 | ||||
| 	resp, err := hl.client.Do(req) | ||||
| 	resp, err := hb.client.Do(req) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	switch { | ||||
| 	case resp.StatusCode == 200: | ||||
| 		hl.rc = resp.Body | ||||
| 		hb.rc = resp.Body | ||||
| 	default: | ||||
| 		defer resp.Body.Close() | ||||
| 		return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) | ||||
| 	} | ||||
| 
 | ||||
| 	if hl.brd == nil { | ||||
| 		hl.brd = bufio.NewReader(hl.rc) | ||||
| 	if hb.brd == nil { | ||||
| 		hb.brd = bufio.NewReader(hb.rc) | ||||
| 	} else { | ||||
| 		hl.brd.Reset(hl.rc) | ||||
| 		hb.brd.Reset(hb.rc) | ||||
| 	} | ||||
| 
 | ||||
| 	return hl.brd, nil | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) Length() int64 { | ||||
| 	return hl.size | ||||
| } | ||||
| 
 | ||||
| func (hl *httpLayer) Handler(r *http.Request) (http.Handler, error) { | ||||
| 	panic("Not implemented") | ||||
| 	return hb.brd, nil | ||||
| } | ||||
|  |  | |||
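httpBlob replaces httpLayer as a lazy, seekable reader over the registry's blob endpoint: the HTTP request is only issued on the first Read, and Seek resets the connection when the offset changes. A hedged usage sketch follows (the descriptor is assumed to come from Stat, and since the Range handling above is still marked TODO, non-zero offsets may not yet work against a real registry; names are illustrative):

package example

import (
	"io/ioutil"
	"os"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// readTail opens the blob described by desc and reads everything after the
// first 512 bytes. Open returns a distribution.ReadSeekCloser backed by httpBlob.
func readTail(ctx context.Context, blobs distribution.BlobService, desc distribution.Descriptor) ([]byte, error) {
	rsc, err := blobs.Open(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer rsc.Close()

	if _, err := rsc.Seek(512, os.SEEK_SET); err != nil {
		return nil, err
	}
	return ioutil.ReadAll(rsc)
}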
|  | @ -11,10 +11,10 @@ import ( | |||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/docker/distribution" | ||||
| 	"github.com/docker/distribution/digest" | ||||
| 	"github.com/docker/distribution/context" | ||||
| ) | ||||
| 
 | ||||
| type httpLayerUpload struct { | ||||
| type httpBlobUpload struct { | ||||
| 	repo   distribution.Repository | ||||
| 	client *http.Client | ||||
| 
 | ||||
|  | @ -26,32 +26,32 @@ type httpLayerUpload struct { | |||
| 	closed   bool | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) handleErrorResponse(resp *http.Response) error { | ||||
| func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { | ||||
| 	if resp.StatusCode == http.StatusNotFound { | ||||
| 		return &BlobUploadNotFoundError{Location: hlu.location} | ||||
| 		return &BlobUploadNotFoundError{Location: hbu.location} | ||||
| 	} | ||||
| 	return handleErrorResponse(resp) | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { | ||||
| 	req, err := http.NewRequest("PATCH", hlu.location, ioutil.NopCloser(r)) | ||||
| func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { | ||||
| 	req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	defer req.Body.Close() | ||||
| 
 | ||||
| 	resp, err := hlu.client.Do(req) | ||||
| 	resp, err := hbu.client.Do(req) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	if resp.StatusCode != http.StatusAccepted { | ||||
| 		return 0, hlu.handleErrorResponse(resp) | ||||
| 		return 0, hbu.handleErrorResponse(resp) | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO(dmcgowan): Validate headers
 | ||||
| 	hlu.uuid = resp.Header.Get("Docker-Upload-UUID") | ||||
| 	hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) | ||||
| 	hbu.uuid = resp.Header.Get("Docker-Upload-UUID") | ||||
| 	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | @ -67,27 +67,27 @@ func (hlu *httpLayerUpload) ReadFrom(r io.Reader) (n int64, err error) { | |||
| 
 | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) Write(p []byte) (n int, err error) { | ||||
| 	req, err := http.NewRequest("PATCH", hlu.location, bytes.NewReader(p)) | ||||
| func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { | ||||
| 	req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hlu.offset, hlu.offset+int64(len(p)-1))) | ||||
| 	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) | ||||
| 	req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) | ||||
| 	req.Header.Set("Content-Type", "application/octet-stream") | ||||
| 
 | ||||
| 	resp, err := hlu.client.Do(req) | ||||
| 	resp, err := hbu.client.Do(req) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 
 | ||||
| 	if resp.StatusCode != http.StatusAccepted { | ||||
| 		return 0, hlu.handleErrorResponse(resp) | ||||
| 		return 0, hbu.handleErrorResponse(resp) | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO(dmcgowan): Validate headers
 | ||||
| 	hlu.uuid = resp.Header.Get("Docker-Upload-UUID") | ||||
| 	hlu.location, err = sanitizeLocation(resp.Header.Get("Location"), hlu.location) | ||||
| 	hbu.uuid = resp.Header.Get("Docker-Upload-UUID") | ||||
| 	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) | ||||
| 	if err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
|  | @ -103,8 +103,8 @@ func (hlu *httpLayerUpload) Write(p []byte) (n int, err error) { | |||
| 
 | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) Seek(offset int64, whence int) (int64, error) { | ||||
| 	newOffset := hlu.offset | ||||
| func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { | ||||
| 	newOffset := hbu.offset | ||||
| 
 | ||||
| 	switch whence { | ||||
| 	case os.SEEK_CUR: | ||||
|  | @ -115,47 +115,47 @@ func (hlu *httpLayerUpload) Seek(offset int64, whence int) (int64, error) { | |||
| 		newOffset = int64(offset) | ||||
| 	} | ||||
| 
 | ||||
| 	hlu.offset = newOffset | ||||
| 	hbu.offset = newOffset | ||||
| 
 | ||||
| 	return hlu.offset, nil | ||||
| 	return hbu.offset, nil | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) UUID() string { | ||||
| 	return hlu.uuid | ||||
| func (hbu *httpBlobUpload) ID() string { | ||||
| 	return hbu.uuid | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) StartedAt() time.Time { | ||||
| 	return hlu.startedAt | ||||
| func (hbu *httpBlobUpload) StartedAt() time.Time { | ||||
| 	return hbu.startedAt | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) Finish(digest digest.Digest) (distribution.Layer, error) { | ||||
| func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { | ||||
| 	// TODO(dmcgowan): Check if already finished, if so just fetch
 | ||||
| 	req, err := http.NewRequest("PUT", hlu.location, nil) | ||||
| 	req, err := http.NewRequest("PUT", hbu.location, nil) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 		return distribution.Descriptor{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	values := req.URL.Query() | ||||
| 	values.Set("digest", digest.String()) | ||||
| 	values.Set("digest", desc.Digest.String()) | ||||
| 	req.URL.RawQuery = values.Encode() | ||||
| 
 | ||||
| 	resp, err := hlu.client.Do(req) | ||||
| 	resp, err := hbu.client.Do(req) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 		return distribution.Descriptor{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	if resp.StatusCode != http.StatusCreated { | ||||
| 		return nil, hlu.handleErrorResponse(resp) | ||||
| 		return distribution.Descriptor{}, hbu.handleErrorResponse(resp) | ||||
| 	} | ||||
| 
 | ||||
| 	return hlu.repo.Layers().Fetch(digest) | ||||
| 	return hbu.repo.Blobs(ctx).Stat(ctx, desc.Digest) | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) Cancel() error { | ||||
| func (hbu *httpBlobUpload) Rollback(ctx context.Context) error { | ||||
| 	panic("not implemented") | ||||
| } | ||||
| 
 | ||||
| func (hlu *httpLayerUpload) Close() error { | ||||
| 	hlu.closed = true | ||||
| func (hbu *httpBlobUpload) Close() error { | ||||
| 	hbu.closed = true | ||||
| 	return nil | ||||
| } | ||||
|  |  | |||
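httpBlobUpload is the client-side distribution.BlobWriter: ReadFrom and Write PATCH data to the upload location, and Commit PUTs the digest and then Stats the resulting blob. A sketch of the flow, mirroring the monolithic upload test below (the digest and length are supplied by the caller here, Resume and Rollback remain unimplemented in this commit, and names are illustrative):

package example

import (
	"bytes"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// pushBlob streams p to the registry and commits it under dgst.
func pushBlob(ctx context.Context, blobs distribution.BlobService, p []byte, dgst digest.Digest) (distribution.Descriptor, error) {
	upload, err := blobs.Writer(ctx) // was Layers().Upload()
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// PATCH the whole payload to the upload location in one go.
	if _, err := upload.ReadFrom(bytes.NewReader(p)); err != nil {
		return distribution.Descriptor{}, err
	}

	// PUT the digest; Commit returns the descriptor via a follow-up Stat.
	return upload.Commit(ctx, distribution.Descriptor{
		Digest: dgst,
		Length: int64(len(p)),
	})
}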
|  | @ -11,8 +11,8 @@ import ( | |||
| 	"github.com/docker/distribution/testutil" | ||||
| ) | ||||
| 
 | ||||
| // Test implements distribution.LayerUpload
 | ||||
| var _ distribution.LayerUpload = &httpLayerUpload{} | ||||
| // Test implements distribution.BlobWriter
 | ||||
| var _ distribution.BlobWriter = &httpBlobUpload{} | ||||
| 
 | ||||
| func TestUploadReadFrom(t *testing.T) { | ||||
| 	_, b := newRandomBlob(64) | ||||
|  | @ -124,13 +124,13 @@ func TestUploadReadFrom(t *testing.T) { | |||
| 	e, c := testServer(m) | ||||
| 	defer c() | ||||
| 
 | ||||
| 	layerUpload := &httpLayerUpload{ | ||||
| 	blobUpload := &httpBlobUpload{ | ||||
| 		client: &http.Client{}, | ||||
| 	} | ||||
| 
 | ||||
| 	// Valid case
 | ||||
| 	layerUpload.location = e + locationPath | ||||
| 	n, err := layerUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	blobUpload.location = e + locationPath | ||||
| 	n, err := blobUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Error calling ReadFrom: %s", err) | ||||
| 	} | ||||
|  | @ -139,15 +139,15 @@ func TestUploadReadFrom(t *testing.T) { | |||
| 	} | ||||
| 
 | ||||
| 	// Bad range
 | ||||
| 	layerUpload.location = e + locationPath | ||||
| 	_, err = layerUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	blobUpload.location = e + locationPath | ||||
| 	_, err = blobUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	if err == nil { | ||||
| 		t.Fatalf("Expected error when bad range received") | ||||
| 	} | ||||
| 
 | ||||
| 	// 404
 | ||||
| 	layerUpload.location = e + locationPath | ||||
| 	_, err = layerUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	blobUpload.location = e + locationPath | ||||
| 	_, err = blobUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	if err == nil { | ||||
| 		t.Fatalf("Expected error when not found") | ||||
| 	} | ||||
|  | @ -158,8 +158,8 @@ func TestUploadReadFrom(t *testing.T) { | |||
| 	} | ||||
| 
 | ||||
| 	// 400 valid json
 | ||||
| 	layerUpload.location = e + locationPath | ||||
| 	_, err = layerUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	blobUpload.location = e + locationPath | ||||
| 	_, err = blobUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	if err == nil { | ||||
| 		t.Fatalf("Expected error when not found") | ||||
| 	} | ||||
|  | @ -181,8 +181,8 @@ func TestUploadReadFrom(t *testing.T) { | |||
| 	} | ||||
| 
 | ||||
| 	// 400 invalid json
 | ||||
| 	layerUpload.location = e + locationPath | ||||
| 	_, err = layerUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	blobUpload.location = e + locationPath | ||||
| 	_, err = blobUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	if err == nil { | ||||
| 		t.Fatalf("Expected error when not found") | ||||
| 	} | ||||
|  | @ -196,8 +196,8 @@ func TestUploadReadFrom(t *testing.T) { | |||
| 	} | ||||
| 
 | ||||
| 	// 500
 | ||||
| 	layerUpload.location = e + locationPath | ||||
| 	_, err = layerUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	blobUpload.location = e + locationPath | ||||
| 	_, err = blobUpload.ReadFrom(bytes.NewReader(b)) | ||||
| 	if err == nil { | ||||
| 		t.Fatalf("Expected error when not found") | ||||
| 	} | ||||
|  |  | |||
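The repository.go hunks that follow also add a Put convenience method that wraps the Writer/Commit flow and computes the canonical digest itself; paired with Get, it gives a simple round trip. A hedged call-site sketch (the media type and names are illustrative):

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// roundTrip uploads payload with Put and reads it back using the digest in
// the descriptor that Put returns.
func roundTrip(ctx context.Context, blobs distribution.BlobService, payload []byte) ([]byte, error) {
	desc, err := blobs.Put(ctx, "application/octet-stream", payload)
	if err != nil {
		return nil, err
	}
	return blobs.Get(ctx, desc.Digest)
}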
|  | @ -4,6 +4,7 @@ import ( | |||
| 	"bytes" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
|  | @ -55,8 +56,8 @@ func (r *repository) Name() string { | |||
| 	return r.name | ||||
| } | ||||
| 
 | ||||
| func (r *repository) Layers() distribution.LayerService { | ||||
| 	return &layers{ | ||||
| func (r *repository) Blobs(ctx context.Context) distribution.BlobService { | ||||
| 	return &blobs{ | ||||
| 		repository: r, | ||||
| 	} | ||||
| } | ||||
|  | @ -229,7 +230,7 @@ func (ms *manifests) Delete(dgst digest.Digest) error { | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| type layers struct { | ||||
| type blobs struct { | ||||
| 	*repository | ||||
| } | ||||
| 
 | ||||
|  | @ -254,25 +255,55 @@ func sanitizeLocation(location, source string) (string, error) { | |||
| 	return location, nil | ||||
| } | ||||
| 
 | ||||
| func (ls *layers) Exists(dgst digest.Digest) (bool, error) { | ||||
| 	_, err := ls.fetchLayer(dgst) | ||||
| func (ls *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { | ||||
| 	desc, err := ls.Stat(ctx, dgst) | ||||
| 	if err != nil { | ||||
| 		switch err := err.(type) { | ||||
| 		case distribution.ErrUnknownLayer: | ||||
| 			return false, nil | ||||
| 		default: | ||||
| 			return false, err | ||||
| 		} | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	reader, err := ls.Open(ctx, desc) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer reader.Close() | ||||
| 
 | ||||
| 	return ioutil.ReadAll(reader) | ||||
| } | ||||
| 
 | ||||
| func (ls *blobs) Open(ctx context.Context, desc distribution.Descriptor) (distribution.ReadSeekCloser, error) { | ||||
| 	return &httpBlob{ | ||||
| 		repository: ls.repository, | ||||
| 		desc:       desc, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| func (ls *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, desc distribution.Descriptor) error { | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (ls *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { | ||||
| 	writer, err := ls.Writer(ctx) | ||||
| 	if err != nil { | ||||
| 		return distribution.Descriptor{}, err | ||||
| 	} | ||||
| 	dgstr := digest.NewCanonicalDigester() | ||||
| 	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr)) | ||||
| 	if err != nil { | ||||
| 		return distribution.Descriptor{}, err | ||||
| 	} | ||||
| 	if n < int64(len(p)) { | ||||
| 		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) | ||||
| 	} | ||||
| 
 | ||||
| 	return true, nil | ||||
| 	desc := distribution.Descriptor{ | ||||
| 		MediaType: mediaType, | ||||
| 		Length:    int64(len(p)), | ||||
| 		Digest:    dgstr.Digest(), | ||||
| 	} | ||||
| 
 | ||||
| 	return writer.Commit(ctx, desc) | ||||
| } | ||||
| 
 | ||||
| func (ls *layers) Fetch(dgst digest.Digest) (distribution.Layer, error) { | ||||
| 	return ls.fetchLayer(dgst) | ||||
| } | ||||
| 
 | ||||
| func (ls *layers) Upload() (distribution.LayerUpload, error) { | ||||
| func (ls *blobs) Writer(ctx context.Context) (distribution.BlobWriter, error) { | ||||
| 	u, err := ls.ub.BuildBlobUploadURL(ls.name) | ||||
| 
 | ||||
| 	resp, err := ls.client.Post(u, "", nil) | ||||
|  | @ -290,7 +321,7 @@ func (ls *layers) Upload() (distribution.LayerUpload, error) { | |||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		return &httpLayerUpload{ | ||||
| 		return &httpBlobUpload{ | ||||
| 			repo:      ls.repository, | ||||
| 			client:    ls.client, | ||||
| 			uuid:      uuid, | ||||
|  | @ -302,19 +333,19 @@ func (ls *layers) Upload() (distribution.LayerUpload, error) { | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (ls *layers) Resume(uuid string) (distribution.LayerUpload, error) { | ||||
| func (ls *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { | ||||
| 	panic("not implemented") | ||||
| } | ||||
| 
 | ||||
| func (ls *layers) fetchLayer(dgst digest.Digest) (distribution.Layer, error) { | ||||
| func (ls *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { | ||||
| 	u, err := ls.ub.BuildBlobURL(ls.name, dgst) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 		return distribution.Descriptor{}, err | ||||
| 	} | ||||
| 
 | ||||
| 	resp, err := ls.client.Head(u) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 		return distribution.Descriptor{}, err | ||||
| 	} | ||||
| 	defer resp.Body.Close() | ||||
| 
 | ||||
|  | @ -323,31 +354,17 @@ func (ls *layers) fetchLayer(dgst digest.Digest) (distribution.Layer, error) { | |||
| 		lengthHeader := resp.Header.Get("Content-Length") | ||||
| 		length, err := strconv.ParseInt(lengthHeader, 10, 64) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("error parsing content-length: %v", err) | ||||
| 			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) | ||||
| 		} | ||||
| 
 | ||||
| 		var t time.Time | ||||
| 		lastModified := resp.Header.Get("Last-Modified") | ||||
| 		if lastModified != "" { | ||||
| 			t, err = http.ParseTime(lastModified) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("error parsing last-modified: %v", err) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		return &httpLayer{ | ||||
| 			layers:    ls, | ||||
| 			size:      length, | ||||
| 			digest:    dgst, | ||||
| 			createdAt: t, | ||||
| 		return distribution.Descriptor{ | ||||
| 			MediaType: resp.Header.Get("Content-Type"), | ||||
| 			Length:    length, | ||||
| 			Digest:    dgst, | ||||
| 		}, nil | ||||
| 	case resp.StatusCode == http.StatusNotFound: | ||||
| 		return nil, distribution.ErrUnknownLayer{ | ||||
| 			FSLayer: manifest.FSLayer{ | ||||
| 				BlobSum: dgst, | ||||
| 			}, | ||||
| 		} | ||||
| 		return distribution.Descriptor{}, distribution.ErrBlobUnknown | ||||
| 	default: | ||||
| 		return nil, handleErrorResponse(resp) | ||||
| 		return distribution.Descriptor{}, handleErrorResponse(resp) | ||||
| 	} | ||||
| } | ||||
|  |  | |||
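Stat replaces the old Exists call, and a missing blob now surfaces as the sentinel distribution.ErrBlobUnknown instead of ErrUnknownLayer. A sketch of an existence check built on Stat (the function name is illustrative):

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// blobExists reports whether the registry knows the blob, treating
// ErrBlobUnknown as "not found" rather than a hard failure.
func blobExists(ctx context.Context, blobs distribution.BlobService, dgst digest.Digest) (bool, error) {
	_, err := blobs.Stat(ctx, dgst)
	switch err {
	case nil:
		return true, nil
	case distribution.ErrBlobUnknown:
		return false, nil
	default:
		return false, err
	}
}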
|  | @ -5,7 +5,6 @@ import ( | |||
| 	"crypto/rand" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"net/http" | ||||
| 	"net/http/httptest" | ||||
|  | @ -15,6 +14,7 @@ import ( | |||
| 
 | ||||
| 	"code.google.com/p/go-uuid/uuid" | ||||
| 
 | ||||
| 	"github.com/docker/distribution" | ||||
| 	"github.com/docker/distribution/context" | ||||
| 	"github.com/docker/distribution/digest" | ||||
| 	"github.com/docker/distribution/manifest" | ||||
|  | @ -88,7 +88,7 @@ func addPing(m *testutil.RequestResponseMap) { | |||
| 	}) | ||||
| } | ||||
| 
 | ||||
| func TestLayerFetch(t *testing.T) { | ||||
| func TestBlobFetch(t *testing.T) { | ||||
| 	d1, b1 := newRandomBlob(1024) | ||||
| 	var m testutil.RequestResponseMap | ||||
| 	addTestFetch("test.example.com/repo1", d1, b1, &m) | ||||
|  | @ -97,17 +97,14 @@ func TestLayerFetch(t *testing.T) { | |||
| 	e, c := testServer(m) | ||||
| 	defer c() | ||||
| 
 | ||||
| 	r, err := NewRepository(context.Background(), "test.example.com/repo1", e, nil) | ||||
| 	ctx := context.Background() | ||||
| 	r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	l := r.Layers() | ||||
| 	l := r.Blobs(ctx) | ||||
| 
 | ||||
| 	layer, err := l.Fetch(d1) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	b, err := ioutil.ReadAll(layer) | ||||
| 	b, err := l.Get(ctx, d1) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | @ -118,7 +115,7 @@ func TestLayerFetch(t *testing.T) { | |||
| 	// TODO(dmcgowan): Test error cases
 | ||||
| } | ||||
| 
 | ||||
| func TestLayerExists(t *testing.T) { | ||||
| func TestBlobExists(t *testing.T) { | ||||
| 	d1, b1 := newRandomBlob(1024) | ||||
| 	var m testutil.RequestResponseMap | ||||
| 	addTestFetch("test.example.com/repo1", d1, b1, &m) | ||||
|  | @ -127,24 +124,30 @@ func TestLayerExists(t *testing.T) { | |||
| 	e, c := testServer(m) | ||||
| 	defer c() | ||||
| 
 | ||||
| 	r, err := NewRepository(context.Background(), "test.example.com/repo1", e, nil) | ||||
| 	ctx := context.Background() | ||||
| 	r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	l := r.Layers() | ||||
| 	l := r.Blobs(ctx) | ||||
| 
 | ||||
| 	ok, err := l.Exists(d1) | ||||
| 	stat, err := l.Stat(ctx, d1) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	if !ok { | ||||
| 		t.Fatalf("Blob does not exist: %s", d1) | ||||
| 
 | ||||
| 	if stat.Digest != d1 { | ||||
| 		t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO(dmcgowan): Test error cases
 | ||||
| 	if stat.Length != int64(len(b1)) { | ||||
| 		t.Fatalf("Unexpected length: %d, expected %d", stat.Length, len(b1)) | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO(dmcgowan): Test error cases and ErrBlobUnknown case
 | ||||
| } | ||||
| 
 | ||||
| func TestLayerUploadChunked(t *testing.T) { | ||||
| func TestBlobUploadChunked(t *testing.T) { | ||||
| 	dgst, b1 := newRandomBlob(1024) | ||||
| 	var m testutil.RequestResponseMap | ||||
| 	addPing(&m) | ||||
|  | @ -227,19 +230,20 @@ func TestLayerUploadChunked(t *testing.T) { | |||
| 	e, c := testServer(m) | ||||
| 	defer c() | ||||
| 
 | ||||
| 	r, err := NewRepository(context.Background(), repo, e, nil) | ||||
| 	ctx := context.Background() | ||||
| 	r, err := NewRepository(ctx, repo, e, nil) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	l := r.Layers() | ||||
| 	l := r.Blobs(ctx) | ||||
| 
 | ||||
| 	upload, err := l.Upload() | ||||
| 	upload, err := l.Writer(ctx) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 
 | ||||
| 	if upload.UUID() != uuids[0] { | ||||
| 		log.Fatalf("Unexpected UUID %s; expected %s", upload.UUID(), uuids[0]) | ||||
| 	if upload.ID() != uuids[0] { | ||||
| 		log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) | ||||
| 	} | ||||
| 
 | ||||
| 	for _, chunk := range chunks { | ||||
|  | @ -252,17 +256,20 @@ func TestLayerUploadChunked(t *testing.T) { | |||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	layer, err := upload.Finish(dgst) | ||||
| 	blob, err := upload.Commit(ctx, distribution.Descriptor{ | ||||
| 		Digest: dgst, | ||||
| 		Length: int64(len(b1)), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 
 | ||||
| 	if layer.Length() != int64(len(b1)) { | ||||
| 		t.Fatalf("Unexpected layer size: %d; expected: %d", layer.Length(), len(b1)) | ||||
| 	if blob.Length != int64(len(b1)) { | ||||
| 		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestLayerUploadMonolithic(t *testing.T) { | ||||
| func TestBlobUploadMonolithic(t *testing.T) { | ||||
| 	dgst, b1 := newRandomBlob(1024) | ||||
| 	var m testutil.RequestResponseMap | ||||
| 	addPing(&m) | ||||
|  | @ -334,19 +341,20 @@ func TestLayerUploadMonolithic(t *testing.T) { | |||
| 	e, c := testServer(m) | ||||
| 	defer c() | ||||
| 
 | ||||
| 	r, err := NewRepository(context.Background(), repo, e, nil) | ||||
| 	ctx := context.Background() | ||||
| 	r, err := NewRepository(ctx, repo, e, nil) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	l := r.Layers() | ||||
| 	l := r.Blobs(ctx) | ||||
| 
 | ||||
| 	upload, err := l.Upload() | ||||
| 	upload, err := l.Writer(ctx) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 
 | ||||
| 	if upload.UUID() != uploadID { | ||||
| 		log.Fatalf("Unexpected UUID %s; expected %s", upload.UUID(), uploadID) | ||||
| 	if upload.ID() != uploadID { | ||||
| 		log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) | ||||
| 	} | ||||
| 
 | ||||
| 	n, err := upload.ReadFrom(bytes.NewReader(b1)) | ||||
|  | @ -357,20 +365,19 @@ func TestLayerUploadMonolithic(t *testing.T) { | |||
| 		t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) | ||||
| 	} | ||||
| 
 | ||||
| 	layer, err := upload.Finish(dgst) | ||||
| 	blob, err := upload.Commit(ctx, distribution.Descriptor{ | ||||
| 		Digest: dgst, | ||||
| 		Length: int64(len(b1)), | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 
 | ||||
| 	if layer.Length() != int64(len(b1)) { | ||||
| 		t.Fatalf("Unexpected layer size: %d; expected: %d", layer.Length(), len(b1)) | ||||
| 	if blob.Length != int64(len(b1)) { | ||||
| 		t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Length, len(b1)) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestLayerUploadResume(t *testing.T) { | ||||
| 	// TODO(dmcgowan): implement
 | ||||
| } | ||||
| 
 | ||||
| func newRandomSchema1Manifest(name, tag string, blobCount int) (*manifest.SignedManifest, digest.Digest) { | ||||
| 	blobs := make([]manifest.FSLayer, blobCount) | ||||
| 	history := make([]manifest.History, blobCount) | ||||
|  | @ -447,7 +454,7 @@ func checkEqualManifest(m1, m2 *manifest.SignedManifest) error { | |||
| 		return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) | ||||
| 	} | ||||
| 	if len(m1.FSLayers) != len(m2.FSLayers) { | ||||
| 		return fmt.Errorf("fs layer length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) | ||||
| 		return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) | ||||
| 	} | ||||
| 	for i := range m1.FSLayers { | ||||
| 		if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { | ||||
|  |  | |||