Merge pull request #2815 from bainsy88/issue_2814

Add code to handle pagination of parts. Fixes max layer size of 10GB bug.
commit a01c71e247
@@ -539,9 +539,9 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
 
 // Writer returns a FileWriter which will store the content written to it
 // at the location designated by "path" after the call to Commit.
-func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
+func (d *driver) Writer(ctx context.Context, path string, appendParam bool) (storagedriver.FileWriter, error) {
 	key := d.s3Path(path)
-	if !append {
+	if !appendParam {
 		// TODO (brianbland): cancel other uploads at this path
 		resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
 			Bucket:               aws.String(d.Bucket),
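The rename in this first hunk is not cosmetic: a parameter named append shadows Go's builtin append inside Writer, and the pagination loop added in the hunks below calls that builtin. A minimal, runnable sketch of the shadowing issue, with hypothetical names:

package main

import "fmt"

// collect is a hypothetical stand-in for Writer: because the parameter is
// named appendParam rather than append, the builtin append is still
// reachable inside the function body.
func collect(appendParam bool, parts []string) []string {
	if appendParam {
		// With a parameter named "append" this line would not compile,
		// since "append" would then name a bool, not the builtin.
		parts = append(parts, "next-part")
	}
	return parts
}

func main() {
	fmt.Println(collect(true, []string{"part-1"}))
}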
@@ -564,7 +564,7 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
 	if err != nil {
 		return nil, parseError(path, err)
 	}
-
+	var allParts []*s3.Part
 	for _, multi := range resp.Uploads {
 		if key != *multi.Key {
 			continue
@@ -577,11 +577,20 @@ func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
 		if err != nil {
 			return nil, parseError(path, err)
 		}
-		var multiSize int64
-		for _, part := range resp.Parts {
-			multiSize += *part.Size
+		allParts = append(allParts, resp.Parts...)
+		for *resp.IsTruncated {
+			resp, err = d.S3.ListParts(&s3.ListPartsInput{
+				Bucket:           aws.String(d.Bucket),
+				Key:              aws.String(key),
+				UploadId:         multi.UploadId,
+				PartNumberMarker: resp.NextPartNumberMarker,
+			})
+			if err != nil {
+				return nil, parseError(path, err)
+			}
+			allParts = append(allParts, resp.Parts...)
 		}
-		return d.newWriter(key, *multi.UploadId, resp.Parts), nil
+		return d.newWriter(key, *multi.UploadId, allParts), nil
 	}
 	return nil, storagedriver.PathNotFoundError{Path: path}
 }
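Why this fixes the 10GB cap: S3's ListParts returns at most 1,000 parts per call and signals further pages via IsTruncated and NextPartNumberMarker. The old code consumed only the first page, so a resumed multipart upload saw at most 1,000 parts; with the driver's default 10 MB chunk size that caps a layer at roughly 10 GB. The new loop feeds NextPartNumberMarker back in as PartNumberMarker until IsTruncated is false, collecting every part into allParts.

A self-contained sketch of the same pagination pattern against the aws-sdk-go v1 client this driver uses; the helper name listAllParts and the bucket/key/upload values are hypothetical, not part of the PR:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAllParts pages through ListParts until S3 reports no more pages,
// accumulating every part of the multipart upload.
func listAllParts(client *s3.S3, bucket, key, uploadID string) ([]*s3.Part, error) {
	var allParts []*s3.Part
	input := &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	}
	for {
		resp, err := client.ListParts(input)
		if err != nil {
			return nil, err
		}
		allParts = append(allParts, resp.Parts...)
		if resp.IsTruncated == nil || !*resp.IsTruncated {
			return allParts, nil
		}
		// NextPartNumberMarker tells S3 where the next page starts.
		input.PartNumberMarker = resp.NextPartNumberMarker
	}
}

func main() {
	sess := session.Must(session.NewSession())
	parts, err := listAllParts(s3.New(sess), "my-bucket", "my-key", "my-upload-id")
	if err != nil {
		fmt.Println("ListParts failed:", err)
		return
	}
	fmt.Printf("collected %d parts\n", len(parts))
}

Unlike the merged code, which dereferences *resp.IsTruncated directly, the sketch nil-checks the pointer before reading it; reusing one input struct and updating its PartNumberMarker keeps the loop body flat.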