format code with gofumpt
gofumpt (https://github.com/mvdan/gofumpt) provides a superset of `gofmt` / `go fmt` and addresses various formatting issues that linters may check for. We could consider enabling the `gofumpt` linter to verify formatting in CI, but not every developer may have it installed, so for now this change runs it once to get the formatting in shape.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
parent 7f9f86c411
commit e0281dc609
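All of the hunks below are mechanical formatting changes: blank lines trimmed from the start and end of function bodies, single-element var groups unwrapped and adjacent var declarations grouped, `var x = ...` inside functions rewritten as `x := ...`, octal file modes switched to the `0o` prefix, doc-comment examples re-indented with tabs and their section headings rewritten in the `# Heading` form, and non-directive comments given a space after the slashes. As a rough sketch of the resulting style, here is a small standalone Go file written the way gofumpt leaves it; the file, names, and path are invented for illustration and are not part of this commit. The comments point at the pre-formatting forms that appear on the left-hand side of the hunks.

package main

import (
	"log"
	"os"
)

// Before formatting, this declaration was wrapped in a single-element group:
//
//	var (
//		configPath = "/tmp/gofumpt-demo.conf"
//	)
//
// gofumpt unwraps such groups, as several hunks below show.
var configPath = "/tmp/gofumpt-demo.conf"

func main() {
	// The blank line that used to open this function body is removed; in the
	// diff below, declarations like `var perm = os.FileMode(0600)` also become
	// short declarations, and octal literals gain the 0o prefix.
	perm := os.FileMode(0o600)

	f, err := os.OpenFile(configPath, os.O_WRONLY|os.O_CREATE, perm)
	if err != nil {
		// A comment written as "//no space" gains a space after the slashes
		// (compare the `//Ignore configures ...` hunk below).
		log.Fatalf("open %s: %v", configPath, err)
	}
	defer f.Close()
}

If formatting verification is added to CI later, golangci-lint ships a gofumpt linter that can be enabled in its configuration, and running `gofumpt -l .` (which lists files needing reformatting) is a lighter-weight option; both are left out of this change on purpose.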
					
				|  | @ -62,7 +62,6 @@ func main() { | |||
| 	if flag.NArg() > 0 { | ||||
| 		for _, path := range flag.Args() { | ||||
| 			fp, err := os.Open(path) | ||||
| 
 | ||||
| 			if err != nil { | ||||
| 				log.Printf("%s: %v", path, err) | ||||
| 				fail = true | ||||
|  |  | |||
|  | @ -4,7 +4,7 @@ | |||
| // For example, to generate a new API specification, one would execute the
 | ||||
| // following command from the repo root:
 | ||||
| //
 | ||||
| // 	$ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md
 | ||||
| //	$ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md
 | ||||
| //
 | ||||
| // The templates are passed in the api/v2.APIDescriptor object. Please see the
 | ||||
| // package documentation for fields available on that object. The template
 | ||||
|  | @ -27,7 +27,6 @@ import ( | |||
| var spaceRegex = regexp.MustCompile(`\n\s*`) | ||||
| 
 | ||||
| func main() { | ||||
| 
 | ||||
| 	if len(os.Args) != 2 { | ||||
| 		log.Fatalln("please specify a template to execute.") | ||||
| 	} | ||||
|  | @ -127,5 +126,4 @@ end: | |||
| 	} | ||||
| 
 | ||||
| 	return output | ||||
| 
 | ||||
| } | ||||
|  |  | |||
|  | @ -589,7 +589,7 @@ type Events struct { | |||
| 	IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events
 | ||||
| } | ||||
| 
 | ||||
| //Ignore configures mediaTypes and actions of the event, that it won't be propagated
 | ||||
| // Ignore configures mediaTypes and actions of the event, that it won't be propagated
 | ||||
| type Ignore struct { | ||||
| 	MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
 | ||||
| 	Actions    []string `yaml:"actions"`    // ignore action types
 | ||||
|  |  | |||
|  | @ -360,7 +360,6 @@ func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { | |||
| 
 | ||||
| 	_, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) | ||||
| 	c.Assert(err, NotNil) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // TestParseWithDifferentEnvReporting validates that environment variables
 | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| //go:build gofuzz
 | ||||
| // +build gofuzz
 | ||||
| 
 | ||||
| package configuration | ||||
|  |  | |||
|  | @ -4,68 +4,68 @@ | |||
| //
 | ||||
| // The easiest way to get started is to get the background context:
 | ||||
| //
 | ||||
| // 	ctx := context.Background()
 | ||||
| //	ctx := context.Background()
 | ||||
| //
 | ||||
| // The returned context should be passed around your application and be the
 | ||||
| // root of all other context instances. If the application has a version, this
 | ||||
| // line should be called before anything else:
 | ||||
| //
 | ||||
| // 	ctx := context.WithVersion(context.Background(), version)
 | ||||
| //	ctx := context.WithVersion(context.Background(), version)
 | ||||
| //
 | ||||
| // The above will store the version in the context and will be available to
 | ||||
| // the logger.
 | ||||
| //
 | ||||
| // Logging
 | ||||
| // # Logging
 | ||||
| //
 | ||||
| // The most useful aspect of this package is GetLogger. This function takes
 | ||||
| // any context.Context interface and returns the current logger from the
 | ||||
| // context. Canonical usage looks like this:
 | ||||
| //
 | ||||
| // 	GetLogger(ctx).Infof("something interesting happened")
 | ||||
| //	GetLogger(ctx).Infof("something interesting happened")
 | ||||
| //
 | ||||
| // GetLogger also takes optional key arguments. The keys will be looked up in
 | ||||
| // the context and reported with the logger. The following example would
 | ||||
| // return a logger that prints the version with each log message:
 | ||||
| //
 | ||||
| // 	ctx := context.Context(context.Background(), "version", version)
 | ||||
| // 	GetLogger(ctx, "version").Infof("this log message has a version field")
 | ||||
| //	ctx := context.Context(context.Background(), "version", version)
 | ||||
| //	GetLogger(ctx, "version").Infof("this log message has a version field")
 | ||||
| //
 | ||||
| // The above would print out a log message like this:
 | ||||
| //
 | ||||
| // 	INFO[0000] this log message has a version field        version=v2.0.0-alpha.2.m
 | ||||
| //	INFO[0000] this log message has a version field        version=v2.0.0-alpha.2.m
 | ||||
| //
 | ||||
| // When used with WithLogger, we gain the ability to decorate the context with
 | ||||
| // loggers that have information from disparate parts of the call stack.
 | ||||
| // Following from the version example, we can build a new context with the
 | ||||
| // configured logger such that we always print the version field:
 | ||||
| //
 | ||||
| // 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
 | ||||
| //	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
 | ||||
| //
 | ||||
| // Since the logger has been pushed to the context, we can now get the version
 | ||||
| // field for free with our log messages. Future calls to GetLogger on the new
 | ||||
| // context will have the version field:
 | ||||
| //
 | ||||
| // 	GetLogger(ctx).Infof("this log message has a version field")
 | ||||
| //	GetLogger(ctx).Infof("this log message has a version field")
 | ||||
| //
 | ||||
| // This becomes more powerful when we start stacking loggers. Let's say we
 | ||||
| // have the version logger from above but also want a request id. Using the
 | ||||
| // context above, in our request scoped function, we place another logger in
 | ||||
| // the context:
 | ||||
| //
 | ||||
| // 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
 | ||||
| // 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
 | ||||
| //	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
 | ||||
| //	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
 | ||||
| //
 | ||||
| // When GetLogger is called on the new context, "http.request.id" will be
 | ||||
| // included as a logger field, along with the original "version" field:
 | ||||
| //
 | ||||
| // 	INFO[0000] this log message has a version field        http.request.id=unique id version=v2.0.0-alpha.2.m
 | ||||
| //	INFO[0000] this log message has a version field        http.request.id=unique id version=v2.0.0-alpha.2.m
 | ||||
| //
 | ||||
| // Note that this only affects the new context, the previous context, with the
 | ||||
| // version field, can be used independently. Put another way, the new logger,
 | ||||
| // added to the request context, is unique to that context and can have
 | ||||
| // request scoped variables.
 | ||||
| //
 | ||||
| // HTTP Requests
 | ||||
| // # HTTP Requests
 | ||||
| //
 | ||||
| // This package also contains several methods for working with http requests.
 | ||||
| // The concepts are very similar to those described above. We simply place the
 | ||||
|  | @ -73,13 +73,13 @@ | |||
| // available. GetRequestLogger can then be called to get request specific
 | ||||
| // variables in a log line:
 | ||||
| //
 | ||||
| // 	ctx = WithRequest(ctx, req)
 | ||||
| // 	GetRequestLogger(ctx).Infof("request variables")
 | ||||
| //	ctx = WithRequest(ctx, req)
 | ||||
| //	GetRequestLogger(ctx).Infof("request variables")
 | ||||
| //
 | ||||
| // Like above, if we want to include the request data in all log messages in
 | ||||
| // the context, we push the logger to a new context and use that one:
 | ||||
| //
 | ||||
| // 	ctx = WithLogger(ctx, GetRequestLogger(ctx))
 | ||||
| //	ctx = WithLogger(ctx, GetRequestLogger(ctx))
 | ||||
| //
 | ||||
| // The concept is fairly powerful and ensures that calls throughout the stack
 | ||||
| // can be traced in log messages. Using the fields like "http.request.id", one
 | ||||
|  |  | |||
|  | @ -24,16 +24,16 @@ import ( | |||
| //
 | ||||
| // Here is an example of the usage:
 | ||||
| //
 | ||||
| // 	func timedOperation(ctx Context) {
 | ||||
| // 		ctx, done := WithTrace(ctx)
 | ||||
| // 		defer done("this will be the log message")
 | ||||
| // 		// ... function body ...
 | ||||
| // 	}
 | ||||
| //	func timedOperation(ctx Context) {
 | ||||
| //		ctx, done := WithTrace(ctx)
 | ||||
| //		defer done("this will be the log message")
 | ||||
| //		// ... function body ...
 | ||||
| //	}
 | ||||
| //
 | ||||
| // If the function ran for roughly 1s, such a usage would emit a log message
 | ||||
| // as follows:
 | ||||
| //
 | ||||
| // 	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/distribution/distribution/context.traceOperation trace.id=<id> ...
 | ||||
| //	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/distribution/distribution/context.traceOperation trace.id=<id> ...
 | ||||
| //
 | ||||
| // Notice that the function name is automatically resolved, along with the
 | ||||
| // package and a trace id is emitted that can be linked with parent ids.
 | ||||
|  |  | |||
|  | @ -20,9 +20,7 @@ import ( | |||
| 	"github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	enforceRepoClass bool | ||||
| ) | ||||
| var enforceRepoClass bool | ||||
| 
 | ||||
| func main() { | ||||
| 	var ( | ||||
|  | @ -110,7 +108,6 @@ func main() { | |||
| 	if err != nil { | ||||
| 		logrus.Infof("Error serving: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // handlerWithContext wraps the given context-aware handler by setting up the
 | ||||
|  |  | |||
|  | @ -5,11 +5,10 @@ import ( | |||
| 	"crypto/rsa" | ||||
| 	"encoding/base64" | ||||
| 	"errors" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/distribution/distribution/v3/registry/auth" | ||||
| 	"github.com/docker/libtrust" | ||||
| ) | ||||
|  | @ -49,7 +48,6 @@ func TestCreateJWTSuccessWithEmptyACL(t *testing.T) { | |||
| 	if !strings.Contains(json, "test") { | ||||
| 		t.Fatal("Valid token was not generated.") | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func decodeJWT(rawToken string) (string, error) { | ||||
|  | @ -74,7 +72,7 @@ func joseBase64Decode(s string) (string, error) { | |||
| 	} | ||||
| 	data, err := base64.StdEncoding.DecodeString(s) | ||||
| 	if err != nil { | ||||
| 		return "", err //errors.New("Error in Decoding base64 String")
 | ||||
| 		return "", err // errors.New("Error in Decoding base64 String")
 | ||||
| 	} | ||||
| 	return string(data), nil | ||||
| } | ||||
|  |  | |||
|  | @ -187,7 +187,6 @@ func TestAll(t *testing.T) { | |||
| 			t.Fatalf("Missing element at position %d: %s", i, dgst) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func assertEqualShort(t *testing.T, actual, expected string) { | ||||
|  | @ -363,9 +362,11 @@ func BenchmarkLookup1000(b *testing.B) { | |||
| func BenchmarkShortCode10(b *testing.B) { | ||||
| 	benchShortCodeNTable(b, 10, 12) | ||||
| } | ||||
| 
 | ||||
| func BenchmarkShortCode100(b *testing.B) { | ||||
| 	benchShortCodeNTable(b, 100, 12) | ||||
| } | ||||
| 
 | ||||
| func BenchmarkShortCode1000(b *testing.B) { | ||||
| 	benchShortCodeNTable(b, 1000, 12) | ||||
| } | ||||
|  |  | |||
|  | @ -7,9 +7,7 @@ import ( | |||
| 	"github.com/distribution/distribution/v3/health" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	updater = health.NewStatusUpdater() | ||||
| ) | ||||
| var updater = health.NewStatusUpdater() | ||||
| 
 | ||||
| // DownHandler registers a manual_http_status that always returns an Error
 | ||||
| func DownHandler(w http.ResponseWriter, r *http.Request) { | ||||
|  |  | |||
|  | @ -13,29 +13,29 @@ | |||
| // particularly useful for checks that verify upstream connectivity or
 | ||||
| // database status, since they might take a long time to return/timeout.
 | ||||
| //
 | ||||
| // Installing
 | ||||
| // # Installing
 | ||||
| //
 | ||||
| // To install health, just import it in your application:
 | ||||
| //
 | ||||
| //  import "github.com/distribution/distribution/v3/health"
 | ||||
| //	import "github.com/distribution/distribution/v3/health"
 | ||||
| //
 | ||||
| // You can also (optionally) import "health/api" that will add two convenience
 | ||||
| // endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add
 | ||||
| // "manual" checks that allow the service to quickly be brought in/out of
 | ||||
| // rotation.
 | ||||
| //
 | ||||
| //  import _ "github.com/distribution/distribution/v3/health/api"
 | ||||
| //	import _ "github.com/distribution/distribution/v3/health/api"
 | ||||
| //
 | ||||
| //  # curl localhost:5001/debug/health
 | ||||
| //  {}
 | ||||
| //  # curl -X POST localhost:5001/debug/health/down
 | ||||
| //  # curl localhost:5001/debug/health
 | ||||
| //  {"manual_http_status":"Manual Check"}
 | ||||
| //	# curl localhost:5001/debug/health
 | ||||
| //	{}
 | ||||
| //	# curl -X POST localhost:5001/debug/health/down
 | ||||
| //	# curl localhost:5001/debug/health
 | ||||
| //	{"manual_http_status":"Manual Check"}
 | ||||
| //
 | ||||
| // After importing these packages to your main application, you can start
 | ||||
| // registering checks.
 | ||||
| //
 | ||||
| // Registering Checks
 | ||||
| // # Registering Checks
 | ||||
| //
 | ||||
| // The recommended way of registering checks is using a periodic Check.
 | ||||
| // PeriodicChecks run on a certain schedule and asynchronously update the
 | ||||
|  | @ -45,22 +45,22 @@ | |||
| // A trivial example of a check that runs every 5 seconds and shuts down our
 | ||||
| // server if the current minute is even, could be added as follows:
 | ||||
| //
 | ||||
| //  func currentMinuteEvenCheck() error {
 | ||||
| //    m := time.Now().Minute()
 | ||||
| //    if m%2 == 0 {
 | ||||
| //      return errors.New("Current minute is even!")
 | ||||
| //    }
 | ||||
| //    return nil
 | ||||
| //  }
 | ||||
| //	func currentMinuteEvenCheck() error {
 | ||||
| //	  m := time.Now().Minute()
 | ||||
| //	  if m%2 == 0 {
 | ||||
| //	    return errors.New("Current minute is even!")
 | ||||
| //	  }
 | ||||
| //	  return nil
 | ||||
| //	}
 | ||||
| //
 | ||||
| //  health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
 | ||||
| //	health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5)
 | ||||
| //
 | ||||
| // Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to
 | ||||
| // implement the exact same check, but add a threshold of failures after which
 | ||||
| // the check will be unhealthy. This is particularly useful for flaky Checks,
 | ||||
| // ensuring some stability of the service when handling them.
 | ||||
| //
 | ||||
| //  health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
 | ||||
| //	health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4)
 | ||||
| //
 | ||||
| // The lowest-level way to interact with the health package is calling
 | ||||
| // "Register" directly. Register allows you to pass in an arbitrary string and
 | ||||
|  | @ -72,7 +72,7 @@ | |||
| // Assuming you wish to register a method called "currentMinuteEvenCheck()
 | ||||
| // error" you could do that by doing:
 | ||||
| //
 | ||||
| //  health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
 | ||||
| //	health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck))
 | ||||
| //
 | ||||
| // CheckFunc is a convenience type that implements Checker.
 | ||||
| //
 | ||||
|  | @ -80,11 +80,11 @@ | |||
| // and the convenience method RegisterFunc. An example that makes the status
 | ||||
| // endpoint always return an error:
 | ||||
| //
 | ||||
| //  health.RegisterFunc("my_check", func() error {
 | ||||
| //   return Errors.new("This is an error!")
 | ||||
| //  }))
 | ||||
| //	health.RegisterFunc("my_check", func() error {
 | ||||
| //	 return Errors.new("This is an error!")
 | ||||
| //	}))
 | ||||
| //
 | ||||
| // Examples
 | ||||
| // # Examples
 | ||||
| //
 | ||||
| // You could also use the health checker mechanism to ensure your application
 | ||||
| // only comes up if certain conditions are met, or to allow the developer to
 | ||||
|  | @ -92,35 +92,35 @@ | |||
| // database connectivity and immediately takes the server out of rotation on
 | ||||
| // err:
 | ||||
| //
 | ||||
| //  updater = health.NewStatusUpdater()
 | ||||
| //   health.RegisterFunc("database_check", func() error {
 | ||||
| //    return updater.Check()
 | ||||
| //  }))
 | ||||
| //	updater = health.NewStatusUpdater()
 | ||||
| //	 health.RegisterFunc("database_check", func() error {
 | ||||
| //	  return updater.Check()
 | ||||
| //	}))
 | ||||
| //
 | ||||
| //  conn, err := Connect(...) // database call here
 | ||||
| //  if err != nil {
 | ||||
| //    updater.Update(errors.New("Error connecting to the database: " + err.Error()))
 | ||||
| //  }
 | ||||
| //	conn, err := Connect(...) // database call here
 | ||||
| //	if err != nil {
 | ||||
| //	  updater.Update(errors.New("Error connecting to the database: " + err.Error()))
 | ||||
| //	}
 | ||||
| //
 | ||||
| // You can also use the predefined Checkers that come included with the health
 | ||||
| // package. First, import the checks:
 | ||||
| //
 | ||||
| //  import "github.com/distribution/distribution/v3/health/checks
 | ||||
| //	import "github.com/distribution/distribution/v3/health/checks
 | ||||
| //
 | ||||
| // After that you can make use of any of the provided checks. An example of
 | ||||
| // using a `FileChecker` to take the application out of rotation if a certain
 | ||||
| // file exists can be done as follows:
 | ||||
| //
 | ||||
| //  health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
 | ||||
| //	health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5))
 | ||||
| //
 | ||||
| // After registering the check, it is trivial to take an application out of
 | ||||
| // rotation from the console:
 | ||||
| //
 | ||||
| //  # curl localhost:5001/debug/health
 | ||||
| //  {}
 | ||||
| //  # touch /tmp/disable
 | ||||
| //  # curl localhost:5001/debug/health
 | ||||
| //  {"fileChecker":"file exists"}
 | ||||
| //	# curl localhost:5001/debug/health
 | ||||
| //	{}
 | ||||
| //	# touch /tmp/disable
 | ||||
| //	# curl localhost:5001/debug/health
 | ||||
| //	{"fileChecker":"file exists"}
 | ||||
| //
 | ||||
| // FileChecker only accepts absolute or relative file path. It does not work
 | ||||
| // properly with tilde(~). You should make sure that the application has
 | ||||
|  | @ -132,5 +132,5 @@ | |||
| // "HTTPChecker", but ensure that you only mark the test unhealthy if there
 | ||||
| // are a minimum of two failures in a row:
 | ||||
| //
 | ||||
| //  health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
 | ||||
| //	health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2))
 | ||||
| package health | ||||
|  |  | |||
|  | @ -11,14 +11,12 @@ import ( | |||
| 	v1 "github.com/opencontainers/image-spec/specs-go/v1" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// SchemaVersion provides a pre-initialized version structure for this
 | ||||
| 	// packages version of the manifest.
 | ||||
| 	SchemaVersion = manifest.Versioned{ | ||||
| 		SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
 | ||||
| 		MediaType:     v1.MediaTypeImageManifest, | ||||
| 	} | ||||
| ) | ||||
| // SchemaVersion provides a pre-initialized version structure for this
 | ||||
| // packages version of the manifest.
 | ||||
| var SchemaVersion = manifest.Versioned{ | ||||
| 	SchemaVersion: 2, // historical value here.. does not pertain to OCI or docker version
 | ||||
| 	MediaType:     v1.MediaTypeImageManifest, | ||||
| } | ||||
| 
 | ||||
| func init() { | ||||
| 	ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { | ||||
|  |  | |||
|  | @ -20,13 +20,11 @@ const ( | |||
| 	MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// SchemaVersion provides a pre-initialized version structure for this
 | ||||
| 	// packages version of the manifest.
 | ||||
| 	SchemaVersion = manifest.Versioned{ | ||||
| 		SchemaVersion: 1, | ||||
| 	} | ||||
| ) | ||||
| // SchemaVersion provides a pre-initialized version structure for this
 | ||||
| // packages version of the manifest.
 | ||||
| var SchemaVersion = manifest.Versioned{ | ||||
| 	SchemaVersion: 1, | ||||
| } | ||||
| 
 | ||||
| func init() { | ||||
| 	schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { | ||||
|  | @ -149,7 +147,6 @@ func (sm SignedManifest) References() []distribution.Descriptor { | |||
| 	} | ||||
| 
 | ||||
| 	return dependencies | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
 | ||||
|  |  | |||
|  | @ -42,7 +42,6 @@ func TestManifestUnmarshaling(t *testing.T) { | |||
| 	if !reflect.DeepEqual(&signed, env.signed) { | ||||
| 		t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestManifestVerification(t *testing.T) { | ||||
|  |  | |||
|  | @ -65,7 +65,6 @@ func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) | |||
| 	mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) | ||||
| 	mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) | ||||
| 	return nil | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // References returns the current references added to this builder
 | ||||
|  |  | |||
|  | @ -33,14 +33,12 @@ const ( | |||
| 	MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// SchemaVersion provides a pre-initialized version structure for this
 | ||||
| 	// packages version of the manifest.
 | ||||
| 	SchemaVersion = manifest.Versioned{ | ||||
| 		SchemaVersion: 2, | ||||
| 		MediaType:     MediaTypeManifest, | ||||
| 	} | ||||
| ) | ||||
| // SchemaVersion provides a pre-initialized version structure for this
 | ||||
| // packages version of the manifest.
 | ||||
| var SchemaVersion = manifest.Versioned{ | ||||
| 	SchemaVersion: 2, | ||||
| 	MediaType:     MediaTypeManifest, | ||||
| } | ||||
| 
 | ||||
| func init() { | ||||
| 	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { | ||||
|  | @ -119,7 +117,6 @@ func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { | |||
| 	if manifest.MediaType != MediaTypeManifest { | ||||
| 		return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", | ||||
| 			MediaTypeManifest, manifest.MediaType) | ||||
| 
 | ||||
| 	} | ||||
| 
 | ||||
| 	m.Manifest = manifest | ||||
|  |  | |||
|  | @ -233,7 +233,6 @@ func checkCommon(t *testing.T, event events.Event) { | |||
| 	if event.(Event).Target.Repository != repo { | ||||
| 		t.Fatalf("unexpected repository: %q != %q", event.(Event).Target.Repository, repo) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| type testSinkFn func(event events.Event) error | ||||
|  |  | |||
|  | @ -143,9 +143,7 @@ type SourceRecord struct { | |||
| 	InstanceID string `json:"instanceID,omitempty"` | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	// ErrSinkClosed is returned if a write is issued to a sink that has been
 | ||||
| 	// closed. If encountered, the error should be considered terminal and
 | ||||
| 	// retries will not be successful.
 | ||||
| 	ErrSinkClosed = fmt.Errorf("sink: closed") | ||||
| ) | ||||
| // ErrSinkClosed is returned if a write is issued to a sink that has been
 | ||||
| // closed. If encountered, the error should be considered terminal and
 | ||||
| // retries will not be successful.
 | ||||
| var ErrSinkClosed = fmt.Errorf("sink: closed") | ||||
|  |  | |||
|  | @ -13,7 +13,7 @@ import ( | |||
| // envelope has changed. If this code fails, the revision of the protocol may
 | ||||
| // need to be incremented.
 | ||||
| func TestEventEnvelopeJSONFormat(t *testing.T) { | ||||
| 	var expected = strings.TrimSpace(` | ||||
| 	expected := strings.TrimSpace(` | ||||
| { | ||||
|    "events": [ | ||||
|       { | ||||
|  | @ -114,7 +114,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { | |||
| 	prototype.Request.UserAgent = "test/0.1" | ||||
| 	prototype.Source.Addr = "hostname.local:port" | ||||
| 
 | ||||
| 	var manifestPush = prototype | ||||
| 	manifestPush := prototype | ||||
| 	manifestPush.ID = "asdf-asdf-asdf-asdf-0" | ||||
| 	manifestPush.Target.Digest = "sha256:0123456789abcdef0" | ||||
| 	manifestPush.Target.Length = 1 | ||||
|  | @ -123,7 +123,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { | |||
| 	manifestPush.Target.Repository = "library/test" | ||||
| 	manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" | ||||
| 
 | ||||
| 	var layerPush0 = prototype | ||||
| 	layerPush0 := prototype | ||||
| 	layerPush0.ID = "asdf-asdf-asdf-asdf-1" | ||||
| 	layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" | ||||
| 	layerPush0.Target.Length = 2 | ||||
|  | @ -132,7 +132,7 @@ func TestEventEnvelopeJSONFormat(t *testing.T) { | |||
| 	layerPush0.Target.Repository = "library/test" | ||||
| 	layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" | ||||
| 
 | ||||
| 	var layerPush1 = prototype | ||||
| 	layerPush1 := prototype | ||||
| 	layerPush1.ID = "asdf-asdf-asdf-asdf-2" | ||||
| 	layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6" | ||||
| 	layerPush1.Target.Length = 3 | ||||
|  |  | |||
|  | @ -135,7 +135,7 @@ type headerRoundTripper struct { | |||
| } | ||||
| 
 | ||||
| func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { | ||||
| 	var nreq = *req | ||||
| 	nreq := *req | ||||
| 	nreq.Header = make(http.Header) | ||||
| 
 | ||||
| 	merge := func(headers http.Header) { | ||||
|  |  | |||
|  | @ -197,7 +197,6 @@ func TestHTTPSink(t *testing.T) { | |||
| 	if err := sink.Close(); err == nil { | ||||
| 		t.Fatalf("second close should have returned error: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func createTestEvent(action, repo, typ string) Event { | ||||
|  |  | |||
|  | @ -3,13 +3,12 @@ package notifications | |||
| import ( | ||||
| 	"reflect" | ||||
| 	"sync" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| 
 | ||||
| 	events "github.com/docker/go-events" | ||||
| 
 | ||||
| 	"github.com/sirupsen/logrus" | ||||
| 
 | ||||
| 	"testing" | ||||
| ) | ||||
| 
 | ||||
| func TestEventQueue(t *testing.T) { | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| //go:build gofuzz
 | ||||
| // +build gofuzz
 | ||||
| 
 | ||||
| package reference | ||||
|  |  | |||
|  | @ -84,7 +84,7 @@ func TestValidateRemoteName(t *testing.T) { | |||
| 		// Allow multiple hyphens as well.
 | ||||
| 		"docker---rules/docker", | ||||
| 
 | ||||
| 		//Username doc and image name docker being tested.
 | ||||
| 		// Username doc and image name docker being tested.
 | ||||
| 		"doc/docker", | ||||
| 
 | ||||
| 		// single character names are now allowed.
 | ||||
|  | @ -129,7 +129,7 @@ func TestValidateRemoteName(t *testing.T) { | |||
| 		// No repository.
 | ||||
| 		"docker/", | ||||
| 
 | ||||
| 		//namespace too long
 | ||||
| 		// namespace too long
 | ||||
| 		"this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", | ||||
| 	} | ||||
| 	for _, repositoryName := range invalidRepositoryNames { | ||||
|  |  | |||
|  | @ -3,7 +3,7 @@ | |||
| //
 | ||||
| // Grammar
 | ||||
| //
 | ||||
| // 	reference                       := name [ ":" tag ] [ "@" digest ]
 | ||||
| //	reference                       := name [ ":" tag ] [ "@" digest ]
 | ||||
| //	name                            := [domain '/'] path-component ['/' path-component]*
 | ||||
| //	domain                          := host [':' port-number]
 | ||||
| //	host                            := domain-name | IPv4address | \[ IPv6address \]	; rfc3986 appendix-A
 | ||||
|  | @ -11,7 +11,7 @@ | |||
| //	domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
 | ||||
| //	port-number                     := /[0-9]+/
 | ||||
| //	path-component                  := alpha-numeric [separator alpha-numeric]*
 | ||||
| // 	alpha-numeric                   := /[a-z0-9]+/
 | ||||
| //	alpha-numeric                   := /[a-z0-9]+/
 | ||||
| //	separator                       := /[_.]|__|[-]*/
 | ||||
| //
 | ||||
| //	tag                             := /[\w][\w.-]{0,127}/
 | ||||
|  |  | |||
|  | @ -525,7 +525,6 @@ func TestReferenceRegexp(t *testing.T) { | |||
| 	for i := range testcases { | ||||
| 		checkRegexp(t, ReferenceRegexp, testcases[i]) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestIdentifierRegexp(t *testing.T) { | ||||
|  |  | |||
|  | @ -86,7 +86,6 @@ func TestErrorCodes(t *testing.T) { | |||
| 			t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestErrorsManagement(t *testing.T) { | ||||
|  | @ -99,7 +98,6 @@ func TestErrorsManagement(t *testing.T) { | |||
| 	errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) | ||||
| 
 | ||||
| 	p, err := json.Marshal(errs) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("error marashaling errors: %v", err) | ||||
| 	} | ||||
|  | @ -181,5 +179,4 @@ func TestErrorsManagement(t *testing.T) { | |||
| 	if e2.Detail != `stuff2` { | ||||
| 		t.Fatalf("e2 had wrong detail: %q", e2.Detail) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
|  |  | |||
|  | @ -75,8 +75,10 @@ var ( | |||
| 	}) | ||||
| ) | ||||
| 
 | ||||
| var nextCode = 1000 | ||||
| var registerLock sync.Mutex | ||||
| var ( | ||||
| 	nextCode     = 1000 | ||||
| 	registerLock sync.Mutex | ||||
| ) | ||||
| 
 | ||||
| // Register will make the passed-in error known to the environment and
 | ||||
| // return a new ErrorCode
 | ||||
|  |  | |||
|  | @ -262,7 +262,6 @@ type RouteDescriptor struct { | |||
| // MethodDescriptor provides a description of the requests that may be
 | ||||
| // conducted with the target method.
 | ||||
| type MethodDescriptor struct { | ||||
| 
 | ||||
| 	// Method is an HTTP method, such as GET, PUT or POST.
 | ||||
| 	Method string | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| //go:build gofuzz
 | ||||
| // +build gofuzz
 | ||||
| 
 | ||||
| package v2 | ||||
|  |  | |||
|  | @ -265,7 +265,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee | |||
| 		u := server.URL + testcase.RequestURI | ||||
| 
 | ||||
| 		resp, err := http.Get(u) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("error issuing get request: %v", err) | ||||
| 		} | ||||
|  | @ -316,7 +315,6 @@ func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, dee | |||
| 
 | ||||
| 		resp.Body.Close() | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // -------------- START LICENSED CODE --------------
 | ||||
|  |  | |||
|  | @ -8,28 +8,27 @@ | |||
| // An implementation registers its access controller by name with a constructor
 | ||||
| // which accepts an options map for configuring the access controller.
 | ||||
| //
 | ||||
| //		options := map[string]interface{}{"sillySecret": "whysosilly?"}
 | ||||
| // 		accessController, _ := auth.GetAccessController("silly", options)
 | ||||
| //	options := map[string]interface{}{"sillySecret": "whysosilly?"}
 | ||||
| //	accessController, _ := auth.GetAccessController("silly", options)
 | ||||
| //
 | ||||
| // This `accessController` can then be used in a request handler like so:
 | ||||
| //
 | ||||
| // 		func updateOrder(w http.ResponseWriter, r *http.Request) {
 | ||||
| //			orderNumber := r.FormValue("orderNumber")
 | ||||
| //			resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
 | ||||
| // 			access := auth.Access{Resource: resource, Action: "update"}
 | ||||
| //	func updateOrder(w http.ResponseWriter, r *http.Request) {
 | ||||
| //		orderNumber := r.FormValue("orderNumber")
 | ||||
| //		resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
 | ||||
| //		access := auth.Access{Resource: resource, Action: "update"}
 | ||||
| //
 | ||||
| // 			if ctx, err := accessController.Authorized(ctx, access); err != nil {
 | ||||
| //				if challenge, ok := err.(auth.Challenge) {
 | ||||
| //					// Let the challenge write the response.
 | ||||
| //					challenge.SetHeaders(r, w)
 | ||||
| //					w.WriteHeader(http.StatusUnauthorized)
 | ||||
| //					return
 | ||||
| //				} else {
 | ||||
| //					// Some other error.
 | ||||
| //				}
 | ||||
| //		if ctx, err := accessController.Authorized(ctx, access); err != nil {
 | ||||
| //			if challenge, ok := err.(auth.Challenge) {
 | ||||
| //				// Let the challenge write the response.
 | ||||
| //				challenge.SetHeaders(r, w)
 | ||||
| //				w.WriteHeader(http.StatusUnauthorized)
 | ||||
| //				return
 | ||||
| //			} else {
 | ||||
| //				// Some other error.
 | ||||
| //			}
 | ||||
| // 		}
 | ||||
| //
 | ||||
| //		}
 | ||||
| //	}
 | ||||
| package auth | ||||
| 
 | ||||
| import ( | ||||
|  |  | |||
|  | @ -128,10 +128,10 @@ func createHtpasswdFile(path string) error { | |||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { | ||||
| 	if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600) | ||||
| 	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o600) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("failed to open htpasswd path %s", err) | ||||
| 	} | ||||
|  |  | |||
|  | @ -42,7 +42,7 @@ func TestBasicAccessController(t *testing.T) { | |||
| 
 | ||||
| 	tempFile.Close() | ||||
| 
 | ||||
| 	var userNumber = 0 | ||||
| 	userNumber := 0 | ||||
| 
 | ||||
| 	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		ctx := context.WithRequest(ctx, r) | ||||
|  | @ -76,7 +76,6 @@ func TestBasicAccessController(t *testing.T) { | |||
| 
 | ||||
| 	req, _ := http.NewRequest("GET", server.URL, nil) | ||||
| 	resp, err := client.Do(req) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected error during GET: %v", err) | ||||
| 	} | ||||
|  | @ -120,7 +119,6 @@ func TestBasicAccessController(t *testing.T) { | |||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestCreateHtpasswdFile(t *testing.T) { | ||||
|  |  | |||
|  | @ -8,7 +8,6 @@ import ( | |||
| ) | ||||
| 
 | ||||
| func TestParseHTPasswd(t *testing.T) { | ||||
| 
 | ||||
| 	for _, tc := range []struct { | ||||
| 		desc    string | ||||
| 		input   string | ||||
|  | @ -81,5 +80,4 @@ asdf | |||
| 			t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
|  |  | |||
|  | @ -70,7 +70,6 @@ func (ac *accessController) Authorized(ctx context.Context, accessRecords ...aut | |||
| 	ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, auth.UserNameKey, auth.UserKey)) | ||||
| 
 | ||||
| 	return ctx, nil | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| type challenge struct { | ||||
|  |  | |||
|  | @ -185,13 +185,15 @@ func (t *Token) Verify(verifyOpts VerifyOptions) error { | |||
| 
 | ||||
| // VerifySigningKey attempts to get the key which was used to sign this token.
 | ||||
| // The token header should contain either of these 3 fields:
 | ||||
| //      `x5c` - The x509 certificate chain for the signing key. Needs to be
 | ||||
| //              verified.
 | ||||
| //      `jwk` - The JSON Web Key representation of the signing key.
 | ||||
| //              May contain its own `x5c` field which needs to be verified.
 | ||||
| //      `kid` - The unique identifier for the key. This library interprets it
 | ||||
| //              as a libtrust fingerprint. The key itself can be looked up in
 | ||||
| //              the trustedKeys field of the given verify options.
 | ||||
| //
 | ||||
| //	`x5c` - The x509 certificate chain for the signing key. Needs to be
 | ||||
| //	        verified.
 | ||||
| //	`jwk` - The JSON Web Key representation of the signing key.
 | ||||
| //	        May contain its own `x5c` field which needs to be verified.
 | ||||
| //	`kid` - The unique identifier for the key. This library interprets it
 | ||||
| //	        as a libtrust fingerprint. The key itself can be looked up in
 | ||||
| //	        the trustedKeys field of the given verify options.
 | ||||
| //
 | ||||
| // Each of these methods are tried in that order of preference until the
 | ||||
| // signing key is found or an error is returned.
 | ||||
| func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { | ||||
|  |  | |||
|  | @ -307,10 +307,10 @@ func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err er | |||
| // TestAccessController tests complete integration of the token auth package.
 | ||||
| // It starts by mocking the options for a token auth accessController which
 | ||||
| // it creates. It then tries a few mock requests:
 | ||||
| // 		- don't supply a token; should error with challenge
 | ||||
| //		- supply an invalid token; should error with challenge
 | ||||
| // 		- supply a token with insufficient access; should error with challenge
 | ||||
| //		- supply a valid token; should not error
 | ||||
| //   - don't supply a token; should error with challenge
 | ||||
| //   - supply an invalid token; should error with challenge
 | ||||
| //   - supply a token with insufficient access; should error with challenge
 | ||||
| //   - supply a valid token; should not error
 | ||||
| func TestAccessController(t *testing.T) { | ||||
| 	// Make 2 keys; only the first is to be a trusted root key.
 | ||||
| 	rootKeys, err := makeRootKeys(2) | ||||
|  | @ -493,7 +493,7 @@ func TestNewAccessControllerPemBlock(t *testing.T) { | |||
| 	defer os.Remove(rootCertBundleFilename) | ||||
| 
 | ||||
| 	// Add something other than a certificate to the rootcertbundle
 | ||||
| 	file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0666) | ||||
| 	file, err := os.OpenFile(rootCertBundleFilename, os.O_WRONLY|os.O_APPEND, 0o666) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  |  | |||
|  | @ -38,7 +38,6 @@ func TestAuthChallengeParse(t *testing.T) { | |||
| 	if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { | ||||
| 		t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestAuthChallengeNormalization(t *testing.T) { | ||||
|  | @ -49,7 +48,6 @@ func TestAuthChallengeNormalization(t *testing.T) { | |||
| } | ||||
| 
 | ||||
| func testAuthChallengeNormalization(t *testing.T, host string) { | ||||
| 
 | ||||
| 	scm := NewSimpleManager() | ||||
| 
 | ||||
| 	url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) | ||||
|  | @ -85,7 +83,6 @@ func testAuthChallengeNormalization(t *testing.T, host string) { | |||
| } | ||||
| 
 | ||||
| func testAuthChallengeConcurrent(t *testing.T, host string) { | ||||
| 
 | ||||
| 	scm := NewSimpleManager() | ||||
| 
 | ||||
| 	url, err := url.Parse(fmt.Sprintf("http://%s/v2/", host)) | ||||
|  |  | |||
|  | @ -50,7 +50,6 @@ func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Re | |||
| func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { | ||||
| 	h := testutil.NewHandler(rrm) | ||||
| 	wrapper := &testAuthenticationWrapper{ | ||||
| 
 | ||||
| 		headers: http.Header(map[string][]string{ | ||||
| 			"X-API-Version":       {"registry/2.0"}, | ||||
| 			"X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, | ||||
|  |  | |||
|  | @ -296,7 +296,6 @@ func descriptorFromResponse(response *http.Response) (distribution.Descriptor, e | |||
| 	desc.Size = length | ||||
| 
 | ||||
| 	return desc, nil | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // Get issues a HEAD request for a Manifest against its named endpoint in order
 | ||||
|  | @ -529,7 +528,6 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis | |||
| 		} | ||||
| 		mt := resp.Header.Get("Content-Type") | ||||
| 		body, err := ioutil.ReadAll(resp.Body) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | @ -667,7 +665,6 @@ func sanitizeLocation(location, base string) (string, error) { | |||
| 
 | ||||
| func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { | ||||
| 	return bs.statter.Stat(ctx, dgst) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { | ||||
|  |  | |||
|  | @ -319,7 +319,6 @@ func TestBlobDelete(t *testing.T) { | |||
| 	if err != nil { | ||||
| 		t.Errorf("Error deleting blob: %s", err.Error()) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestBlobFetch(t *testing.T) { | ||||
|  | @ -399,7 +398,6 @@ func TestBlobExistsNoContentLength(t *testing.T) { | |||
| 	if !strings.Contains(err.Error(), "missing content-length heade") { | ||||
| 		t.Fatalf("Expected missing content-length error message") | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestBlobExists(t *testing.T) { | ||||
|  | @ -986,7 +984,6 @@ func addTestManifestWithEtag(repo reference.Named, reference string, content []b | |||
| 				"Content-Type":   {schema1.MediaTypeSignedManifest}, | ||||
| 			}), | ||||
| 		} | ||||
| 
 | ||||
| 	} | ||||
| 	*m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) | ||||
| } | ||||
|  | @ -1535,6 +1532,7 @@ func TestObtainsManifestForTagWithoutHeaders(t *testing.T) { | |||
| 		t.Fatalf("Unexpected digest") | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestManifestTagsPaginated(t *testing.T) { | ||||
| 	s := httptest.NewServer(http.NotFoundHandler()) | ||||
| 	defer s.Close() | ||||
|  |  | |||
|  | @ -87,7 +87,8 @@ func TestCatalogAPI(t *testing.T) { | |||
| 
 | ||||
| 	values := url.Values{ | ||||
| 		"last": []string{""}, | ||||
| 		"n":    []string{strconv.Itoa(chunkLen)}} | ||||
| 		"n":    []string{strconv.Itoa(chunkLen)}, | ||||
| 	} | ||||
| 
 | ||||
| 	catalogURL, err := env.builder.BuildCatalogURL(values) | ||||
| 	if err != nil { | ||||
|  | @ -453,7 +454,6 @@ func TestBlobAPI(t *testing.T) { | |||
| 	defer env2.Shutdown() | ||||
| 	args = makeBlobArgs(t) | ||||
| 	testBlobAPI(t, env2, args) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestBlobDelete(t *testing.T) { | ||||
|  | @ -1110,7 +1110,7 @@ const ( | |||
| 
 | ||||
| func (factory *storageManifestErrDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { | ||||
| 	// Initialize the mock driver
 | ||||
| 	var errGenericStorage = errors.New("generic storage error") | ||||
| 	errGenericStorage := errors.New("generic storage error") | ||||
| 	return &mockErrorDriver{ | ||||
| 		returnErrs: []mockErrorMapping{ | ||||
| 			{ | ||||
|  | @ -1346,7 +1346,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name | |||
| 
 | ||||
| 	for i := range unsignedManifest.FSLayers { | ||||
| 		rs, dgst, err := testutil.CreateRandomTarFile() | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("error creating random layer %d: %v", i, err) | ||||
| 		} | ||||
|  | @ -1450,7 +1449,6 @@ func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Name | |||
| 	sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 
 | ||||
| 	} | ||||
| 
 | ||||
| 	// Re-push with a few different Content-Types. The official schema1
 | ||||
|  | @ -1684,7 +1682,6 @@ func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Name | |||
| 
 | ||||
| 	for i := range manifest.Layers { | ||||
| 		rs, dgst, err := testutil.CreateRandomTarFile() | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("error creating random layer %d: %v", i, err) | ||||
| 		} | ||||
|  | @ -2279,7 +2276,6 @@ func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { | |||
| 	if len(tagsResponse.Tags) != 0 { | ||||
| 		t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| type testEnv struct { | ||||
|  | @ -2308,7 +2304,6 @@ func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { | |||
| 	config.Compatibility.Schema1.Enabled = true | ||||
| 
 | ||||
| 	return newTestEnvWithConfig(t, &config) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { | ||||
|  | @ -2334,7 +2329,6 @@ func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *te | |||
| 	app := NewApp(ctx, config) | ||||
| 	server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) | ||||
| 	builder, err := v2.NewURLBuilderFromString(server.URL+config.HTTP.Prefix, false) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("error creating url builder: %v", err) | ||||
| 	} | ||||
|  | @ -2832,7 +2826,6 @@ func TestRegistryAsCacheMutationAPIs(t *testing.T) { | |||
| 	blobURL, _ := env.builder.BuildBlobURL(ref) | ||||
| 	resp, _ = httpDelete(blobURL) | ||||
| 	checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestProxyManifestGetByTag(t *testing.T) { | ||||
|  |  | |||
|  | @ -703,7 +703,6 @@ func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { | |||
| 				return | ||||
| 			} | ||||
| 			repository, err := app.registry.Repository(context, nameRef) | ||||
| 
 | ||||
| 			if err != nil { | ||||
| 				dcontext.GetLogger(context).Errorf("error resolving repository: %v", err) | ||||
| 
 | ||||
|  | @ -983,7 +982,6 @@ func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespac | |||
| 		registry = rmw | ||||
| 	} | ||||
| 	return registry, nil | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // applyRepoMiddleware wraps a repository with the configured middlewares
 | ||||
|  |  | |||
|  | @ -120,13 +120,11 @@ func TestAppDispatcher(t *testing.T) { | |||
| 		app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) | ||||
| 		route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) | ||||
| 		u, err := route.URL(testcase.vars...) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| 
 | ||||
| 		resp, err := http.Get(u.String()) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
|  | @ -275,5 +273,4 @@ func TestAppendAccessRecords(t *testing.T) { | |||
| 	if ok := reflect.DeepEqual(result, expectedResult); !ok { | ||||
| 		t.Fatalf("Actual access record differs from expected") | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
|  |  | |||
|  | @ -79,7 +79,6 @@ func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Req | |||
| 
 | ||||
| 	blobs := buh.Repository.Blobs(buh) | ||||
| 	upload, err := blobs.Create(buh, options...) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		if ebm, ok := err.(distribution.ErrBlobMounted); ok { | ||||
| 			if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { | ||||
|  | @ -219,7 +218,6 @@ func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *ht | |||
| 		// really set the mediatype. For now, we can let the backend take care
 | ||||
| 		// of this.
 | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		switch err := err.(type) { | ||||
| 		case distribution.ErrBlobInvalidDigest: | ||||
|  |  | |||
|  | @ -34,7 +34,7 @@ type catalogAPIResponse struct { | |||
| } | ||||
| 
 | ||||
| func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { | ||||
| 	var moreEntries = true | ||||
| 	moreEntries := true | ||||
| 
 | ||||
| 	q := r.URL.Query() | ||||
| 	lastEntry := q.Get("last") | ||||
|  |  | |||
|  | @ -31,7 +31,7 @@ func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { | |||
| func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error { | ||||
| 	// Get a channel that tells us if the client disconnects
 | ||||
| 	clientClosed := r.Context().Done() | ||||
| 	var body = r.Body | ||||
| 	body := r.Body | ||||
| 	if limit > 0 { | ||||
| 		body = http.MaxBytesReader(responseWriter, body, limit) | ||||
| 	} | ||||
|  |  | |||
|  | @ -479,7 +479,6 @@ func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) | |||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // DeleteManifest removes the manifest with the given digest or the tag with the given name from the registry.
 | ||||
|  |  | |||
|  | @ -12,8 +12,10 @@ import ( | |||
| // used to register the constructor for different RegistryMiddleware backends.
 | ||||
| type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) | ||||
| 
 | ||||
| var middlewares map[string]InitFunc | ||||
| var registryoptions []storage.RegistryOption | ||||
| var ( | ||||
| 	middlewares     map[string]InitFunc | ||||
| 	registryoptions []storage.RegistryOption | ||||
| ) | ||||
| 
 | ||||
| // Register is used to register an InitFunc for
 | ||||
| // a RegistryMiddleware backend with the given name.
 | ||||
|  |  | |||
|  | @ -221,6 +221,7 @@ func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { | |||
| 	te.inRemote = inRemote | ||||
| 	te.numUnique = numUnique | ||||
| } | ||||
| 
 | ||||
| func TestProxyStoreGet(t *testing.T) { | ||||
| 	te := makeTestEnv(t, "foo/bar") | ||||
| 
 | ||||
|  | @ -253,7 +254,6 @@ func TestProxyStoreGet(t *testing.T) { | |||
| 	if (*remoteStats)["get"] != 1 { | ||||
| 		t.Errorf("Unexpected remote get count") | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestProxyStoreStat(t *testing.T) { | ||||
|  | @ -284,7 +284,6 @@ func TestProxyStoreStat(t *testing.T) { | |||
| 	if te.store.authChallenger.(*mockChallenger).count != len(te.inRemote) { | ||||
| 		t.Fatalf("Unexpected auth challenge count, got %#v", te.store.authChallenger) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestProxyStoreServeHighConcurrency(t *testing.T) { | ||||
|  |  | |||
|  | @ -79,7 +79,7 @@ func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, optio | |||
| 
 | ||||
| 		pms.scheduler.AddManifest(repoBlob, repositoryTTL) | ||||
| 		// Ensure the manifest blob is cleaned up
 | ||||
| 		//pms.scheduler.AddBlob(blobRef, repositoryTTL)
 | ||||
| 		// pms.scheduler.AddBlob(blobRef, repositoryTTL)
 | ||||
| 
 | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -271,5 +271,4 @@ func TestProxyManifests(t *testing.T) { | |||
| 	if env.manifests.authChallenger.(*mockChallenger).count != 2 { | ||||
| 		t.Fatalf("Expected 2 auth challenges, got %#v", env.manifests.authChallenger) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
|  |  | |||
|  | @ -70,5 +70,4 @@ func init() { | |||
| 	pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { | ||||
| 		return proxyMetrics.manifestMetrics | ||||
| 	})) | ||||
| 
 | ||||
| } | ||||
|  |  | |||
|  | @ -69,7 +69,6 @@ func TestSchedule(t *testing.T) { | |||
| 		s.Lock() | ||||
| 		s.add(ref3, 1*timeUnit, entryTypeBlob) | ||||
| 		s.Unlock() | ||||
| 
 | ||||
| 	}() | ||||
| 
 | ||||
| 	// Ensure all repos are deleted
 | ||||
|  | @ -195,7 +194,6 @@ func TestStopRestore(t *testing.T) { | |||
| 	if len(remainingRepos) != 0 { | ||||
| 		t.Fatalf("Repositories remaining: %#v", remainingRepos) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestDoubleStart(t *testing.T) { | ||||
|  |  | |||
|  | @ -73,12 +73,14 @@ var defaultCipherSuites = []uint16{ | |||
| } | ||||
| 
 | ||||
| // maps tls version strings to constants
 | ||||
| var defaultTLSVersionStr = "tls1.2" | ||||
| var tlsVersions = map[string]uint16{ | ||||
| 	// user specified values
 | ||||
| 	"tls1.2": tls.VersionTLS12, | ||||
| 	"tls1.3": tls.VersionTLS13, | ||||
| } | ||||
| var ( | ||||
| 	defaultTLSVersionStr = "tls1.2" | ||||
| 	tlsVersions          = map[string]uint16{ | ||||
| 		// user specified values
 | ||||
| 		"tls1.2": tls.VersionTLS12, | ||||
| 		"tls1.3": tls.VersionTLS13, | ||||
| 	} | ||||
| ) | ||||
| 
 | ||||
| // this channel gets notified when process receives signal. It is global to ease unit testing
 | ||||
| var quit = make(chan os.Signal, 1) | ||||
|  | @ -89,7 +91,6 @@ var ServeCmd = &cobra.Command{ | |||
| 	Short: "`serve` stores and distributes Docker images", | ||||
| 	Long:  "`serve` stores and distributes Docker images.", | ||||
| 	Run: func(cmd *cobra.Command, args []string) { | ||||
| 
 | ||||
| 		// setup context
 | ||||
| 		ctx := dcontext.WithVersion(dcontext.Background(), version.Version) | ||||
| 
 | ||||
|  |  | |||
|  | @ -152,7 +152,7 @@ func TestGetCipherSuite(t *testing.T) { | |||
| 		t.Error("did not return expected error about unknown cipher suite") | ||||
| 	} | ||||
| 
 | ||||
| 	var insecureCipherSuites = []string{ | ||||
| 	insecureCipherSuites := []string{ | ||||
| 		"TLS_RSA_WITH_RC4_128_SHA", | ||||
| 		"TLS_RSA_WITH_AES_128_CBC_SHA256", | ||||
| 		"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", | ||||
|  | @ -234,7 +234,7 @@ func buildRegistryTLSConfig(name, keyType string, cipherSuites []string) (*regis | |||
| 	} | ||||
| 
 | ||||
| 	keyPath := path.Join(os.TempDir(), name+".key") | ||||
| 	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) | ||||
| 	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("failed to open %s for writing: %v", keyPath, err) | ||||
| 	} | ||||
|  |  | |||
|  | @ -36,8 +36,10 @@ var RootCmd = &cobra.Command{ | |||
| 	}, | ||||
| } | ||||
| 
 | ||||
| var dryRun bool | ||||
| var removeUntagged bool | ||||
| var ( | ||||
| 	dryRun         bool | ||||
| 	removeUntagged bool | ||||
| ) | ||||
| 
 | ||||
| // GCCmd is the cobra command that corresponds to the garbage-collect subcommand
 | ||||
| var GCCmd = &cobra.Command{ | ||||
|  |  | |||
|  | @ -36,7 +36,6 @@ func TestWriteSeek(t *testing.T) { | |||
| 	bs := repository.Blobs(ctx) | ||||
| 
 | ||||
| 	blobUpload, err := bs.Create(ctx) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected error starting layer upload: %s", err) | ||||
| 	} | ||||
|  | @ -47,7 +46,6 @@ func TestWriteSeek(t *testing.T) { | |||
| 	if offset != int64(len(contents)) { | ||||
| 		t.Fatalf("unexpected value for blobUpload offset:  %v != %v", offset, len(contents)) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // TestSimpleBlobUpload covers the blob upload process, exercising common
 | ||||
|  | @ -75,7 +73,6 @@ func TestSimpleBlobUpload(t *testing.T) { | |||
| 	rd := io.TeeReader(randomDataReader, h) | ||||
| 
 | ||||
| 	blobUpload, err := bs.Create(ctx) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected error starting layer upload: %s", err) | ||||
| 	} | ||||
|  | @ -385,7 +382,6 @@ func TestBlobMount(t *testing.T) { | |||
| 	sbs := sourceRepository.Blobs(ctx) | ||||
| 
 | ||||
| 	blobUpload, err := sbs.Create(ctx) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected error starting layer upload: %s", err) | ||||
| 	} | ||||
|  |  | |||
|  | @ -121,7 +121,6 @@ func (bs *blobStore) path(dgst digest.Digest) (string, error) { | |||
| 	bp, err := pathFor(blobDataPathSpec{ | ||||
| 		digest: dgst, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | @ -165,7 +164,6 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi | |||
| 	path, err := pathFor(blobDataPathSpec{ | ||||
| 		digest: dgst, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return distribution.Descriptor{}, err | ||||
| 	} | ||||
|  |  | |||
|  | @ -15,9 +15,7 @@ import ( | |||
| 	"github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	errResumableDigestNotAvailable = errors.New("resumable digest not available") | ||||
| ) | ||||
| var errResumableDigestNotAvailable = errors.New("resumable digest not available") | ||||
| 
 | ||||
| const ( | ||||
| 	// digestSha256Empty is the canonical sha256 digest of empty data
 | ||||
|  | @ -296,7 +294,6 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor | |||
| 	blobPath, err := pathFor(blobDataPathSpec{ | ||||
| 		digest: desc.Digest, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | @ -355,7 +352,6 @@ func (bw *blobWriter) removeResources(ctx context.Context) error { | |||
| 		name: bw.blobStore.repository.Named().Name(), | ||||
| 		id:   bw.id, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  |  | |||
|  | @ -85,7 +85,6 @@ func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry | |||
| 		alg:  bw.digester.Digest().Algorithm(), | ||||
| 		list: true, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -136,7 +135,6 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { | |||
| 		alg:    bw.digester.Digest().Algorithm(), | ||||
| 		offset: bw.written, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  |  | |||
|  | @ -39,14 +39,16 @@ func checkBlobDescriptorCacheEmptyRepository(ctx context.Context, t *testing.T, | |||
| 	if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ | ||||
| 		Digest:    "sha384:abc", | ||||
| 		Size:      10, | ||||
| 		MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { | ||||
| 		MediaType: "application/octet-stream", | ||||
| 	}); err != digest.ErrDigestInvalidFormat { | ||||
| 		t.Fatalf("expected error with invalid digest: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ | ||||
| 		Digest:    "", | ||||
| 		Size:      10, | ||||
| 		MediaType: "application/octet-stream"}); err == nil { | ||||
| 		MediaType: "application/octet-stream", | ||||
| 	}); err == nil { | ||||
| 		t.Fatalf("expected error setting value on invalid descriptor") | ||||
| 	} | ||||
| 
 | ||||
|  | @ -68,7 +70,8 @@ func checkBlobDescriptorCacheSetAndRead(ctx context.Context, t *testing.T, provi | |||
| 	expected := distribution.Descriptor{ | ||||
| 		Digest:    "sha256:abc1111111111111111111111111111111111111111111111111111111111111", | ||||
| 		Size:      10, | ||||
| 		MediaType: "application/octet-stream"} | ||||
| 		MediaType: "application/octet-stream", | ||||
| 	} | ||||
| 
 | ||||
| 	cache, err := provider.RepositoryScoped("foo/bar") | ||||
| 	if err != nil { | ||||
|  | @ -152,7 +155,8 @@ func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider c | |||
| 	expected := distribution.Descriptor{ | ||||
| 		Digest:    "sha256:def1111111111111111111111111111111111111111111111111111111111111", | ||||
| 		Size:      10, | ||||
| 		MediaType: "application/octet-stream"} | ||||
| 		MediaType: "application/octet-stream", | ||||
| 	} | ||||
| 
 | ||||
| 	cache, err := provider.RepositoryScoped("foo/bar") | ||||
| 	if err != nil { | ||||
|  |  | |||
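The cache test hunks above close multi-line struct literals on a line of their own, with a trailing comma after the last field. A small standalone sketch of that shape, using a local stand-in type rather than distribution.Descriptor:

	package main

	import "fmt"

	// descriptor is a stand-in for distribution.Descriptor, only to keep the
	// sketch self-contained.
	type descriptor struct {
		MediaType string
		Size      int64
		Digest    string
	}

	func main() {
		// The last field ends with a trailing comma and the closing brace sits
		// on its own line, rather than `MediaType: "application/octet-stream"}`.
		d := descriptor{
			Digest:    "sha256:abc",
			Size:      10,
			MediaType: "application/octet-stream",
		}
		fmt.Println(d)
	}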
|  | @ -14,10 +14,8 @@ type cachedBlobStatter struct { | |||
| 	backend distribution.BlobDescriptorService | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	// cacheCount is the number of total cache request received/hits/misses
 | ||||
| 	cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") | ||||
| ) | ||||
| // cacheCount is the number of total cache request received/hits/misses
 | ||||
| var cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") | ||||
| 
 | ||||
| // NewCachedBlobStatter creates a new statter which prefers a cache and
 | ||||
| // falls back to a backend.
 | ||||
|  |  | |||
|  | @ -102,7 +102,6 @@ func makeRepo(ctx context.Context, t *testing.T, name string, reg distribution.N | |||
| 	if err != nil { | ||||
| 		t.Fatalf("manifest upload failed: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestCatalog(t *testing.T) { | ||||
|  | @ -289,8 +288,10 @@ func BenchmarkPathCompareNativeEqual(B *testing.B) { | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") | ||||
| var separatorChars = []byte("._-") | ||||
| var ( | ||||
| 	filenameChars  = []byte("abcdefghijklmnopqrstuvwxyz0123456789") | ||||
| 	separatorChars = []byte("._-") | ||||
| ) | ||||
| 
 | ||||
| func randomPath(length int64) string { | ||||
| 	path := "/" | ||||
|  |  | |||
|  | @ -93,7 +93,8 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) { | |||
| 
 | ||||
| 	d := &driver{ | ||||
| 		client:    blobClient, | ||||
| 		container: container} | ||||
| 		container: container, | ||||
| 	} | ||||
| 	return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil | ||||
| } | ||||
| 
 | ||||
|  | @ -412,7 +413,6 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) { | |||
| 			Marker: marker, | ||||
| 			Prefix: virtPath, | ||||
| 		}) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return out, err | ||||
| 		} | ||||
|  |  | |||
|  | @ -6,14 +6,14 @@ | |||
| // struct such that calls are proxied through this implementation. First,
 | ||||
| // declare the internal driver, as follows:
 | ||||
| //
 | ||||
| // 	type driver struct { ... internal ...}
 | ||||
| //	type driver struct { ... internal ...}
 | ||||
| //
 | ||||
| // The resulting type should implement StorageDriver such that it can be the
 | ||||
| // target of a Base struct. The exported type can then be declared as follows:
 | ||||
| //
 | ||||
| // 	type Driver struct {
 | ||||
| // 		Base
 | ||||
| // 	}
 | ||||
| //	type Driver struct {
 | ||||
| //		Base
 | ||||
| //	}
 | ||||
| //
 | ||||
| // Because Driver embeds Base, it effectively implements Base. If the driver
 | ||||
| // needs to intercept a call, before going to base, Driver should implement
 | ||||
|  | @ -23,15 +23,15 @@ | |||
| // To further shield the embed from other packages, it is recommended to
 | ||||
| // employ a private embed struct:
 | ||||
| //
 | ||||
| // 	type baseEmbed struct {
 | ||||
| // 		base.Base
 | ||||
| // 	}
 | ||||
| //	type baseEmbed struct {
 | ||||
| //		base.Base
 | ||||
| //	}
 | ||||
| //
 | ||||
| // Then, declare driver to embed baseEmbed, rather than Base directly:
 | ||||
| //
 | ||||
| // 	type Driver struct {
 | ||||
| // 		baseEmbed
 | ||||
| // 	}
 | ||||
| //	type Driver struct {
 | ||||
| //		baseEmbed
 | ||||
| //	}
 | ||||
| //
 | ||||
| // The type now implements StorageDriver, proxying through Base, without
 | ||||
| // exporting an unnecessary field.
 | ||||
|  | @ -48,10 +48,8 @@ import ( | |||
| 	"github.com/docker/go-metrics" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// storageAction is the metrics of blob related operations
 | ||||
| 	storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action") | ||||
| ) | ||||
| // storageAction is the metrics of blob related operations
 | ||||
| var storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action") | ||||
| 
 | ||||
| func init() { | ||||
| 	metrics.Register(prometheus.StorageNamespace) | ||||
|  |  | |||
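The package documentation above describes how storage drivers are assembled: an unexported driver type holds the implementation, and the exported Driver proxies every call through base.Base via a private baseEmbed struct. The sketch below mirrors that shape with simplified stand-in types; the real StorageDriver interface is much larger, so only one method is shown to keep the example compilable:

	package main

	import "fmt"

	// StorageDriver stands in for the full storagedriver.StorageDriver interface.
	type StorageDriver interface {
		Name() string
	}

	// Base stands in for base.Base, which wraps each call with instrumentation;
	// here it simply embeds the inner driver.
	type Base struct {
		StorageDriver
	}

	// driver is the internal implementation.
	type driver struct{}

	func (d *driver) Name() string { return "example" }

	// baseEmbed keeps Base out of the exported API surface.
	type baseEmbed struct {
		Base
	}

	// Driver is what other packages see; calls are promoted through baseEmbed
	// and Base down to the inner driver.
	type Driver struct {
		baseEmbed
	}

	func main() {
		d := Driver{baseEmbed: baseEmbed{Base: Base{StorageDriver: &driver{}}}}
		fmt.Println(d.Name()) // "example", proxied through Base
	}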
|  | @ -145,7 +145,7 @@ func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileIn | |||
| } | ||||
| 
 | ||||
| // List returns a list of the objects that are direct descendants of the
 | ||||
| //given path.
 | ||||
| // given path.
 | ||||
| func (r *regulator) List(ctx context.Context, path string) ([]string, error) { | ||||
| 	r.enter() | ||||
| 	defer r.exit() | ||||
|  |  | |||
|  | @ -52,8 +52,10 @@ type FileInfoInternal struct { | |||
| 	FileInfoFields | ||||
| } | ||||
| 
 | ||||
| var _ FileInfo = FileInfoInternal{} | ||||
| var _ FileInfo = &FileInfoInternal{} | ||||
| var ( | ||||
| 	_ FileInfo = FileInfoInternal{} | ||||
| 	_ FileInfo = &FileInfoInternal{} | ||||
| ) | ||||
| 
 | ||||
| // Path provides the full path of the target of this file info.
 | ||||
| func (fi FileInfoInternal) Path() string { | ||||
|  |  | |||
|  | @ -149,7 +149,7 @@ func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte | |||
| // Reader retrieves an io.ReadCloser for the content stored at "path" with a
 | ||||
| // given byte offset.
 | ||||
| func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { | ||||
| 	file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) | ||||
| 	file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0o644) | ||||
| 	if err != nil { | ||||
| 		if os.IsNotExist(err) { | ||||
| 			return nil, storagedriver.PathNotFoundError{Path: path} | ||||
|  | @ -173,11 +173,11 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read | |||
| func (d *driver) Writer(ctx context.Context, subPath string, append bool) (storagedriver.FileWriter, error) { | ||||
| 	fullPath := d.fullPath(subPath) | ||||
| 	parentDir := path.Dir(fullPath) | ||||
| 	if err := os.MkdirAll(parentDir, 0777); err != nil { | ||||
| 	if err := os.MkdirAll(parentDir, 0o777); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) | ||||
| 	fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0o666) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -260,7 +260,7 @@ func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) e | |||
| 		return storagedriver.PathNotFoundError{Path: sourcePath} | ||||
| 	} | ||||
| 
 | ||||
| 	if err := os.MkdirAll(path.Dir(dest), 0777); err != nil { | ||||
| 	if err := os.MkdirAll(path.Dir(dest), 0o777); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -34,7 +34,6 @@ func init() { | |||
| } | ||||
| 
 | ||||
| func TestFromParametersImpl(t *testing.T) { | ||||
| 
 | ||||
| 	tests := []struct { | ||||
| 		params   map[string]interface{} // technically the yaml can contain anything
 | ||||
| 		expected DriverParameters | ||||
|  | @ -109,5 +108,4 @@ func TestFromParametersImpl(t *testing.T) { | |||
| 			t.Fatalf("unexpected params from filesystem driver. expected %+v, got %+v", item.expected, params) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
|  |  | |||
|  | @ -2,7 +2,7 @@ | |||
| // store blobs in Google cloud storage.
 | ||||
| //
 | ||||
| // This package leverages the google.golang.org/cloud/storage client library
 | ||||
| //for interfacing with gcs.
 | ||||
| // for interfacing with gcs.
 | ||||
| //
 | ||||
| // Because gcs is a key, value store the Stat call does not support last modification
 | ||||
| // time for directories (directories are an abstraction for key, value stores)
 | ||||
|  | @ -445,7 +445,6 @@ func putContentsClose(wc *storage.Writer, contents []byte) error { | |||
| // available for future calls to StorageDriver.GetContent and
 | ||||
| // StorageDriver.Reader.
 | ||||
| func (w *writer) Commit() error { | ||||
| 
 | ||||
| 	if err := w.checkClosed(); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | @ -597,7 +596,7 @@ func retry(req request) error { | |||
| // size in bytes and the creation time.
 | ||||
| func (d *driver) Stat(context context.Context, path string) (storagedriver.FileInfo, error) { | ||||
| 	var fi storagedriver.FileInfoFields | ||||
| 	//try to get as file
 | ||||
| 	// try to get as file
 | ||||
| 	gcsContext := d.context(context) | ||||
| 	obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) | ||||
| 	if err == nil { | ||||
|  | @ -612,7 +611,7 @@ func (d *driver) Stat(context context.Context, path string) (storagedriver.FileI | |||
| 		} | ||||
| 		return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil | ||||
| 	} | ||||
| 	//try to get as folder
 | ||||
| 	// try to get as folder
 | ||||
| 	dirpath := d.pathToDirKey(path) | ||||
| 
 | ||||
| 	var query *storage.Query | ||||
|  | @ -640,7 +639,7 @@ func (d *driver) Stat(context context.Context, path string) (storagedriver.FileI | |||
| } | ||||
| 
 | ||||
| // List returns a list of the objects that are direct descendants of the
 | ||||
| //given path.
 | ||||
| // given path.
 | ||||
| func (d *driver) List(context context.Context, path string) ([]string, error) { | ||||
| 	var query *storage.Query | ||||
| 	query = &storage.Query{} | ||||
|  |  | |||
|  | @ -22,8 +22,10 @@ import ( | |||
| // Hook up gocheck into the "go test" runner.
 | ||||
| func Test(t *testing.T) { check.TestingT(t) } | ||||
| 
 | ||||
| var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) | ||||
| var skipGCS func() string | ||||
| var ( | ||||
| 	gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) | ||||
| 	skipGCS              func() string | ||||
| ) | ||||
| 
 | ||||
| func init() { | ||||
| 	bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") | ||||
|  |  | |||
|  | @ -190,7 +190,6 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) { | |||
| 	} | ||||
| 
 | ||||
| 	entries, err := found.(*dir).list(normalized) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		switch err { | ||||
| 		case errNotExists: | ||||
|  |  | |||
|  | @ -163,7 +163,6 @@ func (d *dir) mkdirs(p string) (*dir, error) { | |||
| 	components := strings.Split(relative, "/") | ||||
| 	for _, component := range components { | ||||
| 		d, err := dd.mkdir(component) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			// This should actually never happen, since there are no children.
 | ||||
| 			return nil, err | ||||
|  |  | |||
|  | @ -98,7 +98,6 @@ func newAliCDNStorageMiddleware(storageDriver storagedriver.StorageDriver, optio | |||
| 
 | ||||
| // URLFor attempts to find a url which may be used to retrieve the file at the given path.
 | ||||
| func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { | ||||
| 
 | ||||
| 	if ac.StorageDriver.Name() != "oss" { | ||||
| 		dcontext.GetLogger(ctx).Warn("the AliCDN middleware does not support this backend storage driver") | ||||
| 		return ac.StorageDriver.URLFor(ctx, path, options) | ||||
|  | @ -112,5 +111,5 @@ func (ac *aliCDNStorageMiddleware) URLFor(ctx context.Context, path string, opti | |||
| 
 | ||||
| // init registers the alicdn layerHandler backend.
 | ||||
| func init() { | ||||
| 	storagemiddleware.Register("alicdn", storagemiddleware.InitFunc(newAliCDNStorageMiddleware)) | ||||
| 	storagemiddleware.Register("alicdn", newAliCDNStorageMiddleware) | ||||
| } | ||||
|  |  | |||
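The init change above drops the storagemiddleware.InitFunc(...) conversion: newAliCDNStorageMiddleware already matches the signature of the named InitFunc type, so it can be passed to Register directly. A self-contained sketch of why the conversion is redundant (the types and names here are stand-ins, not the storagemiddleware API):

	package main

	import "fmt"

	// InitFunc mirrors a named function type used by a registration API.
	type InitFunc func(name string) (string, error)

	var registered = map[string]InitFunc{}

	// Register accepts any function whose signature matches InitFunc.
	func Register(name string, initFunc InitFunc) {
		registered[name] = initFunc
	}

	func newExampleMiddleware(name string) (string, error) {
		return "wrapped:" + name, nil
	}

	func main() {
		// InitFunc(newExampleMiddleware) would compile, but the conversion is
		// unnecessary: a function value is assignable to a named type with the
		// same underlying signature.
		Register("example", newExampleMiddleware)

		out, _ := registered["example"]("storage")
		fmt.Println(out)
	}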
|  | @ -1,6 +1,5 @@ | |||
| // Package middleware - cloudfront wrapper for storage libs
 | ||||
| // N.B. currently only works with S3, not arbitrary sites
 | ||||
| //
 | ||||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
|  | @ -34,12 +33,21 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} | |||
| 
 | ||||
| // newCloudFrontLayerHandler constructs and returns a new CloudFront
 | ||||
| // LayerHandler implementation.
 | ||||
| // Required options: baseurl, privatekey, keypairid
 | ||||
| 
 | ||||
| // Optional options: ipFilteredBy, awsregion
 | ||||
| // ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP, default value. "aws", only aws IP goes
 | ||||
| //               to S3 directly. "awsregion", only regions listed in awsregion options goes to S3 directly
 | ||||
| // awsregion: a comma separated string of AWS regions.
 | ||||
| //
 | ||||
| // Required options:
 | ||||
| //
 | ||||
| //   - baseurl
 | ||||
| //   - privatekey
 | ||||
| //   - keypairid
 | ||||
| //
 | ||||
| // Optional options:
 | ||||
| //
 | ||||
| //   - ipFilteredBy
 | ||||
| //   - awsregion
 | ||||
| //   - ipfilteredby: valid value "none|aws|awsregion". "none", do not filter any IP,
 | ||||
| //     default value. "aws", only aws IP goes to S3 directly. "awsregion", only
 | ||||
| //     regions listed in awsregion options goes to S3 directly
 | ||||
| //   - awsregion: a comma separated string of AWS regions.
 | ||||
| func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { | ||||
| 	// parse baseurl
 | ||||
| 	base, ok := options["baseurl"] | ||||
|  | @ -211,5 +219,5 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, | |||
| 
 | ||||
| // init registers the cloudfront layerHandler backend.
 | ||||
| func init() { | ||||
| 	storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) | ||||
| 	storagemiddleware.Register("cloudfront", newCloudFrontStorageMiddleware) | ||||
| } | ||||
|  |  | |||
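The reworked comment above spells out which options newCloudFrontStorageMiddleware reads from its options map. A sketch of such a map with placeholder values (in a real deployment these come from the registry's middleware configuration, and the key names follow the doc comment above):

	package main

	import "fmt"

	func main() {
		// Placeholder values; baseurl, privatekey and keypairid are required,
		// while ipfilteredby and awsregion are optional.
		options := map[string]interface{}{
			"baseurl":      "https://abcdefghijklmn.cloudfront.net/",
			"privatekey":   "/etc/docker/cloudfront/pk-example.pem",
			"keypairid":    "EXAMPLEKEYPAIRID",
			"ipfilteredby": "awsregion",
			"awsregion":    "us-east-1,us-west-2",
		}
		fmt.Println(len(options))
	}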
|  | @ -21,11 +21,10 @@ func (s *MiddlewareSuite) TestNoConfig(c *check.C) { | |||
| } | ||||
| 
 | ||||
| func TestCloudFrontStorageMiddlewareGenerateKey(t *testing.T) { | ||||
| 
 | ||||
| 	options := make(map[string]interface{}) | ||||
| 	options["baseurl"] = "example.com" | ||||
| 
 | ||||
| 	var privk = `-----BEGIN RSA PRIVATE KEY----- | ||||
| 	privk := `-----BEGIN RSA PRIVATE KEY----- | ||||
| MIICXQIBAAKBgQCy0ZZsItDuYoX3y6hWqyU9YdH/0B+tlOhvjlaJqvkmAIBBatVV | ||||
| VAShnEAEircBwV3i08439WYgjXnrZ0FjXBTjTKWwCsbpuWJY1w8hqHW3VDivUo1n | ||||
| F9WTeclVJuEMhmiAhek3dhUdATaEDqBNskXMofSgKmQHqhPdXCgDmnzKoQIDAQAB | ||||
|  |  | |||
|  | @ -113,7 +113,6 @@ func (s *awsIPs) tryUpdate() error { | |||
| 		if regionAllowed { | ||||
| 			*output = append(*output, *network) | ||||
| 		} | ||||
| 
 | ||||
| 	} | ||||
| 
 | ||||
| 	for _, prefix := range response.Prefixes { | ||||
|  |  | |||
|  | @ -35,7 +35,6 @@ func (m mockIPRangeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { | |||
| 		return | ||||
| 	} | ||||
| 	w.Write(bytes) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func newTestHandler(data awsIPResponse) *httptest.Server { | ||||
|  | @ -68,7 +67,6 @@ func TestS3TryUpdate(t *testing.T) { | |||
| 
 | ||||
| 	assertEqual(t, 1, len(ips.ipv4)) | ||||
| 	assertEqual(t, 0, len(ips.ipv6)) | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestMatchIPV6(t *testing.T) { | ||||
|  | @ -215,7 +213,7 @@ func TestInvalidNetworkType(t *testing.T) { | |||
| } | ||||
| 
 | ||||
| func TestParsing(t *testing.T) { | ||||
| 	var data = `{ | ||||
| 	data := `{ | ||||
|       "prefixes": [{ | ||||
|         "ip_prefix": "192.168.0.0", | ||||
|         "region": "someregion", | ||||
|  |  | |||
|  | @ -46,5 +46,5 @@ func (r *redirectStorageMiddleware) URLFor(ctx context.Context, path string, opt | |||
| } | ||||
| 
 | ||||
| func init() { | ||||
| 	storagemiddleware.Register("redirect", storagemiddleware.InitFunc(newRedirectStorageMiddleware)) | ||||
| 	storagemiddleware.Register("redirect", newRedirectStorageMiddleware) | ||||
| } | ||||
|  |  | |||
|  | @ -37,13 +37,15 @@ const driverName = "oss" | |||
| // OSS API requires multipart upload chunks to be at least 5MB
 | ||||
| const minChunkSize = 5 << 20 | ||||
| 
 | ||||
| const defaultChunkSize = 2 * minChunkSize | ||||
| const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk
 | ||||
| const ( | ||||
| 	defaultChunkSize = 2 * minChunkSize | ||||
| 	defaultTimeout   = 2 * time.Minute // 2 minute timeout per chunk
 | ||||
| ) | ||||
| 
 | ||||
| // listMax is the largest amount of objects you can request from OSS in a list call
 | ||||
| const listMax = 1000 | ||||
| 
 | ||||
| //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
 | ||||
| // DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
 | ||||
| type DriverParameters struct { | ||||
| 	AccessKeyID     string | ||||
| 	AccessKeySecret string | ||||
|  | @ -202,7 +204,6 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { | |||
| // New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
 | ||||
| // bucketName
 | ||||
| func New(params DriverParameters) (*Driver, error) { | ||||
| 
 | ||||
| 	client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) | ||||
| 	client.SetEndpoint(params.Endpoint) | ||||
| 	bucket := client.Bucket(params.Bucket) | ||||
|  |  | |||
|  | @ -24,15 +24,18 @@ var ossDriverConstructor func(rootDirectory string) (*Driver, error) | |||
| var skipCheck func() string | ||||
| 
 | ||||
| func init() { | ||||
| 	accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") | ||||
| 	secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") | ||||
| 	bucket := os.Getenv("OSS_BUCKET") | ||||
| 	region := os.Getenv("OSS_REGION") | ||||
| 	internal := os.Getenv("OSS_INTERNAL") | ||||
| 	encrypt := os.Getenv("OSS_ENCRYPT") | ||||
| 	secure := os.Getenv("OSS_SECURE") | ||||
| 	endpoint := os.Getenv("OSS_ENDPOINT") | ||||
| 	encryptionKeyID := os.Getenv("OSS_ENCRYPTIONKEYID") | ||||
| 	var ( | ||||
| 		accessKey       = os.Getenv("ALIYUN_ACCESS_KEY_ID") | ||||
| 		secretKey       = os.Getenv("ALIYUN_ACCESS_KEY_SECRET") | ||||
| 		bucket          = os.Getenv("OSS_BUCKET") | ||||
| 		region          = os.Getenv("OSS_REGION") | ||||
| 		internal        = os.Getenv("OSS_INTERNAL") | ||||
| 		encrypt         = os.Getenv("OSS_ENCRYPT") | ||||
| 		secure          = os.Getenv("OSS_SECURE") | ||||
| 		endpoint        = os.Getenv("OSS_ENDPOINT") | ||||
| 		encryptionKeyID = os.Getenv("OSS_ENCRYPTIONKEYID") | ||||
| 	) | ||||
| 
 | ||||
| 	root, err := ioutil.TempDir("", "driver-") | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
|  |  | |||
|  | @ -93,7 +93,7 @@ var validRegions = map[string]struct{}{} | |||
| // validObjectACLs contains known s3 object Acls
 | ||||
| var validObjectACLs = map[string]struct{}{} | ||||
| 
 | ||||
| //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
 | ||||
| // DriverParameters A struct that encapsulates all of the driver parameters after all values have been set
 | ||||
| type DriverParameters struct { | ||||
| 	AccessKey                   string | ||||
| 	SecretKey                   string | ||||
|  | @ -632,7 +632,6 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read | |||
| 		Key:    aws.String(d.s3Path(path)), | ||||
| 		Range:  aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { | ||||
| 			return ioutil.NopCloser(bytes.NewReader(nil)), nil | ||||
|  | @ -1166,16 +1165,22 @@ func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, path, pre | |||
| // directoryDiff finds all directories that are not in common between
 | ||||
| // the previous and current paths in sorted order.
 | ||||
| //
 | ||||
| // Eg 1 directoryDiff("/path/to/folder", "/path/to/folder/folder/file")
 | ||||
| //   => [ "/path/to/folder/folder" ],
 | ||||
| // Eg 2 directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file")
 | ||||
| //   => [ "/path/to/folder/folder2" ]
 | ||||
| // Eg 3 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file")
 | ||||
| //  => [ "/path/to/folder/folder2" ]
 | ||||
| // Eg 4 directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file")
 | ||||
| //   => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ]
 | ||||
| // Eg 5 directoryDiff("/", "/path/to/folder/folder/file")
 | ||||
| //   => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ],
 | ||||
| // # Examples
 | ||||
| //
 | ||||
| //	directoryDiff("/path/to/folder", "/path/to/folder/folder/file")
 | ||||
| //	// => [ "/path/to/folder/folder" ]
 | ||||
| //
 | ||||
| //	directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file")
 | ||||
| //	// => [ "/path/to/folder/folder2" ]
 | ||||
| //
 | ||||
| //	directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file")
 | ||||
| //	// => [ "/path/to/folder/folder2" ]
 | ||||
| //
 | ||||
| //	directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file")
 | ||||
| //	// => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ]
 | ||||
| //
 | ||||
| //	directoryDiff("/", "/path/to/folder/folder/file")
 | ||||
| //	// => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ]
 | ||||
| func directoryDiff(prev, current string) []string { | ||||
| 	var paths []string | ||||
| 
 | ||||
|  |  | |||
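The restructured comment above lists worked examples for directoryDiff, and its signature is visible in the hunk. Assuming a test placed in the same s3 driver package, those examples translate directly into a table-driven check (a sketch, not part of this diff):

	package s3

	import (
		"reflect"
		"testing"
	)

	func TestDirectoryDiffExamples(t *testing.T) {
		cases := []struct {
			prev, current string
			want          []string
		}{
			{"/path/to/folder", "/path/to/folder/folder/file", []string{"/path/to/folder/folder"}},
			{"/path/to/folder/folder1", "/path/to/folder/folder2/file", []string{"/path/to/folder/folder2"}},
			{"/", "/path/to/folder/folder/file", []string{"/path", "/path/to", "/path/to/folder", "/path/to/folder/folder"}},
		}
		for _, c := range cases {
			if got := directoryDiff(c.prev, c.current); !reflect.DeepEqual(got, c.want) {
				t.Errorf("directoryDiff(%q, %q) = %v, want %v", c.prev, c.current, got, c.want)
			}
		}
	}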
|  | @ -27,27 +27,32 @@ import ( | |||
| // Hook up gocheck into the "go test" runner.
 | ||||
| func Test(t *testing.T) { check.TestingT(t) } | ||||
| 
 | ||||
| var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) | ||||
| var skipS3 func() string | ||||
| var ( | ||||
| 	s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) | ||||
| 	skipS3              func() string | ||||
| ) | ||||
| 
 | ||||
| func init() { | ||||
| 	accessKey := os.Getenv("AWS_ACCESS_KEY") | ||||
| 	secretKey := os.Getenv("AWS_SECRET_KEY") | ||||
| 	bucket := os.Getenv("S3_BUCKET") | ||||
| 	encrypt := os.Getenv("S3_ENCRYPT") | ||||
| 	keyID := os.Getenv("S3_KEY_ID") | ||||
| 	secure := os.Getenv("S3_SECURE") | ||||
| 	skipVerify := os.Getenv("S3_SKIP_VERIFY") | ||||
| 	v4Auth := os.Getenv("S3_V4_AUTH") | ||||
| 	region := os.Getenv("AWS_REGION") | ||||
| 	objectACL := os.Getenv("S3_OBJECT_ACL") | ||||
| 	var ( | ||||
| 		accessKey        = os.Getenv("AWS_ACCESS_KEY") | ||||
| 		secretKey        = os.Getenv("AWS_SECRET_KEY") | ||||
| 		bucket           = os.Getenv("S3_BUCKET") | ||||
| 		encrypt          = os.Getenv("S3_ENCRYPT") | ||||
| 		keyID            = os.Getenv("S3_KEY_ID") | ||||
| 		secure           = os.Getenv("S3_SECURE") | ||||
| 		skipVerify       = os.Getenv("S3_SKIP_VERIFY") | ||||
| 		v4Auth           = os.Getenv("S3_V4_AUTH") | ||||
| 		region           = os.Getenv("AWS_REGION") | ||||
| 		objectACL        = os.Getenv("S3_OBJECT_ACL") | ||||
| 		regionEndpoint   = os.Getenv("REGION_ENDPOINT") | ||||
| 		forcePathStyle   = os.Getenv("AWS_S3_FORCE_PATH_STYLE") | ||||
| 		sessionToken     = os.Getenv("AWS_SESSION_TOKEN") | ||||
| 		useDualStack     = os.Getenv("S3_USE_DUALSTACK") | ||||
| 		combineSmallPart = os.Getenv("MULTIPART_COMBINE_SMALL_PART") | ||||
| 		accelerate       = os.Getenv("S3_ACCELERATE") | ||||
| 	) | ||||
| 
 | ||||
| 	root, err := ioutil.TempDir("", "driver-") | ||||
| 	regionEndpoint := os.Getenv("REGION_ENDPOINT") | ||||
| 	forcePathStyle := os.Getenv("AWS_S3_FORCE_PATH_STYLE") | ||||
| 	sessionToken := os.Getenv("AWS_SESSION_TOKEN") | ||||
| 	useDualStack := os.Getenv("S3_USE_DUALSTACK") | ||||
| 	combineSmallPart := os.Getenv("MULTIPART_COMBINE_SMALL_PART") | ||||
| 	accelerate := os.Getenv("S3_ACCELERATE") | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
|  | @ -343,7 +348,7 @@ func TestDelete(t *testing.T) { | |||
| 		return false | ||||
| 	} | ||||
| 
 | ||||
| 	var objs = []string{ | ||||
| 	objs := []string{ | ||||
| 		"/file1", | ||||
| 		"/file1-2", | ||||
| 		"/file1/2", | ||||
|  | @ -411,7 +416,7 @@ func TestDelete(t *testing.T) { | |||
| 	} | ||||
| 
 | ||||
| 	// objects to skip auto-created test case
 | ||||
| 	var skipCase = map[string]bool{ | ||||
| 	skipCase := map[string]bool{ | ||||
| 		// special case where deleting "/file1" also deletes "/file1/2" is tested explicitly
 | ||||
| 		"/file1": true, | ||||
| 	} | ||||
|  | @ -536,7 +541,7 @@ func TestWalk(t *testing.T) { | |||
| 		t.Fatalf("unexpected error creating driver with standard storage: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	var fileset = []string{ | ||||
| 	fileset := []string{ | ||||
| 		"/file1", | ||||
| 		"/folder1/file1", | ||||
| 		"/folder2/file1", | ||||
|  |  | |||
|  | @ -66,7 +66,7 @@ type StorageDriver interface { | |||
| 	Stat(ctx context.Context, path string) (FileInfo, error) | ||||
| 
 | ||||
| 	// List returns a list of the objects that are direct descendants of the
 | ||||
| 	//given path.
 | ||||
| 	// given path.
 | ||||
| 	List(ctx context.Context, path string) ([]string, error) | ||||
| 
 | ||||
| 	// Move moves an object stored at sourcePath to destPath, removing the
 | ||||
|  |  | |||
|  | @ -341,8 +341,8 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read | |||
| 			return file, err | ||||
| 		} | ||||
| 
 | ||||
| 		//if this is a DLO and it is clear that segments are still missing,
 | ||||
| 		//wait until they show up
 | ||||
| 		// if this is a DLO and it is clear that segments are still missing,
 | ||||
| 		// wait until they show up
 | ||||
| 		_, isDLO := headers["X-Object-Manifest"] | ||||
| 		size, err := file.Length() | ||||
| 		if err != nil { | ||||
|  | @ -357,7 +357,7 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read | |||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		//if not, then this reader will be fine
 | ||||
| 		// if not, then this reader will be fine
 | ||||
| 		return file, nil | ||||
| 	} | ||||
| } | ||||
|  | @ -436,9 +436,9 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, | |||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	//Don't trust an empty `objects` slice. A container listing can be
 | ||||
| 	//outdated. For files, we can make a HEAD request on the object which
 | ||||
| 	//reports existence (at least) much more reliably.
 | ||||
| 	// Don't trust an empty `objects` slice. A container listing can be
 | ||||
| 	// outdated. For files, we can make a HEAD request on the object which
 | ||||
| 	// reports existence (at least) much more reliably.
 | ||||
| 	waitingTime := readAfterWriteWait | ||||
| 	endTime := time.Now().Add(readAfterWriteTimeout) | ||||
| 
 | ||||
|  | @ -451,8 +451,8 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, | |||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		//if this is a DLO and it is clear that segments are still missing,
 | ||||
| 		//wait until they show up
 | ||||
| 		// if this is a DLO and it is clear that segments are still missing,
 | ||||
| 		// wait until they show up
 | ||||
| 		_, isDLO := headers["X-Object-Manifest"] | ||||
| 		if isDLO && info.Bytes == 0 { | ||||
| 			if time.Now().Add(waitingTime).After(endTime) { | ||||
|  | @ -463,7 +463,7 @@ func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, | |||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		//otherwise, accept the result
 | ||||
| 		// otherwise, accept the result
 | ||||
| 		fi.IsDir = false | ||||
| 		fi.Size = info.Bytes | ||||
| 		fi.ModTime = info.LastModified | ||||
|  | @ -681,7 +681,7 @@ func (d *driver) swiftSegmentPath(path string) (string, error) { | |||
| } | ||||
| 
 | ||||
| func (d *driver) getAllSegments(path string) ([]swift.Object, error) { | ||||
| 	//a simple container listing works 99.9% of the time
 | ||||
| 	// a simple container listing works 99.9% of the time
 | ||||
| 	segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) | ||||
| 	if err != nil { | ||||
| 		if err == swift.ContainerNotFound { | ||||
|  | @ -690,15 +690,15 @@ func (d *driver) getAllSegments(path string) ([]swift.Object, error) { | |||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	//build a lookup table by object name
 | ||||
| 	// build a lookup table by object name
 | ||||
| 	hasObjectName := make(map[string]struct{}) | ||||
| 	for _, segment := range segments { | ||||
| 		hasObjectName[segment.Name] = struct{}{} | ||||
| 	} | ||||
| 
 | ||||
| 	//The container listing might be outdated (i.e. not contain all existing
 | ||||
| 	//segment objects yet) because of temporary inconsistency (Swift is only
 | ||||
| 	//eventually consistent!). Check its completeness.
 | ||||
| 	// The container listing might be outdated (i.e. not contain all existing
 | ||||
| 	// segment objects yet) because of temporary inconsistency (Swift is only
 | ||||
| 	// eventually consistent!). Check its completeness.
 | ||||
| 	segmentNumber := 0 | ||||
| 	for { | ||||
| 		segmentNumber++ | ||||
|  | @ -708,23 +708,23 @@ func (d *driver) getAllSegments(path string) ([]swift.Object, error) { | |||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		//This segment is missing in the container listing. Use a more reliable
 | ||||
| 		//request to check its existence. (HEAD requests on segments are
 | ||||
| 		//guaranteed to return the correct metadata, except for the pathological
 | ||||
| 		//case of an outage of large parts of the Swift cluster or its network,
 | ||||
| 		//since every segment is only written once.)
 | ||||
| 		// This segment is missing in the container listing. Use a more reliable
 | ||||
| 		// request to check its existence. (HEAD requests on segments are
 | ||||
| 		// guaranteed to return the correct metadata, except for the pathological
 | ||||
| 		// case of an outage of large parts of the Swift cluster or its network,
 | ||||
| 		// since every segment is only written once.)
 | ||||
| 		segment, _, err := d.Conn.Object(d.Container, segmentPath) | ||||
| 		switch err { | ||||
| 		case nil: | ||||
| 			//found new segment -> keep going, more might be missing
 | ||||
| 			// found new segment -> keep going, more might be missing
 | ||||
| 			segments = append(segments, segment) | ||||
| 			continue | ||||
| 		case swift.ObjectNotFound: | ||||
| 			//This segment is missing. Since we upload segments sequentially,
 | ||||
| 			//there won't be any more segments after it.
 | ||||
| 			// This segment is missing. Since we upload segments sequentially,
 | ||||
| 			// there won't be any more segments after it.
 | ||||
| 			return segments, nil | ||||
| 		default: | ||||
| 			return nil, err //unexpected error
 | ||||
| 			return nil, err // unexpected error
 | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  |  | |||
|  | @ -24,48 +24,29 @@ var swiftDriverConstructor func(prefix string) (*Driver, error) | |||
| 
 | ||||
| func init() { | ||||
| 	var ( | ||||
| 		username           string | ||||
| 		password           string | ||||
| 		authURL            string | ||||
| 		tenant             string | ||||
| 		tenantID           string | ||||
| 		domain             string | ||||
| 		domainID           string | ||||
| 		tenantDomain       string | ||||
| 		tenantDomainID     string | ||||
| 		trustID            string | ||||
| 		container          string | ||||
| 		region             string | ||||
| 		AuthVersion        int | ||||
| 		endpointType       string | ||||
| 		insecureSkipVerify bool | ||||
| 		secretKey          string | ||||
| 		accessKey          string | ||||
| 		containerKey       bool | ||||
| 		tempURLMethods     []string | ||||
| 		username              = os.Getenv("SWIFT_USERNAME") | ||||
| 		password              = os.Getenv("SWIFT_PASSWORD") | ||||
| 		authURL               = os.Getenv("SWIFT_AUTH_URL") | ||||
| 		tenant                = os.Getenv("SWIFT_TENANT_NAME") | ||||
| 		tenantID              = os.Getenv("SWIFT_TENANT_ID") | ||||
| 		domain                = os.Getenv("SWIFT_DOMAIN_NAME") | ||||
| 		domainID              = os.Getenv("SWIFT_DOMAIN_ID") | ||||
| 		tenantDomain          = os.Getenv("SWIFT_DOMAIN_NAME") | ||||
| 		tenantDomainID        = os.Getenv("SWIFT_DOMAIN_ID") | ||||
| 		trustID               = os.Getenv("SWIFT_TRUST_ID") | ||||
| 		container             = os.Getenv("SWIFT_CONTAINER_NAME") | ||||
| 		region                = os.Getenv("SWIFT_REGION_NAME") | ||||
| 		AuthVersion, _        = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) | ||||
| 		endpointType          = os.Getenv("SWIFT_ENDPOINT_TYPE") | ||||
| 		insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) | ||||
| 		secretKey             = os.Getenv("SWIFT_SECRET_KEY") | ||||
| 		accessKey             = os.Getenv("SWIFT_ACCESS_KEY") | ||||
| 		containerKey, _       = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) | ||||
| 		tempURLMethods        = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") | ||||
| 
 | ||||
| 		swiftServer *swifttest.SwiftServer | ||||
| 		err         error | ||||
| 	) | ||||
| 	username = os.Getenv("SWIFT_USERNAME") | ||||
| 	password = os.Getenv("SWIFT_PASSWORD") | ||||
| 	authURL = os.Getenv("SWIFT_AUTH_URL") | ||||
| 	tenant = os.Getenv("SWIFT_TENANT_NAME") | ||||
| 	tenantID = os.Getenv("SWIFT_TENANT_ID") | ||||
| 	domain = os.Getenv("SWIFT_DOMAIN_NAME") | ||||
| 	domainID = os.Getenv("SWIFT_DOMAIN_ID") | ||||
| 	tenantDomain = os.Getenv("SWIFT_DOMAIN_NAME") | ||||
| 	tenantDomainID = os.Getenv("SWIFT_DOMAIN_ID") | ||||
| 	trustID = os.Getenv("SWIFT_TRUST_ID") | ||||
| 	container = os.Getenv("SWIFT_CONTAINER_NAME") | ||||
| 	region = os.Getenv("SWIFT_REGION_NAME") | ||||
| 	AuthVersion, _ = strconv.Atoi(os.Getenv("SWIFT_AUTH_VERSION")) | ||||
| 	endpointType = os.Getenv("SWIFT_ENDPOINT_TYPE") | ||||
| 	insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) | ||||
| 	secretKey = os.Getenv("SWIFT_SECRET_KEY") | ||||
| 	accessKey = os.Getenv("SWIFT_ACCESS_KEY") | ||||
| 	containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) | ||||
| 	tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") | ||||
| 
 | ||||
| 	if username == "" || password == "" || authURL == "" || container == "" { | ||||
| 		if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { | ||||
|  |  | |||
|  | @ -116,7 +116,8 @@ func (suite *DriverSuite) TestValidPaths(c *check.C) { | |||
| 		"/a-.b", | ||||
| 		"/_.abc", | ||||
| 		"/Docker/docker-registry", | ||||
| 		"/Abc/Cba"} | ||||
| 		"/Abc/Cba", | ||||
| 	} | ||||
| 
 | ||||
| 	for _, filename := range validFiles { | ||||
| 		err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) | ||||
|  | @ -154,7 +155,8 @@ func (suite *DriverSuite) TestInvalidPaths(c *check.C) { | |||
| 		"abc", | ||||
| 		"123.abc", | ||||
| 		"//bcd", | ||||
| 		"/abc_123/"} | ||||
| 		"/abc_123/", | ||||
| 	} | ||||
| 
 | ||||
| 	for _, filename := range invalidFiles { | ||||
| 		err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) | ||||
|  | @ -1175,8 +1177,10 @@ func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, c | |||
| 	c.Assert(readContents, check.DeepEquals, contents) | ||||
| } | ||||
| 
 | ||||
| var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") | ||||
| var separatorChars = []byte("._-") | ||||
| var ( | ||||
| 	filenameChars  = []byte("abcdefghijklmnopqrstuvwxyz0123456789") | ||||
| 	separatorChars = []byte("._-") | ||||
| ) | ||||
| 
 | ||||
| func randomPath(length int64) string { | ||||
| 	path := "/" | ||||
|  |  | |||
|  | @ -16,6 +16,7 @@ type changingFileSystem struct { | |||
| func (cfs *changingFileSystem) List(_ context.Context, _ string) ([]string, error) { | ||||
| 	return cfs.fileset, nil | ||||
| } | ||||
| 
 | ||||
| func (cfs *changingFileSystem) Stat(_ context.Context, path string) (FileInfo, error) { | ||||
| 	kept, ok := cfs.keptFiles[path] | ||||
| 	if ok && kept { | ||||
|  | @ -48,6 +49,7 @@ func (cfs *fileSystem) Stat(_ context.Context, path string) (FileInfo, error) { | |||
| 		}, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| func (cfs *fileSystem) isDir(path string) bool { | ||||
| 	_, isDir := cfs.fileset[path] | ||||
| 	return isDir | ||||
|  | @ -167,7 +169,6 @@ func TestWalkFallback(t *testing.T) { | |||
| 			compareWalked(t, tc.expected, walked) | ||||
| 		}) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func compareWalked(t *testing.T, expected, walked []string) { | ||||
|  |  | |||
|  | @ -61,7 +61,6 @@ func TestFileReaderSeek(t *testing.T) { | |||
| 	} | ||||
| 
 | ||||
| 	fr, err := newFileReader(ctx, driver, path, int64(len(content))) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected error creating file reader: %v", err) | ||||
| 	} | ||||
|  |  | |||
|  | @ -109,7 +109,6 @@ func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, regis | |||
| 
 | ||||
| 		return err | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("failed to mark: %v", err) | ||||
| 	} | ||||
|  |  | |||
|  | @ -173,7 +173,8 @@ func TestNoDeletionNoEffect(t *testing.T) { | |||
| 	// construct manifestlist for fun.
 | ||||
| 	blobstatter := registry.BlobStatter() | ||||
| 	manifestList, err := testutil.MakeManifestList(blobstatter, []digest.Digest{ | ||||
| 		image1.manifestDigest, image2.manifestDigest}) | ||||
| 		image1.manifestDigest, image2.manifestDigest, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to make manifest list: %v", err) | ||||
| 	} | ||||
|  |  | |||
|  | @ -150,7 +150,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. | |||
| 		name: lbs.repository.Named().Name(), | ||||
| 		id:   uuid, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -159,7 +158,6 @@ func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution. | |||
| 		name: lbs.repository.Named().Name(), | ||||
| 		id:   uuid, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -179,7 +177,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution | |||
| 		name: lbs.repository.Named().Name(), | ||||
| 		id:   id, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | @ -203,7 +200,6 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution | |||
| 		name: lbs.repository.Named().Name(), | ||||
| 		id:   id, | ||||
| 	}) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  |  | |||
|  | @ -540,7 +540,6 @@ func testOCIManifestStorage(t *testing.T, testname string, includeMediaTypes boo | |||
| 	if payloadMediaType != v1.MediaTypeImageIndex { | ||||
| 		t.Fatalf("%s: unexpected MediaType for index payload, %s", testname, payloadMediaType) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| // TestLinkPathFuncs ensures that the link path functions behavior are locked
 | ||||
|  |  | |||
|  | @ -12,7 +12,7 @@ import ( | |||
| 	v1 "github.com/opencontainers/image-spec/specs-go/v1" | ||||
| ) | ||||
| 
 | ||||
| //ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
 | ||||
| // ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
 | ||||
| type ocischemaManifestHandler struct { | ||||
| 	repository   distribution.Repository | ||||
| 	blobStore    distribution.BlobStore | ||||
|  |  | |||
|  | @ -23,25 +23,33 @@ const ( | |||
| //
 | ||||
| // The path layout in the storage backend is roughly as follows:
 | ||||
| //
 | ||||
| //		<root>/v2
 | ||||
| //			-> repositories/
 | ||||
| // 				-><name>/
 | ||||
| // 					-> _manifests/
 | ||||
| // 						revisions
 | ||||
| //							-> <manifest digest path>
 | ||||
| //								-> link
 | ||||
| // 						tags/<tag>
 | ||||
| //							-> current/link
 | ||||
| // 							-> index
 | ||||
| //								-> <algorithm>/<hex digest>/link
 | ||||
| // 					-> _layers/
 | ||||
| // 						<layer links to blob store>
 | ||||
| // 					-> _uploads/<id>
 | ||||
| // 						data
 | ||||
| // 						startedat
 | ||||
| // 						hashstates/<algorithm>/<offset>
 | ||||
| //			-> blob/<algorithm>
 | ||||
| //				<split directory content addressable storage>
 | ||||
| //	<root>/v2
 | ||||
| //	├── blob
 | ||||
| //	│   └── <algorithm>
 | ||||
| //	│       └── <split directory content addressable storage>
 | ||||
| //	└── repositories
 | ||||
| //	    └── <name>
 | ||||
| //	        ├── _layers
 | ||||
| //	        │   └── <layer links to blob store>
 | ||||
| //	        ├── _manifests
 | ||||
| //	        │   ├── revisions
 | ||||
| //	        │   │   └── <manifest digest path>
 | ||||
| //	        │   │       └── link
 | ||||
| //	        │   └── tags
 | ||||
| //	        │       └── <tag>
 | ||||
| //	        │           ├── current
 | ||||
| //	        │           │   └── link
 | ||||
| //	        │           └── index
 | ||||
| //	        │               └── <algorithm>
 | ||||
| //	        │                   └── <hex digest>
 | ||||
| //	        │                       └── link
 | ||||
| //	        └── _uploads
 | ||||
| //	            └── <id>
 | ||||
| //	                ├── data
 | ||||
| //	                ├── hashstates
 | ||||
| //	                │   └── <algorithm>
 | ||||
| //	                │       └── <offset>
 | ||||
| //	                └── startedat
 | ||||
| //
 | ||||
| // The storage backend layout is broken up into a content-addressable blob
 | ||||
| // store and repositories. The content-addressable blob store holds most data
 | ||||
|  | @ -71,41 +79,40 @@ const ( | |||
| //
 | ||||
| //	Manifests:
 | ||||
| //
 | ||||
| // 	manifestRevisionsPathSpec:      <root>/v2/repositories/<name>/_manifests/revisions/
 | ||||
| // 	manifestRevisionPathSpec:      <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
 | ||||
| // 	manifestRevisionLinkPathSpec:  <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
 | ||||
| //	manifestRevisionsPathSpec:      <root>/v2/repositories/<name>/_manifests/revisions/
 | ||||
| //	manifestRevisionPathSpec:      <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
 | ||||
| //	manifestRevisionLinkPathSpec:  <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
 | ||||
| //
 | ||||
| //	Tags:
 | ||||
| //
 | ||||
| // 	manifestTagsPathSpec:                  <root>/v2/repositories/<name>/_manifests/tags/
 | ||||
| // 	manifestTagPathSpec:                   <root>/v2/repositories/<name>/_manifests/tags/<tag>/
 | ||||
| // 	manifestTagCurrentPathSpec:            <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
 | ||||
| // 	manifestTagIndexPathSpec:              <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
 | ||||
| // 	manifestTagIndexEntryPathSpec:         <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
 | ||||
| // 	manifestTagIndexEntryLinkPathSpec:     <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
 | ||||
| //	manifestTagsPathSpec:                  <root>/v2/repositories/<name>/_manifests/tags/
 | ||||
| //	manifestTagPathSpec:                   <root>/v2/repositories/<name>/_manifests/tags/<tag>/
 | ||||
| //	manifestTagCurrentPathSpec:            <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
 | ||||
| //	manifestTagIndexPathSpec:              <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
 | ||||
| //	manifestTagIndexEntryPathSpec:         <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
 | ||||
| //	manifestTagIndexEntryLinkPathSpec:     <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
 | ||||
| //
 | ||||
| // 	Blobs:
 | ||||
| //	Blobs:
 | ||||
| //
 | ||||
| // 	layerLinkPathSpec:            <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
 | ||||
| // 	layersPathSpec:               <root>/v2/repositories/<name>/_layers
 | ||||
| //	layerLinkPathSpec:            <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
 | ||||
| //	layersPathSpec:               <root>/v2/repositories/<name>/_layers
 | ||||
| //
 | ||||
| //	Uploads:
 | ||||
| //
 | ||||
| // 	uploadDataPathSpec:             <root>/v2/repositories/<name>/_uploads/<id>/data
 | ||||
| // 	uploadStartedAtPathSpec:        <root>/v2/repositories/<name>/_uploads/<id>/startedat
 | ||||
| // 	uploadHashStatePathSpec:        <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
 | ||||
| //	uploadDataPathSpec:             <root>/v2/repositories/<name>/_uploads/<id>/data
 | ||||
| //	uploadStartedAtPathSpec:        <root>/v2/repositories/<name>/_uploads/<id>/startedat
 | ||||
| //	uploadHashStatePathSpec:        <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
 | ||||
| //
 | ||||
| //	Blob Store:
 | ||||
| //
 | ||||
| //	blobsPathSpec:                  <root>/v2/blobs/
 | ||||
| // 	blobPathSpec:                   <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
 | ||||
| // 	blobDataPathSpec:               <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
 | ||||
| // 	blobMediaTypePathSpec:               <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
 | ||||
| //	blobPathSpec:                   <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
 | ||||
| //	blobDataPathSpec:               <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
 | ||||
| //	blobMediaTypePathSpec:               <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
 | ||||
| //
 | ||||
| // For more information on the semantic meaning of each path and their
 | ||||
| // contents, please see the path spec documentation.
 | ||||
| func pathFor(spec pathSpec) (string, error) { | ||||
| 
 | ||||
| 	// Switch on the path object type and return the appropriate path. At
 | ||||
| 	// first glance, one may wonder why we don't use an interface to
 | ||||
| 	// accomplish this. By keep the formatting separate from the pathSpec, we
 | ||||
|  | @ -135,7 +142,6 @@ func pathFor(spec pathSpec) (string, error) { | |||
| 		return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil | ||||
| 	case manifestRevisionLinkPathSpec: | ||||
| 		root, err := pathFor(manifestRevisionPathSpec(v)) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
|  | @ -147,7 +153,6 @@ func pathFor(spec pathSpec) (string, error) { | |||
| 		root, err := pathFor(manifestTagsPathSpec{ | ||||
| 			name: v.name, | ||||
| 		}) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
|  | @ -155,7 +160,6 @@ func pathFor(spec pathSpec) (string, error) { | |||
| 		return path.Join(root, v.tag), nil | ||||
| 	case manifestTagCurrentPathSpec: | ||||
| 		root, err := pathFor(manifestTagPathSpec(v)) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
|  | @ -163,7 +167,6 @@ func pathFor(spec pathSpec) (string, error) { | |||
| 		return path.Join(root, "current", "link"), nil | ||||
| 	case manifestTagIndexPathSpec: | ||||
| 		root, err := pathFor(manifestTagPathSpec(v)) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
|  | @ -171,7 +174,6 @@ func pathFor(spec pathSpec) (string, error) { | |||
| 		return path.Join(root, "index"), nil | ||||
| 	case manifestTagIndexEntryLinkPathSpec: | ||||
| 		root, err := pathFor(manifestTagIndexEntryPathSpec(v)) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
|  | @ -182,7 +184,6 @@ func pathFor(spec pathSpec) (string, error) { | |||
| 			name: v.name, | ||||
| 			tag:  v.tag, | ||||
| 		}) | ||||
| 
 | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
|  | @ -349,11 +350,11 @@ func (layersPathSpec) pathSpec() {} | |||
| // blob id. The blob link will contain a content addressable blob id reference
 | ||||
| // into the blob store. The format of the contents is as follows:
 | ||||
| //
 | ||||
| // 	<algorithm>:<hex digest of layer data>
 | ||||
| //	<algorithm>:<hex digest of layer data>
 | ||||
| //
 | ||||
| // The following example of the file contents is more illustrative:
 | ||||
| //
 | ||||
| // 	sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
 | ||||
| //	sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
 | ||||
| //
 | ||||
| // This  indicates that there is a blob with the id/digest, calculated via
 | ||||
| // sha256 that can be fetched from the blob store.
 | ||||
|  | @ -431,21 +432,19 @@ type uploadHashStatePathSpec struct { | |||
| func (uploadHashStatePathSpec) pathSpec() {} | ||||
| 
 | ||||
| // repositoriesRootPathSpec returns the root of repositories
 | ||||
| type repositoriesRootPathSpec struct { | ||||
| } | ||||
| type repositoriesRootPathSpec struct{} | ||||
| 
 | ||||
| func (repositoriesRootPathSpec) pathSpec() {} | ||||
| 
 | ||||
| // digestPathComponents provides a consistent path breakdown for a given
 | ||||
| // digest. For a generic digest, it will be as follows:
 | ||||
| //
 | ||||
| // 	<algorithm>/<hex digest>
 | ||||
| //	<algorithm>/<hex digest>
 | ||||
| //
 | ||||
| // If multilevel is true, the first two bytes of the digest will separate
 | ||||
| // groups of digest folder. It will be as follows:
 | ||||
| //
 | ||||
| // 	<algorithm>/<first two bytes of digest>/<full digest>
 | ||||
| //
 | ||||
| //	<algorithm>/<first two bytes of digest>/<full digest>
 | ||||
| func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { | ||||
| 	if err := dgst.Validate(); err != nil { | ||||
| 		return nil, err | ||||
|  | @ -468,7 +467,6 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) | |||
| 
 | ||||
| // Reconstructs a digest from a path
 | ||||
| func digestFromPath(digestPath string) (digest.Digest, error) { | ||||
| 
 | ||||
| 	digestPath = strings.TrimSuffix(digestPath, "/data") | ||||
| 	dir, hex := path.Split(digestPath) | ||||
| 	dir = path.Dir(dir) | ||||
|  |  | |||
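The path layout and path-spec listing above map each spec type to a location under <root>/v2. A sketch of how a tag's current link resolves through pathFor; since the spec types are unexported this would sit inside the storage package, and only the documented suffix is checked because the exact <root> prefix is an implementation detail:

	package storage

	import (
		"strings"
		"testing"
	)

	// Assumes manifestTagCurrentPathSpec carries name and tag fields, matching
	// its usage inside pathFor above.
	func TestManifestTagCurrentLinkPath(t *testing.T) {
		p, err := pathFor(manifestTagCurrentPathSpec{
			name: "samalba/my-app",
			tag:  "latest",
		})
		if err != nil {
			t.Fatal(err)
		}
		want := "/repositories/samalba/my-app/_manifests/tags/latest/current/link"
		if !strings.HasSuffix(p, want) {
			t.Errorf("unexpected path %q, want suffix %q", p, want)
		}
	}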
|  | @ -108,7 +108,6 @@ func TestPathMapper(t *testing.T) { | |||
| 	if err == nil { | ||||
| 		t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) | ||||
| 	} | ||||
| 
 | ||||
| } | ||||
| 
 | ||||
| func TestDigestFromPath(t *testing.T) { | ||||
|  | @ -132,7 +131,6 @@ func TestDigestFromPath(t *testing.T) { | |||
| 
 | ||||
| 		if result != testcase.expected { | ||||
| 			t.Fatalf("Unexpected result value %v when we wanted %v", result, testcase.expected) | ||||
| 
 | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  |  | |||
|  | @ -98,7 +98,6 @@ func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriv | |||
| 			} else { | ||||
| 				errors = pushError(errors, filePath, err) | ||||
| 			} | ||||
| 
 | ||||
| 		} | ||||
| 
 | ||||
| 		uploads[uuid] = ud | ||||
|  |  | |||
Some files were not shown because too many files have changed in this diff.